Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 76
-rw-r--r--  include/linux/alarmtimer.h | 4
-rw-r--r--  include/linux/ata.h | 12
-rw-r--r--  include/linux/backing-dev-defs.h | 255
-rw-r--r--  include/linux/backing-dev.h | 558
-rw-r--r--  include/linux/backlight.h | 8
-rw-r--r--  include/linux/basic_mmio_gpio.h | 1
-rw-r--r--  include/linux/bcma/bcma.h | 9
-rw-r--r--  include/linux/bcma/bcma_driver_pci.h | 11
-rw-r--r--  include/linux/bio.h | 20
-rw-r--r--  include/linux/blk-cgroup.h | 655
-rw-r--r--  include/linux/blk-mq.h | 4
-rw-r--r--  include/linux/blk_types.h | 25
-rw-r--r--  include/linux/blkdev.h | 69
-rw-r--r--  include/linux/bootmem.h | 8
-rw-r--r--  include/linux/bottom_half.h | 1
-rw-r--r--  include/linux/bpf.h | 36
-rw-r--r--  include/linux/brcmphy.h | 7
-rw-r--r--  include/linux/cgroup.h | 25
-rw-r--r--  include/linux/clk.h | 27
-rw-r--r--  include/linux/clkdev.h | 6
-rw-r--r--  include/linux/clockchips.h | 37
-rw-r--r--  include/linux/clocksource.h | 1
-rw-r--r--  include/linux/compiler.h | 20
-rw-r--r--  include/linux/configfs.h | 1
-rw-r--r--  include/linux/context_tracking.h | 10
-rw-r--r--  include/linux/context_tracking_state.h | 1
-rw-r--r--  include/linux/cpu_cooling.h | 39
-rw-r--r--  include/linux/cpufreq.h | 5
-rw-r--r--  include/linux/cpuidle.h | 20
-rw-r--r--  include/linux/crc-itu-t.h | 2
-rw-r--r--  include/linux/crypto.h | 501
-rw-r--r--  include/linux/cryptouser.h | 105
-rw-r--r--  include/linux/debugfs.h | 1
-rw-r--r--  include/linux/dmapool.h | 2
-rw-r--r--  include/linux/dmar.h | 85
-rw-r--r--  include/linux/dmi.h | 4
-rw-r--r--  include/linux/efi.h | 15
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/etherdevice.h | 42
-rw-r--r--  include/linux/f2fs_fs.h | 8
-rw-r--r--  include/linux/filter.h | 30
-rw-r--r--  include/linux/frontswap.h | 14
-rw-r--r--  include/linux/fs.h | 45
-rw-r--r--  include/linux/fsnotify_backend.h | 2
-rw-r--r--  include/linux/gfp.h | 5
-rw-r--r--  include/linux/gpio.h | 7
-rw-r--r--  include/linux/gpio/consumer.h | 43
-rw-r--r--  include/linux/gpio/driver.h | 13
-rw-r--r--  include/linux/hardirq.h | 2
-rw-r--r--  include/linux/hid.h | 2
-rw-r--r--  include/linux/highmem.h | 2
-rw-r--r--  include/linux/hrtimer.h | 167
-rw-r--r--  include/linux/htirq.h | 22
-rw-r--r--  include/linux/i2c/twl.h | 1
-rw-r--r--  include/linux/ide.h | 27
-rw-r--r--  include/linux/ieee802154.h | 16
-rw-r--r--  include/linux/if_link.h | 9
-rw-r--r--  include/linux/if_macvlan.h | 2
-rw-r--r--  include/linux/if_pppox.h | 2
-rw-r--r--  include/linux/if_vlan.h | 28
-rw-r--r--  include/linux/igmp.h | 1
-rw-r--r--  include/linux/inet_diag.h | 1
-rw-r--r--  include/linux/inetdevice.h | 3
-rw-r--r--  include/linux/init_task.h | 5
-rw-r--r--  include/linux/intel-iommu.h | 13
-rw-r--r--  include/linux/interrupt.h | 9
-rw-r--r--  include/linux/io-mapping.h | 2
-rw-r--r--  include/linux/io.h | 8
-rw-r--r--  include/linux/iommu.h | 44
-rw-r--r--  include/linux/irq.h | 88
-rw-r--r--  include/linux/irqdesc.h | 12
-rw-r--r--  include/linux/irqdomain.h | 8
-rw-r--r--  include/linux/jbd2.h | 4
-rw-r--r--  include/linux/jiffies.h | 130
-rw-r--r--  include/linux/kernel.h | 3
-rw-r--r--  include/linux/kmemleak.h | 6
-rw-r--r--  include/linux/kvm_host.h | 96
-rw-r--r--  include/linux/kvm_types.h | 1
-rw-r--r--  include/linux/lglock.h | 5
-rw-r--r--  include/linux/libata.h | 3
-rw-r--r--  include/linux/livepatch.h | 8
-rw-r--r--  include/linux/lockdep.h | 14
-rw-r--r--  include/linux/mbus.h | 5
-rw-r--r--  include/linux/mdio-gpio.h | 3
-rw-r--r--  include/linux/memblock.h | 49
-rw-r--r--  include/linux/memcontrol.h | 29
-rw-r--r--  include/linux/mfd/arizona/core.h | 9
-rw-r--r--  include/linux/mfd/arizona/pdata.h | 5
-rw-r--r--  include/linux/mfd/arizona/registers.h | 27
-rw-r--r--  include/linux/mfd/axp20x.h | 93
-rw-r--r--  include/linux/mfd/cros_ec.h | 86
-rw-r--r--  include/linux/mfd/cros_ec_commands.h | 277
-rw-r--r--  include/linux/mfd/da9055/core.h | 2
-rw-r--r--  include/linux/mfd/da9063/pdata.h | 1
-rw-r--r--  include/linux/mfd/max77686.h | 5
-rw-r--r--  include/linux/mfd/stmpe.h | 44
-rw-r--r--  include/linux/mlx4/cmd.h | 6
-rw-r--r--  include/linux/mlx4/device.h | 30
-rw-r--r--  include/linux/mlx5/cq.h | 3
-rw-r--r--  include/linux/mlx5/device.h | 215
-rw-r--r--  include/linux/mlx5/driver.h | 173
-rw-r--r--  include/linux/mlx5/flow_table.h | 54
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 6584
-rw-r--r--  include/linux/mlx5/qp.h | 25
-rw-r--r--  include/linux/mlx5/vport.h | 55
-rw-r--r--  include/linux/mm-arch-hooks.h | 25
-rw-r--r--  include/linux/mm.h | 45
-rw-r--r--  include/linux/mm_types.h | 18
-rw-r--r--  include/linux/mmc/card.h | 2
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/mmc/dw_mmc.h | 6
-rw-r--r--  include/linux/mmc/host.h | 28
-rw-r--r--  include/linux/mmc/mmc.h | 4
-rw-r--r--  include/linux/mmc/sdhci-pci-data.h | 2
-rw-r--r--  include/linux/mmu_notifier.h | 12
-rw-r--r--  include/linux/module.h | 12
-rw-r--r--  include/linux/mpi.h | 15
-rw-r--r--  include/linux/mtd/cfi.h | 188
-rw-r--r--  include/linux/mtd/nand.h | 6
-rw-r--r--  include/linux/namei.h | 41
-rw-r--r--  include/linux/net.h | 3
-rw-r--r--  include/linux/netdev_features.h | 5
-rw-r--r--  include/linux/netdevice.h | 31
-rw-r--r--  include/linux/netfilter.h | 45
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 61
-rw-r--r--  include/linux/netfilter/ipset/ip_set_comment.h | 38
-rw-r--r--  include/linux/netfilter/ipset/ip_set_timeout.h | 27
-rw-r--r--  include/linux/netfilter/x_tables.h | 60
-rw-r--r--  include/linux/netfilter_bridge.h | 7
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h | 2
-rw-r--r--  include/linux/netfilter_defs.h | 9
-rw-r--r--  include/linux/netfilter_ingress.h | 41
-rw-r--r--  include/linux/netfilter_ipv6.h | 3
-rw-r--r--  include/linux/netlink.h | 2
-rw-r--r--  include/linux/nmi.h | 3
-rw-r--r--  include/linux/nvme.h | 31
-rw-r--r--  include/linux/nx842.h | 11
-rw-r--r--  include/linux/of.h | 6
-rw-r--r--  include/linux/of_fdt.h | 2
-rw-r--r--  include/linux/oom.h | 12
-rw-r--r--  include/linux/osq_lock.h | 5
-rw-r--r--  include/linux/pagemap.h | 3
-rw-r--r--  include/linux/pci.h | 44
-rw-r--r--  include/linux/pci_ids.h | 3
-rw-r--r--  include/linux/perf_event.h | 50
-rw-r--r--  include/linux/phy.h | 14
-rw-r--r--  include/linux/pinctrl/consumer.h | 2
-rw-r--r--  include/linux/pinctrl/pinctrl.h | 2
-rw-r--r--  include/linux/pinctrl/pinmux.h | 6
-rw-r--r--  include/linux/platform_data/gpio-omap.h | 12
-rw-r--r--  include/linux/platform_data/irq-renesas-irqc.h | 27
-rw-r--r--  include/linux/platform_data/keyboard-spear.h | 2
-rw-r--r--  include/linux/platform_data/nfcmrvl.h | 40
-rw-r--r--  include/linux/platform_data/ntc_thermistor.h | 1
-rw-r--r--  include/linux/platform_data/st-nci.h (renamed from include/linux/platform_data/st21nfcb.h) | 14
-rw-r--r--  include/linux/platform_data/st_nci.h | 29
-rw-r--r--  include/linux/platform_data/video-msm_fb.h | 146
-rw-r--r--  include/linux/pm.h | 14
-rw-r--r--  include/linux/pm_clock.h | 10
-rw-r--r--  include/linux/pm_wakeirq.h | 51
-rw-r--r--  include/linux/pm_wakeup.h | 9
-rw-r--r--  include/linux/power/max17042_battery.h | 4
-rw-r--r--  include/linux/power_supply.h | 11
-rw-r--r--  include/linux/preempt.h | 159
-rw-r--r--  include/linux/preempt_mask.h | 117
-rw-r--r--  include/linux/property.h | 2
-rw-r--r--  include/linux/pwm.h | 12
-rw-r--r--  include/linux/pxa2xx_ssp.h | 3
-rw-r--r--  include/linux/random.h | 9
-rw-r--r--  include/linux/rculist.h | 10
-rw-r--r--  include/linux/rcupdate.h | 76
-rw-r--r--  include/linux/rcutiny.h | 16
-rw-r--r--  include/linux/rcutree.h | 9
-rw-r--r--  include/linux/regmap.h | 14
-rw-r--r--  include/linux/regulator/driver.h | 11
-rw-r--r--  include/linux/regulator/machine.h | 9
-rw-r--r--  include/linux/regulator/max8973-regulator.h | 4
-rw-r--r--  include/linux/rio.h | 2
-rw-r--r--  include/linux/rtnetlink.h | 16
-rw-r--r--  include/linux/scatterlist.h | 40
-rw-r--r--  include/linux/sched.h | 145
-rw-r--r--  include/linux/sched/sysctl.h | 12
-rw-r--r--  include/linux/security.h | 13
-rw-r--r--  include/linux/seqlock.h | 47
-rw-r--r--  include/linux/skbuff.h | 77
-rw-r--r--  include/linux/slab.h | 26
-rw-r--r--  include/linux/smpboot.h | 5
-rw-r--r--  include/linux/sock_diag.h | 42
-rw-r--r--  include/linux/spi/cc2520.h | 1
-rw-r--r--  include/linux/spinlock.h | 2
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/sw842.h | 12
-rw-r--r--  include/linux/swap.h | 1
-rw-r--r--  include/linux/tcp.h | 15
-rw-r--r--  include/linux/thermal.h | 97
-rw-r--r--  include/linux/tick.h | 19
-rw-r--r--  include/linux/time64.h | 2
-rw-r--r--  include/linux/timekeeper_internal.h | 19
-rw-r--r--  include/linux/timekeeping.h | 2
-rw-r--r--  include/linux/timer.h | 63
-rw-r--r--  include/linux/timerqueue.h | 8
-rw-r--r--  include/linux/topology.h | 6
-rw-r--r--  include/linux/types.h | 12
-rw-r--r--  include/linux/u64_stats_sync.h | 7
-rw-r--r--  include/linux/uaccess.h | 48
-rw-r--r--  include/linux/wait.h | 17
-rw-r--r--  include/linux/writeback.h | 221
208 files changed, 11706 insertions, 2427 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index e4da5e35e29c..c187817471fb 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -158,6 +158,16 @@ typedef u32 phys_cpuid_t;
 #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
 #endif
 
+static inline bool invalid_logical_cpuid(u32 cpuid)
+{
+	return (int)cpuid < 0;
+}
+
+static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
+{
+	return phys_id == PHYS_CPUID_INVALID;
+}
+
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
@@ -243,50 +253,12 @@ extern bool wmi_has_guid(const char *guid);
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR	0x0400
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO	0x0800
 
-#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-
-extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern char acpi_video_backlight_string[];
 extern long acpi_is_video_device(acpi_handle handle);
-extern void acpi_video_dmi_promote_vendor(void);
-extern void acpi_video_dmi_demote_vendor(void);
-extern int acpi_video_backlight_support(void);
-extern int acpi_video_display_switch_support(void);
-
-#else
-
-static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
-{
-	return 0;
-}
-
-static inline long acpi_is_video_device(acpi_handle handle)
-{
-	return 0;
-}
-
-static inline void acpi_video_dmi_promote_vendor(void)
-{
-}
-
-static inline void acpi_video_dmi_demote_vendor(void)
-{
-}
-
-static inline int acpi_video_backlight_support(void)
-{
-	return 0;
-}
-
-static inline int acpi_video_display_switch_support(void)
-{
-	return 0;
-}
-
-#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
-
 extern int acpi_blacklisted(void);
 extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
 extern void acpi_osi_setup(char *str);
+extern bool acpi_osi_is_win8(void);
 
 #ifdef CONFIG_ACPI_NUMA
 int acpi_get_node(acpi_handle handle);
@@ -332,6 +304,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 
 int acpi_resources_are_enforced(void);
 
+int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
+			unsigned long flags, char *desc);
+
 #ifdef CONFIG_HIBERNATION
 void __init acpi_no_s4_hw_signature(void);
 #endif
@@ -440,6 +415,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
 #define ACPI_OST_SC_INSERT_NOT_SUPPORTED	0x82
 
 extern void acpi_early_init(void);
+extern void acpi_subsystem_init(void);
 
 extern int acpi_nvs_register(__u64 start, __u64 size);
 
@@ -494,6 +470,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
 }
 
 static inline void acpi_early_init(void) { }
+static inline void acpi_subsystem_init(void) { }
 
 static inline int early_acpi_boot_init(void)
 {
@@ -525,6 +502,13 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
 	return 0;
 }
 
+static inline int acpi_reserve_region(u64 start, unsigned int length,
+				      u8 space_id, unsigned long flags,
+				      char *desc)
+{
+	return -ENXIO;
+}
+
 struct acpi_table_header;
 static inline int acpi_table_parse(char *id,
 				int (*handler)(struct acpi_table_header *))
@@ -569,6 +553,11 @@ static inline int acpi_device_modalias(struct device *dev,
 	return -ENODEV;
 }
 
+static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+{
+	return false;
+}
+
 #define ACPI_PTR(_ptr)	(NULL)
 
 #endif	/* !CONFIG_ACPI */
@@ -721,6 +710,8 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
 	if (adev)
 		adev->driver_gpios = NULL;
 }
+
+int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
 #else
 static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
 			      const struct acpi_gpio_mapping *gpios)
@@ -728,6 +719,11 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
 	return -ENXIO;
 }
 static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+{
+	return -ENXIO;
+}
 #endif
 
 /* Device properties */
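
A minimal usage sketch for the new acpi_dev_gpio_irq_get() helper; the foo_* driver names below are hypothetical and not part of the patch:

	#include <linux/acpi.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_irq_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
		int irq;

		/* Map the first GpioInt resource to a Linux IRQ number;
		 * a negative value is an errno (-ENXIO from the stub above). */
		irq = acpi_dev_gpio_irq_get(adev, 0);
		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
					"foo", pdev);
	}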
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index a899402a5a0e..52f3b7da4f2d 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -43,8 +43,8 @@ struct alarm {
 
 void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
 		enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
 void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
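
A short sketch of the changed contract: alarm_start() and alarm_start_relative() no longer return a value, so callers simply arm the alarm (the foo_* names are hypothetical):

	#include <linux/alarmtimer.h>

	static enum alarmtimer_restart foo_alarm_cb(struct alarm *alarm,
						    ktime_t now)
	{
		return ALARMTIMER_NORESTART;
	}

	static void foo_arm_alarm(struct alarm *alarm)
	{
		alarm_init(alarm, ALARM_REALTIME, foo_alarm_cb);
		/* Fire 5 seconds from now; there is no return value to check. */
		alarm_start_relative(alarm, ktime_set(5, 0));
	}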
diff --git a/include/linux/ata.h b/include/linux/ata.h
index b666b773e111..fed36418dd1c 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -704,9 +704,19 @@ static inline bool ata_id_wcache_enabled(const u16 *id)
 
 static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
 {
+	/* Word 86 must have bit 15 set */
 	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
 		return false;
-	return id[ATA_ID_COMMAND_SET_3] & (1 << 3);
+
+	/* READ LOG DMA EXT support can be signaled either from word 119
+	 * or from word 120. The format is the same for both words: Bit
+	 * 15 must be cleared, bit 14 set and bit 3 set.
+	 */
+	if ((id[ATA_ID_COMMAND_SET_3] & 0xC008) == 0x4008 ||
+	    (id[ATA_ID_COMMAND_SET_4] & 0xC008) == 0x4008)
+		return true;
+
+	return false;
 }
 
 static inline bool ata_id_has_sense_reporting(const u16 *id)
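
The mask test above reads as follows: 0xC008 selects bits 15, 14 and 3 of an IDENTIFY word, and the only accepted pattern is 0x4008, i.e. bit 15 clear, bit 14 set, bit 3 set. A standalone restatement of the check (illustrative only):

	static bool word_signals_read_log_dma_ext(u16 word)
	{
		/* bits 15..14 must read 01 (word contents valid) and bit 3 set */
		return (word & 0xC008) == 0x4008;
	}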
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
new file mode 100644
index 000000000000..a48d90e3bcbb
--- /dev/null
+++ b/include/linux/backing-dev-defs.h
@@ -0,0 +1,255 @@
+#ifndef __LINUX_BACKING_DEV_DEFS_H
+#define __LINUX_BACKING_DEV_DEFS_H
+
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/percpu_counter.h>
+#include <linux/percpu-refcount.h>
+#include <linux/flex_proportions.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+struct page;
+struct device;
+struct dentry;
+
+/*
+ * Bits in bdi_writeback.state
+ */
+enum wb_state {
+	WB_registered,		/* bdi_register() was done */
+	WB_writeback_running,	/* Writeback is in progress */
+	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
+};
+
+enum wb_congested_state {
+	WB_async_congested,	/* The async (write) queue is getting full */
+	WB_sync_congested,	/* The sync queue is getting full */
+};
+
+typedef int (congested_fn)(void *, int);
+
+enum wb_stat_item {
+	WB_RECLAIMABLE,
+	WB_WRITEBACK,
+	WB_DIRTIED,
+	WB_WRITTEN,
+	NR_WB_STAT_ITEMS
+};
+
+#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+/*
+ * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
+ * wb's can operate mostly independently but should share the congested
+ * state.  To facilitate such sharing, the congested state is tracked using
+ * the following struct which is created on demand, indexed by blkcg ID on
+ * its bdi, and refcounted.
+ */
+struct bdi_writeback_congested {
+	unsigned long state;		/* WB_[a]sync_congested flags */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct backing_dev_info *bdi;	/* the associated bdi */
+	atomic_t refcnt;		/* nr of attached wb's and blkg */
+	int blkcg_id;			/* ID of the associated blkcg */
+	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
+#endif
+};
+
+/*
+ * Each wb (bdi_writeback) can perform writeback operations, is measured
+ * and throttled, independently.  Without cgroup writeback, each bdi
+ * (bdi_writeback) is served by its embedded bdi->wb.
+ *
+ * On the default hierarchy, blkcg implicitly enables memcg.  This allows
+ * using memcg's page ownership for attributing writeback IOs, and every
+ * memcg - blkcg combination can be served by its own wb by assigning a
+ * dedicated wb to each memcg, which enables isolation across different
+ * cgroups and propagation of IO back pressure down from the IO layer up to
+ * the tasks which are generating the dirty pages to be written back.
+ *
+ * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
+ * refcounted with the number of inodes attached to it, and pins the memcg
+ * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
+ * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
+ * is tested for blkcg after lookup and removed from index on mismatch so
+ * that a new wb for the combination can be created.
+ */
+struct bdi_writeback {
+	struct backing_dev_info *bdi;	/* our parent bdi */
+
+	unsigned long state;		/* Always use atomic bitops on this */
+	unsigned long last_old_flush;	/* last old data flush */
+
+	struct list_head b_dirty;	/* dirty inodes */
+	struct list_head b_io;		/* parked for writeback */
+	struct list_head b_more_io;	/* parked for more writeback */
+	struct list_head b_dirty_time;	/* time stamps are dirty */
+	spinlock_t list_lock;		/* protects the b_* lists */
+
+	struct percpu_counter stat[NR_WB_STAT_ITEMS];
+
+	struct bdi_writeback_congested *congested;
+
+	unsigned long bw_time_stamp;	/* last time write bw is updated */
+	unsigned long dirtied_stamp;
+	unsigned long written_stamp;	/* pages written at bw_time_stamp */
+	unsigned long write_bandwidth;	/* the estimated write bandwidth */
+	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
+
+	/*
+	 * The base dirty throttle rate, re-calculated on every 200ms.
+	 * All the bdi tasks' dirty rate will be curbed under it.
+	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+	 * in small steps and is much more smooth/stable than the latter.
+	 */
+	unsigned long dirty_ratelimit;
+	unsigned long balanced_dirty_ratelimit;
+
+	struct fprop_local_percpu completions;
+	int dirty_exceeded;
+
+	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
+	struct list_head work_list;
+	struct delayed_work dwork;	/* work item used for writeback */
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct percpu_ref refcnt;	/* used only for !root wb's */
+	struct fprop_local_percpu memcg_completions;
+	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
+	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
+	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
+	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */
+
+	union {
+		struct work_struct release_work;
+		struct rcu_head rcu;
+	};
+#endif
+};
+
+struct backing_dev_info {
+	struct list_head bdi_list;
+	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
+	unsigned int capabilities; /* Device capabilities */
+	congested_fn *congested_fn; /* Function pointer if device is md/dm */
+	void *congested_data;	/* Pointer to aux data for congested func */
+
+	char *name;
+
+	unsigned int min_ratio;
+	unsigned int max_ratio, max_prop_frac;
+
+	/*
+	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
+	 * any dirty wbs, which is depended upon by bdi_has_dirty_io().
+	 */
+	atomic_long_t tot_write_bandwidth;
+
+	struct bdi_writeback wb;  /* the root writeback info for this bdi */
+	struct bdi_writeback_congested wb_congested; /* its congested state */
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+	struct rb_root cgwb_congested_tree; /* their congested states */
+	atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
+#endif
+	wait_queue_head_t wb_waitq;
+
+	struct device *dev;
+
+	struct timer_list laptop_mode_wb_timer;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debug_dir;
+	struct dentry *debug_stats;
+#endif
+};
+
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
+void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
+void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
+
+static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+	clear_wb_congested(bdi->wb.congested, sync);
+}
+
+static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
+{
+	set_wb_congested(bdi->wb.congested, sync);
+}
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * wb_tryget - try to increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+	if (wb != &wb->bdi->wb)
+		return percpu_ref_tryget(&wb->refcnt);
+	return true;
+}
+
+/**
+ * wb_get - increment a wb's refcount
+ * @wb: bdi_writeback to get
+ */
+static inline void wb_get(struct bdi_writeback *wb)
+{
+	if (wb != &wb->bdi->wb)
+		percpu_ref_get(&wb->refcnt);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+	if (wb != &wb->bdi->wb)
+		percpu_ref_put(&wb->refcnt);
+}
+
+/**
+ * wb_dying - is a wb dying?
+ * @wb: bdi_writeback of interest
+ *
+ * Returns whether @wb is unlinked and being drained.
+ */
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+	return percpu_ref_is_dying(&wb->refcnt);
+}
+
+#else	/* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool wb_tryget(struct bdi_writeback *wb)
+{
+	return true;
+}
+
+static inline void wb_get(struct bdi_writeback *wb)
+{
+}
+
+static inline void wb_put(struct bdi_writeback *wb)
+{
+}
+
+static inline bool wb_dying(struct bdi_writeback *wb)
+{
+	return false;
+}
+
+#endif	/* CONFIG_CGROUP_WRITEBACK */
+
+#endif	/* __LINUX_BACKING_DEV_DEFS_H */
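
A usage sketch for the refcounting helpers above: pin a non-root wb across a sleepable section (hypothetical caller, not from the patch):

	static void foo_work_on_wb(struct bdi_writeback *wb)
	{
		if (!wb_tryget(wb))
			return;	/* wb is being drained, skip it */

		/* ... safely dereference wb here ... */

		wb_put(wb);
	}

The root wb is embedded in its bdi and lives exactly as long as the bdi, which is why the helpers bypass the percpu refcount when wb == &wb->bdi->wb.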
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index aff923ae8c4b..0e6d4828a77a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -8,106 +8,13 @@
 #ifndef _LINUX_BACKING_DEV_H
 #define _LINUX_BACKING_DEV_H
 
-#include <linux/percpu_counter.h>
-#include <linux/log2.h>
-#include <linux/flex_proportions.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/sched.h>
-#include <linux/timer.h>
+#include <linux/blkdev.h>
 #include <linux/writeback.h>
-#include <linux/atomic.h>
-#include <linux/sysctl.h>
-#include <linux/workqueue.h>
-
-struct page;
-struct device;
-struct dentry;
-
-/*
- * Bits in backing_dev_info.state
- */
-enum bdi_state {
-	BDI_async_congested,	/* The async (write) queue is getting full */
-	BDI_sync_congested,	/* The sync queue is getting full */
-	BDI_registered,		/* bdi_register() was done */
-	BDI_writeback_running,	/* Writeback is in progress */
-};
-
-typedef int (congested_fn)(void *, int);
-
-enum bdi_stat_item {
-	BDI_RECLAIMABLE,
-	BDI_WRITEBACK,
-	BDI_DIRTIED,
-	BDI_WRITTEN,
-	NR_BDI_STAT_ITEMS
-};
-
-#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
-
-struct bdi_writeback {
-	struct backing_dev_info *bdi;	/* our parent bdi */
-
-	unsigned long last_old_flush;	/* last old data flush */
-
-	struct delayed_work dwork;	/* work item used for writeback */
-	struct list_head b_dirty;	/* dirty inodes */
-	struct list_head b_io;		/* parked for writeback */
-	struct list_head b_more_io;	/* parked for more writeback */
-	struct list_head b_dirty_time;	/* time stamps are dirty */
-	spinlock_t list_lock;		/* protects the b_* lists */
-};
-
-struct backing_dev_info {
-	struct list_head bdi_list;
-	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
-	unsigned long state;	/* Always use atomic bitops on this */
-	unsigned int capabilities; /* Device capabilities */
-	congested_fn *congested_fn; /* Function pointer if device is md/dm */
-	void *congested_data;	/* Pointer to aux data for congested func */
-
-	char *name;
-
-	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
-
-	unsigned long bw_time_stamp;	/* last time write bw is updated */
-	unsigned long dirtied_stamp;
-	unsigned long written_stamp;	/* pages written at bw_time_stamp */
-	unsigned long write_bandwidth;	/* the estimated write bandwidth */
-	unsigned long avg_write_bandwidth; /* further smoothed write bw */
-
-	/*
-	 * The base dirty throttle rate, re-calculated on every 200ms.
-	 * All the bdi tasks' dirty rate will be curbed under it.
-	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
-	 * in small steps and is much more smooth/stable than the latter.
-	 */
-	unsigned long dirty_ratelimit;
-	unsigned long balanced_dirty_ratelimit;
-
-	struct fprop_local_percpu completions;
-	int dirty_exceeded;
-
-	unsigned int min_ratio;
-	unsigned int max_ratio, max_prop_frac;
-
-	struct bdi_writeback wb;  /* default writeback info for this bdi */
-	spinlock_t wb_lock;	  /* protects work_list & wb.dwork scheduling */
-
-	struct list_head work_list;
-
-	struct device *dev;
-
-	struct timer_list laptop_mode_wb_timer;
-
-#ifdef CONFIG_DEBUG_FS
-	struct dentry *debug_dir;
-	struct dentry *debug_stats;
-#endif
-};
-
-struct backing_dev_info *inode_to_bdi(struct inode *inode);
+#include <linux/blk-cgroup.h>
+#include <linux/backing-dev-defs.h>
 
 int __must_check bdi_init(struct backing_dev_info *bdi);
 void bdi_destroy(struct backing_dev_info *bdi);
@@ -116,99 +23,100 @@ __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
-void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			enum wb_reason reason);
-void bdi_start_background_writeback(struct backing_dev_info *bdi);
-void bdi_writeback_workfn(struct work_struct *work);
-int bdi_has_dirty_io(struct backing_dev_info *bdi);
-void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason);
+void wb_start_background_writeback(struct bdi_writeback *wb);
+void wb_workfn(struct work_struct *work);
+void wb_wakeup_delayed(struct bdi_writeback *wb);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
 
 extern struct workqueue_struct *bdi_wq;
 
-static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
 {
-	return !list_empty(&wb->b_dirty) ||
-		!list_empty(&wb->b_io) ||
-		!list_empty(&wb->b_more_io);
+	return test_bit(WB_has_dirty_io, &wb->state);
+}
+
+static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+	/*
+	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
+	 * any dirty wbs.  See wb_update_write_bandwidth().
+	 */
+	return atomic_long_read(&bdi->tot_write_bandwidth);
 }
 
-static inline void __add_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item, s64 amount)
+static inline void __add_wb_stat(struct bdi_writeback *wb,
+				 enum wb_stat_item item, s64 amount)
 {
-	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
+	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
 }
 
-static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void __inc_wb_stat(struct bdi_writeback *wb,
+				 enum wb_stat_item item)
 {
-	__add_bdi_stat(bdi, item, 1);
+	__add_wb_stat(wb, item, 1);
 }
 
-static inline void inc_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__inc_bdi_stat(bdi, item);
+	__inc_wb_stat(wb, item);
 	local_irq_restore(flags);
 }
 
-static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void __dec_wb_stat(struct bdi_writeback *wb,
+				 enum wb_stat_item item)
 {
-	__add_bdi_stat(bdi, item, -1);
+	__add_wb_stat(wb, item, -1);
 }
 
-static inline void dec_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__dec_bdi_stat(bdi, item);
+	__dec_wb_stat(wb, item);
 	local_irq_restore(flags);
 }
 
-static inline s64 bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
-	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
+	return percpu_counter_read_positive(&wb->stat[item]);
 }
 
-static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
+				enum wb_stat_item item)
 {
-	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
+	return percpu_counter_sum_positive(&wb->stat[item]);
 }
 
-static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
 {
 	s64 sum;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	sum = __bdi_stat_sum(bdi, item);
+	sum = __wb_stat_sum(wb, item);
 	local_irq_restore(flags);
 
 	return sum;
 }
 
-extern void bdi_writeout_inc(struct backing_dev_info *bdi);
+extern void wb_writeout_inc(struct bdi_writeback *wb);
 
 /*
  * maximal error of a stat counter.
  */
-static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
+static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
 {
 #ifdef CONFIG_SMP
-	return nr_cpu_ids * BDI_STAT_BATCH;
+	return nr_cpu_ids * WB_STAT_BATCH;
 #else
 	return 1;
 #endif
@@ -232,50 +140,57 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
  * BDI_CAP_NO_WRITEBACK:   Don't write pages back
  * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
  * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
+ *
+ * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
  */
 #define BDI_CAP_NO_ACCT_DIRTY	0x00000001
 #define BDI_CAP_NO_WRITEBACK	0x00000002
 #define BDI_CAP_NO_ACCT_WB	0x00000004
 #define BDI_CAP_STABLE_WRITES	0x00000008
 #define BDI_CAP_STRICTLIMIT	0x00000010
+#define BDI_CAP_CGROUP_WRITEBACK	0x00000020
 
 #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
 	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
 
 extern struct backing_dev_info noop_backing_dev_info;
 
-int writeback_in_progress(struct backing_dev_info *bdi);
-
-static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
+/**
+ * writeback_in_progress - determine whether there is writeback in progress
+ * @wb: bdi_writeback of interest
+ *
+ * Determine whether there is writeback waiting to be handled against a
+ * bdi_writeback.
+ */
+static inline bool writeback_in_progress(struct bdi_writeback *wb)
 {
-	if (bdi->congested_fn)
-		return bdi->congested_fn(bdi->congested_data, bdi_bits);
-	return (bdi->state & bdi_bits);
+	return test_bit(WB_writeback_running, &wb->state);
 }
 
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
+static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
 {
-	return bdi_congested(bdi, 1 << BDI_sync_congested);
-}
+	struct super_block *sb;
 
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
-	return bdi_congested(bdi, 1 << BDI_async_congested);
+	if (!inode)
+		return &noop_backing_dev_info;
+
+	sb = inode->i_sb;
+#ifdef CONFIG_BLOCK
+	if (sb_is_blkdev_sb(sb))
+		return blk_get_backing_dev_info(I_BDEV(inode));
+#endif
+	return sb->s_bdi;
 }
 
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
 {
-	return bdi_congested(bdi, (1 << BDI_sync_congested) |
-			(1 << BDI_async_congested));
-}
+	struct backing_dev_info *bdi = wb->bdi;
 
-enum {
-	BLK_RW_ASYNC = 0,
-	BLK_RW_SYNC = 1,
-};
+	if (bdi->congested_fn)
+		return bdi->congested_fn(bdi->congested_data, cong_bits);
+	return wb->congested->state & cong_bits;
+}
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
-void set_bdi_congested(struct backing_dev_info *bdi, int sync);
 long congestion_wait(int sync, long timeout);
 long wait_iff_congested(struct zone *zone, int sync, long timeout);
 int pdflush_proc_obsolete(struct ctl_table *table, int write,
@@ -319,4 +234,333 @@ static inline int bdi_sched_wait(void *word)
 	return 0;
 }
 
-#endif		/* _LINUX_BACKING_DEV_H */
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
+void wb_congested_put(struct bdi_writeback_congested *congested);
+struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
+				    struct cgroup_subsys_state *memcg_css,
+				    gfp_t gfp);
+void wb_memcg_offline(struct mem_cgroup *memcg);
+void wb_blkcg_offline(struct blkcg *blkcg);
+int inode_congested(struct inode *inode, int cong_bits);
+
+/**
+ * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
+ * @inode: inode of interest
+ *
+ * cgroup writeback requires support from both the bdi and filesystem.
+ * Test whether @inode has both.
+ */
+static inline bool inode_cgwb_enabled(struct inode *inode)
+{
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+	return bdi_cap_account_dirty(bdi) &&
+		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
+}
+
+/**
+ * wb_find_current - find wb for %current on a bdi
+ * @bdi: bdi of interest
+ *
+ * Find the wb of @bdi which matches both the memcg and blkcg of %current.
+ * Must be called under rcu_read_lock() which protects the returned wb.
+ * NULL if not found.
+ */
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
+{
+	struct cgroup_subsys_state *memcg_css;
+	struct bdi_writeback *wb;
+
+	memcg_css = task_css(current, memory_cgrp_id);
+	if (!memcg_css->parent)
+		return &bdi->wb;
+
+	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
+
+	/*
+	 * %current's blkcg equals the effective blkcg of its memcg.  No
+	 * need to use the relatively expensive cgroup_get_e_css().
+	 */
+	if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
+		return wb;
+	return NULL;
+}
+
+/**
+ * wb_get_create_current - get or create wb for %current on a bdi
+ * @bdi: bdi of interest
+ * @gfp: allocation mask
+ *
+ * Equivalent to wb_get_create() on %current's memcg.  This function is
+ * called from a relatively hot path and optimizes the common cases using
+ * wb_find_current().
+ */
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+	struct bdi_writeback *wb;
+
+	rcu_read_lock();
+	wb = wb_find_current(bdi);
+	if (wb && unlikely(!wb_tryget(wb)))
+		wb = NULL;
+	rcu_read_unlock();
+
+	if (unlikely(!wb)) {
+		struct cgroup_subsys_state *memcg_css;
+
+		memcg_css = task_get_css(current, memory_cgrp_id);
+		wb = wb_get_create(bdi, memcg_css, gfp);
+		css_put(memcg_css);
+	}
+	return wb;
+}
+
+/**
+ * inode_to_wb_is_valid - test whether an inode has a wb associated
+ * @inode: inode of interest
+ *
+ * Returns %true if @inode has a wb associated.  May be called without any
+ * locking.
+ */
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+	return inode->i_wb;
+}
+
+/**
+ * inode_to_wb - determine the wb of an inode
+ * @inode: inode of interest
+ *
+ * Returns the wb @inode is currently associated with.  The caller must be
+ * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
+ * associated wb's list_lock.
+ */
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+	WARN_ON_ONCE(debug_locks &&
+		     (!lockdep_is_held(&inode->i_lock) &&
+		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+		      !lockdep_is_held(&inode->i_wb->list_lock)));
+#endif
+	return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
+ * @inode: target inode
+ * @lockedp: temp bool output param, to be passed to the end function
+ *
+ * The caller wants to access the wb associated with @inode but isn't
+ * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
+ * function determines the wb associated with @inode and ensures that the
+ * association doesn't change until the transaction is finished with
+ * unlocked_inode_to_wb_end().
+ *
+ * The caller must call unlocked_inode_to_wb_end() with *@lockedp
+ * afterwards and can't sleep during transaction.  IRQ may or may not be
+ * disabled on return.
+ */
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+	rcu_read_lock();
+
+	/*
+	 * Paired with store_release in inode_switch_wb_work_fn() and
+	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
+	 */
+	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+
+	if (unlikely(*lockedp))
+		spin_lock_irq(&inode->i_mapping->tree_lock);
+
+	/*
+	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
+	 * inode_to_wb() will bark.  Deref directly.
+	 */
+	return inode->i_wb;
+}
+
+/**
+ * unlocked_inode_to_wb_end - end inode wb access transaction
+ * @inode: target inode
+ * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ */
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+	if (unlikely(locked))
+		spin_unlock_irq(&inode->i_mapping->tree_lock);
+
+	rcu_read_unlock();
+}
+
+struct wb_iter {
+	int			start_blkcg_id;
+	struct radix_tree_iter	tree_iter;
+	void			**slot;
+};
+
+static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
+						   struct backing_dev_info *bdi)
+{
+	struct radix_tree_iter *titer = &iter->tree_iter;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (iter->start_blkcg_id >= 0) {
+		iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
+		iter->start_blkcg_id = -1;
+	} else {
+		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
+	}
+
+	if (!iter->slot)
+		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
+	if (iter->slot)
+		return *iter->slot;
+	return NULL;
+}
+
+static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
+						   struct backing_dev_info *bdi,
+						   int start_blkcg_id)
+{
+	iter->start_blkcg_id = start_blkcg_id;
+
+	if (start_blkcg_id)
+		return __wb_iter_next(iter, bdi);
+	else
+		return &bdi->wb;
+}
+
+/**
+ * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
+ * @wb_cur: cursor struct bdi_writeback pointer
+ * @bdi: bdi to walk wb's of
+ * @iter: pointer to struct wb_iter to be used as iteration buffer
+ * @start_blkcg_id: blkcg ID to start iteration from
+ *
+ * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
+ * blkcg ID order starting from @start_blkcg_id.  @iter is struct wb_iter
+ * to be used as temp storage during iteration.  rcu_read_lock() must be
+ * held throughout iteration.
+ */
+#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
+	for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);	\
+	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
+
+#else	/* CONFIG_CGROUP_WRITEBACK */
+
+static inline bool inode_cgwb_enabled(struct inode *inode)
+{
+	return false;
+}
+
+static inline struct bdi_writeback_congested *
+wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
+{
+	return bdi->wb.congested;
+}
+
+static inline void wb_congested_put(struct bdi_writeback_congested *congested)
+{
+}
+
+static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
+{
+	return &bdi->wb;
+}
+
+static inline struct bdi_writeback *
+wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
+{
+	return &bdi->wb;
+}
+
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+	return true;
+}
+
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+{
+	return &inode_to_bdi(inode)->wb;
+}
+
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+{
+	return inode_to_wb(inode);
+}
+
+static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+{
+}
+
+static inline void wb_memcg_offline(struct mem_cgroup *memcg)
+{
+}
+
+static inline void wb_blkcg_offline(struct blkcg *blkcg)
+{
+}
+
+struct wb_iter {
+	int		next_id;
+};
+
+#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
+	for ((iter)->next_id = (start_blkcg_id);			\
+	     ({	(wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
+
+static inline int inode_congested(struct inode *inode, int cong_bits)
+{
+	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
+}
+
+#endif	/* CONFIG_CGROUP_WRITEBACK */
+
+static inline int inode_read_congested(struct inode *inode)
+{
+	return inode_congested(inode, 1 << WB_sync_congested);
+}
+
+static inline int inode_write_congested(struct inode *inode)
+{
+	return inode_congested(inode, 1 << WB_async_congested);
+}
+
+static inline int inode_rw_congested(struct inode *inode)
+{
+	return inode_congested(inode, (1 << WB_sync_congested) |
+				      (1 << WB_async_congested));
+}
+
+static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
+{
+	return wb_congested(&bdi->wb, cong_bits);
+}
+
+static inline int bdi_read_congested(struct backing_dev_info *bdi)
+{
+	return bdi_congested(bdi, 1 << WB_sync_congested);
+}
+
+static inline int bdi_write_congested(struct backing_dev_info *bdi)
+{
+	return bdi_congested(bdi, 1 << WB_async_congested);
+}
+
+static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+{
+	return bdi_congested(bdi, (1 << WB_sync_congested) |
+				  (1 << WB_async_congested));
+}
+
+#endif	/* _LINUX_BACKING_DEV_H */
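
A sketch of the lockless-transaction pattern introduced above, for a caller that holds none of the protecting locks (hypothetical helper, not part of the patch):

	static void foo_account_dirtied(struct inode *inode)
	{
		struct bdi_writeback *wb;
		bool locked;

		wb = unlocked_inode_to_wb_begin(inode, &locked);
		/* the inode->wb association cannot switch in here */
		inc_wb_stat(wb, WB_DIRTIED);
		unlocked_inode_to_wb_end(inode, locked);
	}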
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index adb14a8616df..1e7a69adbe6f 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -117,12 +117,16 @@ struct backlight_device {
 	int use_count;
 };
 
-static inline void backlight_update_status(struct backlight_device *bd)
+static inline int backlight_update_status(struct backlight_device *bd)
 {
+	int ret = -ENOENT;
+
 	mutex_lock(&bd->update_lock);
 	if (bd->ops && bd->ops->update_status)
-		bd->ops->update_status(bd);
+		ret = bd->ops->update_status(bd);
 	mutex_unlock(&bd->update_lock);
+
+	return ret;
 }
 
 extern struct backlight_device *backlight_device_register(const char *name,
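
With the int return type, callers can now propagate the driver's update_status() result instead of assuming success. A usage sketch (hypothetical helper):

	static int foo_set_brightness(struct backlight_device *bd, int level)
	{
		bd->props.brightness = level;
		/* returns -ENOENT if the driver provides no update_status hook */
		return backlight_update_status(bd);
	}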
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 0e97856b2cff..14eea946e640 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -74,5 +74,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
 #define BGPIOF_UNREADABLE_REG_SET	BIT(1) /* reg_set is unreadable */
 #define BGPIOF_UNREADABLE_REG_DIR	BIT(2) /* reg_dir is unreadable */
 #define BGPIOF_BIG_ENDIAN_BYTE_ORDER	BIT(3)
+#define BGPIOF_READ_OUTPUT_REG_SET	BIT(4) /* reg_set stores output value */
 
 #endif /* __BASIC_MMIO_GPIO_H */
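
A sketch of how a driver might request the new flag when its set register reflects the currently driven output value (the register offsets and foo_* names are hypothetical):

	static int foo_gpio_setup(struct bgpio_chip *bgc, struct device *dev,
				  void __iomem *regs)
	{
		return bgpio_init(bgc, dev, 4,
				  regs + 0x0,	/* dat */
				  regs + 0x4,	/* set */
				  NULL,		/* clr */
				  regs + 0x8,	/* dirout */
				  NULL,		/* dirin */
				  BGPIOF_READ_OUTPUT_REG_SET);
	}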
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index e34f906647d3..2ff4a9961e1d 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
 
 extern void bcma_driver_unregister(struct bcma_driver *drv);
 
+/* module_bcma_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_bcma_driver(__bcma_driver) \
+	module_driver(__bcma_driver, bcma_driver_register, \
+			bcma_driver_unregister)
+
 /* Set a fallback SPROM.
  * See kdoc at the function definition for complete documentation. */
 extern int bcma_arch_register_fallback_sprom(
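
A usage sketch: a bcma driver with no extra module init/exit work can now drop its module_init()/module_exit() pair entirely (the foo_* names are hypothetical):

	static struct bcma_driver foo_bcma_driver = {
		.name		= KBUILD_MODNAME,
		.id_table	= foo_bcma_tbl,
		.probe		= foo_probe,
		.remove		= foo_remove,
	};
	module_bcma_driver(foo_bcma_driver);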
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 5ba6918ca20b..9657f11d48a7 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -246,7 +246,18 @@ static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
 }
 #endif
 
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+#else
+static inline int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
+{
+	return -ENOTSUPP;
+}
+static inline int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
+{
+	return -ENOTSUPP;
+}
+#endif
 
 #endif /* LINUX_BCMA_DRIVER_PCI_H_ */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index da3a127c9958..5e963a6d7c14 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -290,7 +290,21 @@ static inline unsigned bio_segments(struct bio *bio)
  * returns. and then bio would be freed memory when if (bio->bi_flags ...)
  * runs
  */
-#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
+static inline void bio_get(struct bio *bio)
+{
+	bio->bi_flags |= (1 << BIO_REFFED);
+	smp_mb__before_atomic();
+	atomic_inc(&bio->__bi_cnt);
+}
+
+static inline void bio_cnt_set(struct bio *bio, unsigned int count)
+{
+	if (count != 1) {
+		bio->bi_flags |= (1 << BIO_REFFED);
+		smp_mb__before_atomic();
+	}
+	atomic_set(&bio->__bi_cnt, count);
+}
 
 enum bip_flags {
 	BIP_BLOCK_INTEGRITY	= 1 << 0,	/* block layer owns integrity data */
@@ -413,7 +427,6 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
 }
 
 extern void bio_endio(struct bio *, int);
-extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
@@ -469,9 +482,12 @@ extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #ifdef CONFIG_BLK_CGROUP
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
 int bio_associate_current(struct bio *bio);
 void bio_disassociate_task(struct bio *bio);
 #else	/* CONFIG_BLK_CGROUP */
+static inline int bio_associate_blkcg(struct bio *bio,
+			struct cgroup_subsys_state *blkcg_css) { return 0; }
 static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
 static inline void bio_disassociate_task(struct bio *bio) { }
 #endif	/* CONFIG_BLK_CGROUP */
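
A short sketch of the new reference contract: bio_get() marks the bio as explicitly refcounted via BIO_REFFED, and bio_cnt_set() seeds the count for bios that start life shared; the common unshared case can then skip atomics on free. Illustrative caller, not from the patch:

	static void foo_share_bio(struct bio *bio)
	{
		bio_get(bio);	/* sets BIO_REFFED, bumps __bi_cnt */
		/* ... hand bio to a second owner; each owner eventually
		 * drops its reference with bio_put() ... */
	}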
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
new file mode 100644
index 000000000000..58cfab80dd70
--- /dev/null
+++ b/include/linux/blk-cgroup.h
@@ -0,0 +1,655 @@
+#ifndef _BLK_CGROUP_H
+#define _BLK_CGROUP_H
+/*
+ * Common Block IO controller cgroup interface
+ *
+ * Based on ideas and code from CFQ, CFS and BFQ:
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ *
+ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
+ *		      Paolo Valente <paolo.valente@unimore.it>
+ *
+ * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
+ *		      Nauman Rafique <nauman@google.com>
+ */
+
+#include <linux/cgroup.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
+#include <linux/blkdev.h>
+#include <linux/atomic.h>
+
+/* Max limits for throttle policy */
+#define THROTL_IOPS_MAX		UINT_MAX
+
+#ifdef CONFIG_BLK_CGROUP
+
+enum blkg_rwstat_type {
+	BLKG_RWSTAT_READ,
+	BLKG_RWSTAT_WRITE,
+	BLKG_RWSTAT_SYNC,
+	BLKG_RWSTAT_ASYNC,
+
+	BLKG_RWSTAT_NR,
+	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+};
+
+struct blkcg_gq;
+
+struct blkcg {
+	struct cgroup_subsys_state	css;
+	spinlock_t			lock;
+
+	struct radix_tree_root		blkg_tree;
+	struct blkcg_gq			*blkg_hint;
+	struct hlist_head		blkg_list;
+
+	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct list_head		cgwb_list;
+#endif
+};
+
+struct blkg_stat {
+	struct u64_stats_sync		syncp;
+	uint64_t			cnt;
+};
+
+struct blkg_rwstat {
+	struct u64_stats_sync		syncp;
+	uint64_t			cnt[BLKG_RWSTAT_NR];
+};
+
+/*
+ * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
+ * request_queue (q).  This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning and pd_size can't be smaller than pd.
+ */
+struct blkg_policy_data {
+	/* the blkg and policy id this per-policy data belongs to */
+	struct blkcg_gq			*blkg;
+	int				plid;
+
+	/* used during policy activation */
+	struct list_head		alloc_node;
+};
+
+/*
+ * Policies that need to keep per-blkcg data which is independent from
+ * any request_queue associated with it must specify its size with the
+ * cpd_size field of the blkcg_policy structure and embed a
+ * blkcg_policy_data in it.  blkcg core allocates policy-specific
+ * per-blkcg structures lazily the first time they are actually needed,
+ * so it handles them together with blkgs.  cpd_init() is invoked to let
+ * each policy handle per-blkcg data.
+ */
+struct blkcg_policy_data {
+	/* the policy id this per-policy data belongs to */
+	int				plid;
+
+	/* used during policy activation */
+	struct list_head		alloc_node;
+};
+
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+	/* Pointer to the associated request_queue */
+	struct request_queue		*q;
+	struct list_head		q_node;
+	struct hlist_node		blkcg_node;
+	struct blkcg			*blkcg;
+
+	/*
+	 * Each blkg gets congested separately and the congestion state is
+	 * propagated to the matching bdi_writeback_congested.
+	 */
+	struct bdi_writeback_congested	*wb_congested;
+
+	/* all non-root blkcg_gq's are guaranteed to have access to parent */
+	struct blkcg_gq			*parent;
+
+	/* request allocation list for this blkcg-q pair */
+	struct request_list		rl;
+
+	/* reference count */
+	atomic_t			refcnt;
+
+	/* is this blkg online? protected by both blkcg and q locks */
+	bool				online;
+
+	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
+
+	struct rcu_head			rcu_head;
+};
+
+typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+	int				plid;
+	/* policy specific private data size */
+	size_t				pd_size;
+	/* policy specific per-blkcg data size */
+	size_t				cpd_size;
+	/* cgroup files for the policy */
+	struct cftype			*cftypes;
+
+	/* operations */
+	blkcg_pol_init_cpd_fn		*cpd_init_fn;
+	blkcg_pol_init_pd_fn		*pd_init_fn;
+	blkcg_pol_online_pd_fn		*pd_online_fn;
+	blkcg_pol_offline_pd_fn		*pd_offline_fn;
+	blkcg_pol_exit_pd_fn		*pd_exit_fn;
+	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
+};
+
+extern struct blkcg blkcg_root;
+extern struct cgroup_subsys_state * const blkcg_root_css;
+
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
+/* Blkio controller policy registration */
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+			  const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+			     const struct blkcg_policy *pol);
+
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+		       u64 (*prfill)(struct seq_file *,
+				     struct blkg_policy_data *, int),
+		       const struct blkcg_policy *pol, int data,
+		       bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+			 const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+		       int off);
+
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+					     int off);
+
+struct blkg_conf_ctx {
195struct blkg_conf_ctx {
196 struct gendisk *disk;
197 struct blkcg_gq *blkg;
198 u64 v;
199};
200
201int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
202 const char *input, struct blkg_conf_ctx *ctx);
203void blkg_conf_finish(struct blkg_conf_ctx *ctx);
204
205
206static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
207{
208 return css ? container_of(css, struct blkcg, css) : NULL;
209}
210
211static inline struct blkcg *task_blkcg(struct task_struct *tsk)
212{
213 return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
214}
215
216static inline struct blkcg *bio_blkcg(struct bio *bio)
217{
218 if (bio && bio->bi_css)
219 return css_to_blkcg(bio->bi_css);
220 return task_blkcg(current);
221}
222
223static inline struct cgroup_subsys_state *
224task_get_blkcg_css(struct task_struct *task)
225{
226 return task_get_css(task, blkio_cgrp_id);
227}
228
229/**
230 * blkcg_parent - get the parent of a blkcg
231 * @blkcg: blkcg of interest
232 *
233 * Return the parent blkcg of @blkcg. Can be called anytime.
234 */
235static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
236{
237 return css_to_blkcg(blkcg->css.parent);
238}
239
240/**
 241 * blkg_to_pd - get policy private data
242 * @blkg: blkg of interest
243 * @pol: policy of interest
244 *
245 * Return pointer to private data associated with the @blkg-@pol pair.
246 */
247static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
248 struct blkcg_policy *pol)
249{
250 return blkg ? blkg->pd[pol->plid] : NULL;
251}
252
253static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
254 struct blkcg_policy *pol)
255{
256 return blkcg ? blkcg->pd[pol->plid] : NULL;
257}
258
259/**
 260 * pd_to_blkg - get blkg associated with policy private data
261 * @pd: policy private data of interest
262 *
263 * @pd is policy private data. Determine the blkg it's associated with.
264 */
265static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
266{
267 return pd ? pd->blkg : NULL;
268}
269
270/**
271 * blkg_path - format cgroup path of blkg
272 * @blkg: blkg of interest
273 * @buf: target buffer
274 * @buflen: target buffer length
275 *
276 * Format the path of the cgroup of @blkg into @buf.
277 */
278static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
279{
280 char *p;
281
282 p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
283 if (!p) {
284 strncpy(buf, "<unavailable>", buflen);
285 return -ENAMETOOLONG;
286 }
287
288 memmove(buf, p, buf + buflen - p);
289 return 0;
290}
291
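An illustrative usage sketch of blkg_path() (not from the patch itself); the debug helper and messages below are hypothetical. On failure the buffer still holds the "<unavailable>" placeholder, so it is printable either way:

	#include <linux/blk-cgroup.h>

	/* Hypothetical debug helper: report which cgroup a blkg belongs to. */
	static void example_log_blkg(struct blkcg_gq *blkg)
	{
		char path[128];

		if (blkg_path(blkg, path, sizeof(path)))
			pr_debug("cgroup path did not fit in %zu bytes\n",
				 sizeof(path));
		pr_info("blkg belongs to cgroup %s\n", path);
	}
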
292/**
293 * blkg_get - get a blkg reference
294 * @blkg: blkg to get
295 *
296 * The caller should be holding an existing reference.
297 */
298static inline void blkg_get(struct blkcg_gq *blkg)
299{
300 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
301 atomic_inc(&blkg->refcnt);
302}
303
304void __blkg_release_rcu(struct rcu_head *rcu);
305
306/**
307 * blkg_put - put a blkg reference
308 * @blkg: blkg to put
309 */
310static inline void blkg_put(struct blkcg_gq *blkg)
311{
312 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
313 if (atomic_dec_and_test(&blkg->refcnt))
314 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
315}
316
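An illustrative sketch of the get/put pairing (not part of the patch), assuming the caller already holds a reference obtained under the queue lock; the sleeping work in the middle is a stand-in:

	static void example_pin_blkg(struct request_queue *q,
				     struct blkcg_gq *blkg)
	{
		blkg_get(blkg);			/* extra ref while lock is dropped */
		spin_unlock_irq(q->queue_lock);
		/* ... sleeping work that uses blkg ... */
		spin_lock_irq(q->queue_lock);
		blkg_put(blkg);			/* last put defers the free to RCU */
	}
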
317struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
318 bool update_hint);
319
320/**
321 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
322 * @d_blkg: loop cursor pointing to the current descendant
323 * @pos_css: used for iteration
324 * @p_blkg: target blkg to walk descendants of
325 *
 326 * Walk @d_blkg through the descendants of @p_blkg. Must be used with the
 327 * RCU read lock held. If called under either the blkcg or queue lock, the
 328 * iteration is guaranteed to include all and only online blkgs. The caller
 329 * may update @pos_css by calling css_rightmost_descendant() to skip a subtree.
330 * @p_blkg is included in the iteration and the first node to be visited.
331 */
332#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
333 css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
334 if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
335 (p_blkg)->q, false)))
336
337/**
338 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
339 * @d_blkg: loop cursor pointing to the current descendant
340 * @pos_css: used for iteration
341 * @p_blkg: target blkg to walk descendants of
342 *
343 * Similar to blkg_for_each_descendant_pre() but performs post-order
344 * traversal instead. Synchronization rules are the same. @p_blkg is
345 * included in the iteration and the last node to be visited.
346 */
347#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
348 css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
349 if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
350 (p_blkg)->q, false)))
351
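An illustrative pre-order walk under the contract documented above (not from the patch); summing ->refcnt is only a stand-in for a real per-blkg statistic:

	static u64 example_sum_subtree(struct blkcg_gq *p_blkg)
	{
		struct cgroup_subsys_state *pos_css;
		struct blkcg_gq *d_blkg;
		u64 sum = 0;

		rcu_read_lock();		/* required by the iterator */
		blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
			sum += atomic_read(&d_blkg->refcnt);
		rcu_read_unlock();
		return sum;
	}
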
352/**
353 * blk_get_rl - get request_list to use
354 * @q: request_queue of interest
355 * @bio: bio which will be attached to the allocated request (may be %NULL)
356 *
357 * The caller wants to allocate a request from @q to use for @bio. Find
358 * the request_list to use and obtain a reference on it. Should be called
 359 * under queue_lock. This function is guaranteed to return a non-%NULL
360 * request_list.
361 */
362static inline struct request_list *blk_get_rl(struct request_queue *q,
363 struct bio *bio)
364{
365 struct blkcg *blkcg;
366 struct blkcg_gq *blkg;
367
368 rcu_read_lock();
369
370 blkcg = bio_blkcg(bio);
371
372 /* bypass blkg lookup and use @q->root_rl directly for root */
373 if (blkcg == &blkcg_root)
374 goto root_rl;
375
376 /*
377 * Try to use blkg->rl. blkg lookup may fail under memory pressure
378 * or if either the blkcg or queue is going away. Fall back to
379 * root_rl in such cases.
380 */
381 blkg = blkg_lookup_create(blkcg, q);
382 if (unlikely(IS_ERR(blkg)))
383 goto root_rl;
384
385 blkg_get(blkg);
386 rcu_read_unlock();
387 return &blkg->rl;
388root_rl:
389 rcu_read_unlock();
390 return &q->root_rl;
391}
392
393/**
394 * blk_put_rl - put request_list
395 * @rl: request_list to put
396 *
397 * Put the reference acquired by blk_get_rl(). Should be called under
398 * queue_lock.
399 */
400static inline void blk_put_rl(struct request_list *rl)
401{
402 /* root_rl may not have blkg set */
403 if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
404 blkg_put(rl->blkg);
405}
406
407/**
408 * blk_rq_set_rl - associate a request with a request_list
409 * @rq: request of interest
410 * @rl: target request_list
411 *
412 * Associate @rq with @rl so that accounting and freeing can know the
413 * request_list @rq came from.
414 */
415static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
416{
417 rq->rl = rl;
418}
419
420/**
421 * blk_rq_rl - return the request_list a request came from
422 * @rq: request of interest
423 *
424 * Return the request_list @rq is allocated from.
425 */
426static inline struct request_list *blk_rq_rl(struct request *rq)
427{
428 return rq->rl;
429}
430
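An illustrative sketch of how blk_get_rl(), blk_rq_set_rl() and blk_put_rl() pair up in an allocation path (not from the patch); hypothetical_alloc_request() is a stand-in for the real request allocator:

	extern struct request *hypothetical_alloc_request(struct request_list *rl);

	static struct request *example_alloc(struct request_queue *q,
					     struct bio *bio)
	{
		struct request_list *rl;
		struct request *rq;

		/* caller holds queue_lock */
		rl = blk_get_rl(q, bio);	/* never NULL */
		rq = hypothetical_alloc_request(rl);
		if (rq)
			blk_rq_set_rl(rq, rl);	/* free path does blk_put_rl() */
		else
			blk_put_rl(rl);		/* undo the reference on failure */
		return rq;
	}
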
431struct request_list *__blk_queue_next_rl(struct request_list *rl,
432 struct request_queue *q);
433/**
434 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
435 *
436 * Should be used under queue_lock.
437 */
438#define blk_queue_for_each_rl(rl, q) \
439 for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
440
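A small illustrative use of the iterator (not from the patch); with the queue lock held it visits root_rl plus, roughly, one request_list per blkg:

	static int example_count_rls(struct request_queue *q)
	{
		struct request_list *rl;
		int nr = 0;

		blk_queue_for_each_rl(rl, q)
			nr++;
		return nr;
	}
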
441static inline void blkg_stat_init(struct blkg_stat *stat)
442{
443 u64_stats_init(&stat->syncp);
444}
445
446/**
447 * blkg_stat_add - add a value to a blkg_stat
448 * @stat: target blkg_stat
449 * @val: value to add
450 *
451 * Add @val to @stat. The caller is responsible for synchronizing calls to
452 * this function.
453 */
454static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
455{
456 u64_stats_update_begin(&stat->syncp);
457 stat->cnt += val;
458 u64_stats_update_end(&stat->syncp);
459}
460
461/**
462 * blkg_stat_read - read the current value of a blkg_stat
463 * @stat: blkg_stat to read
464 *
465 * Read the current value of @stat. This function can be called without
 466 * synchronization and takes care of u64 atomicity.
467 */
468static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
469{
470 unsigned int start;
471 uint64_t v;
472
473 do {
474 start = u64_stats_fetch_begin_irq(&stat->syncp);
475 v = stat->cnt;
476 } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
477
478 return v;
479}
480
481/**
482 * blkg_stat_reset - reset a blkg_stat
483 * @stat: blkg_stat to reset
484 */
485static inline void blkg_stat_reset(struct blkg_stat *stat)
486{
487 stat->cnt = 0;
488}
489
490/**
491 * blkg_stat_merge - merge a blkg_stat into another
492 * @to: the destination blkg_stat
493 * @from: the source
494 *
495 * Add @from's count to @to.
496 */
497static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
498{
499 blkg_stat_add(to, blkg_stat_read(from));
500}
501
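An illustrative writer/reader pairing for blkg_stat (not from the patch), assuming a hypothetical policy counter that was blkg_stat_init()'d at setup; adds are serialized by the policy, reads need no locking:

	static struct blkg_stat example_ios;

	static void example_on_completion(void)
	{
		blkg_stat_add(&example_ios, 1);		/* caller serializes */
	}

	static u64 example_read_ios(void)
	{
		return blkg_stat_read(&example_ios);	/* lock-free, u64-safe */
	}
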
502static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
503{
504 u64_stats_init(&rwstat->syncp);
505}
506
507/**
508 * blkg_rwstat_add - add a value to a blkg_rwstat
509 * @rwstat: target blkg_rwstat
510 * @rw: mask of REQ_{WRITE|SYNC}
511 * @val: value to add
512 *
513 * Add @val to @rwstat. The counters are chosen according to @rw. The
514 * caller is responsible for synchronizing calls to this function.
515 */
516static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
517 int rw, uint64_t val)
518{
519 u64_stats_update_begin(&rwstat->syncp);
520
521 if (rw & REQ_WRITE)
522 rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
523 else
524 rwstat->cnt[BLKG_RWSTAT_READ] += val;
525 if (rw & REQ_SYNC)
526 rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
527 else
528 rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
529
530 u64_stats_update_end(&rwstat->syncp);
531}
532
533/**
534 * blkg_rwstat_read - read the current values of a blkg_rwstat
535 * @rwstat: blkg_rwstat to read
536 *
 537 * Read the current snapshot of @rwstat and return it.
538 * This function can be called without synchronization and takes care of
539 * u64 atomicity.
540 */
541static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
542{
543 unsigned int start;
544 struct blkg_rwstat tmp;
545
546 do {
547 start = u64_stats_fetch_begin_irq(&rwstat->syncp);
548 tmp = *rwstat;
549 } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
550
551 return tmp;
552}
553
554/**
555 * blkg_rwstat_total - read the total count of a blkg_rwstat
556 * @rwstat: blkg_rwstat to read
557 *
558 * Return the total count of @rwstat regardless of the IO direction. This
559 * function can be called without synchronization and takes care of u64
560 * atomicity.
561 */
562static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
563{
564 struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
565
566 return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
567}
568
569/**
570 * blkg_rwstat_reset - reset a blkg_rwstat
571 * @rwstat: blkg_rwstat to reset
572 */
573static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
574{
575 memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
576}
577
578/**
579 * blkg_rwstat_merge - merge a blkg_rwstat into another
580 * @to: the destination blkg_rwstat
581 * @from: the source
582 *
583 * Add @from's counts to @to.
584 */
585static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
586 struct blkg_rwstat *from)
587{
588 struct blkg_rwstat v = blkg_rwstat_read(from);
589 int i;
590
591 u64_stats_update_begin(&to->syncp);
592 for (i = 0; i < BLKG_RWSTAT_NR; i++)
593 to->cnt[i] += v.cnt[i];
594 u64_stats_update_end(&to->syncp);
595}
596
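Similarly for blkg_rwstat, an illustrative sketch (not from the patch) that accounts one 4KiB synchronous write and reads back the direction-agnostic total; example_bytes is hypothetical and assumed blkg_rwstat_init()'d:

	static struct blkg_rwstat example_bytes;

	static void example_account_write(void)
	{
		blkg_rwstat_add(&example_bytes, REQ_WRITE | REQ_SYNC, 4096);
	}

	static u64 example_bytes_total(void)
	{
		return blkg_rwstat_total(&example_bytes);	/* READ + WRITE */
	}
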
597#else /* CONFIG_BLK_CGROUP */
598
599struct blkcg {
600};
601
602struct blkg_policy_data {
603};
604
605struct blkcg_policy_data {
606};
607
608struct blkcg_gq {
609};
610
611struct blkcg_policy {
612};
613
614#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
615
616static inline struct cgroup_subsys_state *
617task_get_blkcg_css(struct task_struct *task)
618{
619 return NULL;
620}
621
622#ifdef CONFIG_BLOCK
623
624static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
625static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
626static inline void blkcg_drain_queue(struct request_queue *q) { }
627static inline void blkcg_exit_queue(struct request_queue *q) { }
628static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
629static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
630static inline int blkcg_activate_policy(struct request_queue *q,
631 const struct blkcg_policy *pol) { return 0; }
632static inline void blkcg_deactivate_policy(struct request_queue *q,
633 const struct blkcg_policy *pol) { }
634
635static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
636
637static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
638 struct blkcg_policy *pol) { return NULL; }
639static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
640static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
641static inline void blkg_get(struct blkcg_gq *blkg) { }
642static inline void blkg_put(struct blkcg_gq *blkg) { }
643
644static inline struct request_list *blk_get_rl(struct request_queue *q,
645 struct bio *bio) { return &q->root_rl; }
646static inline void blk_put_rl(struct request_list *rl) { }
647static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
648static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
649
650#define blk_queue_for_each_rl(rl, q) \
651 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
652
653#endif /* CONFIG_BLOCK */
654#endif /* CONFIG_BLK_CGROUP */
655#endif /* _BLK_CGROUP_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2056a99b92f8..37d1602c4f7a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -96,6 +96,7 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
96 96
97typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, 97typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
98 bool); 98 bool);
99typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
99 100
100struct blk_mq_ops { 101struct blk_mq_ops {
101 /* 102 /*
@@ -182,6 +183,7 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
182struct request *blk_mq_alloc_request(struct request_queue *q, int rw, 183struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
183 gfp_t gfp, bool reserved); 184 gfp_t gfp, bool reserved);
184struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); 185struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
186struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
185 187
186enum { 188enum {
187 BLK_MQ_UNIQUE_TAG_BITS = 16, 189 BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -224,6 +226,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async);
224void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 226void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
225void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 227void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
226 void *priv); 228 void *priv);
229void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
230 void *priv);
227void blk_mq_freeze_queue(struct request_queue *q); 231void blk_mq_freeze_queue(struct request_queue *q);
228void blk_mq_unfreeze_queue(struct request_queue *q); 232void blk_mq_unfreeze_queue(struct request_queue *q);
229void blk_mq_freeze_queue_start(struct request_queue *q); 233void blk_mq_freeze_queue_start(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b7299febc4b4..6ab9d12d1f17 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -65,7 +65,7 @@ struct bio {
65 unsigned int bi_seg_front_size; 65 unsigned int bi_seg_front_size;
66 unsigned int bi_seg_back_size; 66 unsigned int bi_seg_back_size;
67 67
68 atomic_t bi_remaining; 68 atomic_t __bi_remaining;
69 69
70 bio_end_io_t *bi_end_io; 70 bio_end_io_t *bi_end_io;
71 71
@@ -92,7 +92,7 @@ struct bio {
92 92
93 unsigned short bi_max_vecs; /* max bvl_vecs we can hold */ 93 unsigned short bi_max_vecs; /* max bvl_vecs we can hold */
94 94
95 atomic_t bi_cnt; /* pin count */ 95 atomic_t __bi_cnt; /* pin count */
96 96
97 struct bio_vec *bi_io_vec; /* the actual vec list */ 97 struct bio_vec *bi_io_vec; /* the actual vec list */
98 98
@@ -112,16 +112,15 @@ struct bio {
112 * bio flags 112 * bio flags
113 */ 113 */
114#define BIO_UPTODATE 0 /* ok after I/O completion */ 114#define BIO_UPTODATE 0 /* ok after I/O completion */
115#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ 115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
116#define BIO_EOF 2 /* out-out-bounds error */ 116#define BIO_CLONED 2 /* doesn't own data */
117#define BIO_SEG_VALID 3 /* bi_phys_segments valid */ 117#define BIO_BOUNCED 3 /* bio is a bounce bio */
118#define BIO_CLONED 4 /* doesn't own data */ 118#define BIO_USER_MAPPED 4 /* contains user pages */
119#define BIO_BOUNCED 5 /* bio is a bounce bio */ 119#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
120#define BIO_USER_MAPPED 6 /* contains user pages */ 120#define BIO_QUIET 6 /* Make BIO Quiet */
121#define BIO_EOPNOTSUPP 7 /* not supported */ 121#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */
122#define BIO_NULL_MAPPED 8 /* contains invalid user pages */ 122#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */
123#define BIO_QUIET 9 /* Make BIO Quiet */ 123#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
124#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
125 124
126/* 125/*
127 * Flags starting here get preserved by bio_reset() - this includes 126 * Flags starting here get preserved by bio_reset() - this includes
@@ -193,6 +192,7 @@ enum rq_flag_bits {
193 __REQ_HASHED, /* on IO scheduler merge hash */ 192 __REQ_HASHED, /* on IO scheduler merge hash */
194 __REQ_MQ_INFLIGHT, /* track inflight for MQ */ 193 __REQ_MQ_INFLIGHT, /* track inflight for MQ */
195 __REQ_NO_TIMEOUT, /* requests may never expire */ 194 __REQ_NO_TIMEOUT, /* requests may never expire */
195 __REQ_CLONE, /* cloned bios */
196 __REQ_NR_BITS, /* stops here */ 196 __REQ_NR_BITS, /* stops here */
197}; 197};
198 198
@@ -247,5 +247,6 @@ enum rq_flag_bits {
247#define REQ_HASHED (1ULL << __REQ_HASHED) 247#define REQ_HASHED (1ULL << __REQ_HASHED)
248#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 248#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
249#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) 249#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
250#define REQ_CLONE (1ULL << __REQ_CLONE)
250 251
251#endif /* __LINUX_BLK_TYPES_H */ 252#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5d93a6645e88..7f2f54b4587f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -12,7 +12,7 @@
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/workqueue.h> 13#include <linux/workqueue.h>
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/backing-dev.h> 15#include <linux/backing-dev-defs.h>
16#include <linux/wait.h> 16#include <linux/wait.h>
17#include <linux/mempool.h> 17#include <linux/mempool.h>
18#include <linux/bio.h> 18#include <linux/bio.h>
@@ -22,15 +22,13 @@
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/percpu-refcount.h> 24#include <linux/percpu-refcount.h>
25 25#include <linux/scatterlist.h>
26#include <asm/scatterlist.h>
27 26
28struct module; 27struct module;
29struct scsi_ioctl_command; 28struct scsi_ioctl_command;
30 29
31struct request_queue; 30struct request_queue;
32struct elevator_queue; 31struct elevator_queue;
33struct request_pm_state;
34struct blk_trace; 32struct blk_trace;
35struct request; 33struct request;
36struct sg_io_hdr; 34struct sg_io_hdr;
@@ -75,18 +73,7 @@ struct request_list {
75enum rq_cmd_type_bits { 73enum rq_cmd_type_bits {
76 REQ_TYPE_FS = 1, /* fs request */ 74 REQ_TYPE_FS = 1, /* fs request */
77 REQ_TYPE_BLOCK_PC, /* scsi command */ 75 REQ_TYPE_BLOCK_PC, /* scsi command */
78 REQ_TYPE_SENSE, /* sense request */ 76 REQ_TYPE_DRV_PRIV, /* driver defined types from here */
79 REQ_TYPE_PM_SUSPEND, /* suspend request */
80 REQ_TYPE_PM_RESUME, /* resume request */
81 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
82 REQ_TYPE_SPECIAL, /* driver defined type */
83 /*
84 * for ATA/ATAPI devices. this really doesn't belong here, ide should
85 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
86 * private REQ_LB opcodes to differentiate what type of request this is
87 */
88 REQ_TYPE_ATA_TASKFILE,
89 REQ_TYPE_ATA_PC,
90}; 77};
91 78
92#define BLK_MAX_CDB 16 79#define BLK_MAX_CDB 16
@@ -108,7 +95,7 @@ struct request {
108 struct blk_mq_ctx *mq_ctx; 95 struct blk_mq_ctx *mq_ctx;
109 96
110 u64 cmd_flags; 97 u64 cmd_flags;
111 enum rq_cmd_type_bits cmd_type; 98 unsigned cmd_type;
112 unsigned long atomic_flags; 99 unsigned long atomic_flags;
113 100
114 int cpu; 101 int cpu;
@@ -216,19 +203,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
216 return req->ioprio; 203 return req->ioprio;
217} 204}
218 205
219/*
220 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
221 * requests. Some step values could eventually be made generic.
222 */
223struct request_pm_state
224{
225 /* PM state machine step value, currently driver specific */
226 int pm_step;
227 /* requested PM state value (S1, S2, S3, S4, ...) */
228 u32 pm_state;
229 void* data; /* for driver use */
230};
231
232#include <linux/elevator.h> 206#include <linux/elevator.h>
233 207
234struct blk_queue_ctx; 208struct blk_queue_ctx;
@@ -469,7 +443,7 @@ struct request_queue {
469 struct mutex sysfs_lock; 443 struct mutex sysfs_lock;
470 444
471 int bypass_depth; 445 int bypass_depth;
472 int mq_freeze_depth; 446 atomic_t mq_freeze_depth;
473 447
474#if defined(CONFIG_BLK_DEV_BSG) 448#if defined(CONFIG_BLK_DEV_BSG)
475 bsg_job_fn *bsg_job_fn; 449 bsg_job_fn *bsg_job_fn;
@@ -610,10 +584,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
610 (((rq)->cmd_flags & REQ_STARTED) && \ 584 (((rq)->cmd_flags & REQ_STARTED) && \
611 ((rq)->cmd_type == REQ_TYPE_FS)) 585 ((rq)->cmd_type == REQ_TYPE_FS))
612 586
613#define blk_pm_request(rq) \
614 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
615 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
616
617#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 587#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
618#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 588#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
619/* rq->queuelist of dequeued request must be list_empty() */ 589/* rq->queuelist of dequeued request must be list_empty() */
@@ -804,11 +774,7 @@ extern void blk_add_request_payload(struct request *rq, struct page *page,
804 unsigned int len); 774 unsigned int len);
805extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 775extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
806extern int blk_lld_busy(struct request_queue *q); 776extern int blk_lld_busy(struct request_queue *q);
807extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 777extern void blk_rq_prep_clone(struct request *rq, struct request *rq_src);
808 struct bio_set *bs, gfp_t gfp_mask,
809 int (*bio_ctr)(struct bio *, struct bio *, void *),
810 void *data);
811extern void blk_rq_unprep_clone(struct request *rq);
812extern int blk_insert_cloned_request(struct request_queue *q, 778extern int blk_insert_cloned_request(struct request_queue *q,
813 struct request *rq); 779 struct request *rq);
814extern void blk_delay_queue(struct request_queue *, unsigned long); 780extern void blk_delay_queue(struct request_queue *, unsigned long);
@@ -821,30 +787,12 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
821extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 787extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
822 struct scsi_ioctl_command __user *); 788 struct scsi_ioctl_command __user *);
823 789
824/*
825 * A queue has just exitted congestion. Note this in the global counter of
826 * congested queues, and wake up anyone who was waiting for requests to be
827 * put back.
828 */
829static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
830{
831 clear_bdi_congested(&q->backing_dev_info, sync);
832}
833
834/*
835 * A queue has just entered congestion. Flag that in the queue's VM-visible
836 * state flags and increment the global gounter of congested queues.
837 */
838static inline void blk_set_queue_congested(struct request_queue *q, int sync)
839{
840 set_bdi_congested(&q->backing_dev_info, sync);
841}
842
843extern void blk_start_queue(struct request_queue *q); 790extern void blk_start_queue(struct request_queue *q);
844extern void blk_stop_queue(struct request_queue *q); 791extern void blk_stop_queue(struct request_queue *q);
845extern void blk_sync_queue(struct request_queue *q); 792extern void blk_sync_queue(struct request_queue *q);
846extern void __blk_stop_queue(struct request_queue *q); 793extern void __blk_stop_queue(struct request_queue *q);
847extern void __blk_run_queue(struct request_queue *q); 794extern void __blk_run_queue(struct request_queue *q);
795extern void __blk_run_queue_uncond(struct request_queue *q);
848extern void blk_run_queue(struct request_queue *); 796extern void blk_run_queue(struct request_queue *);
849extern void blk_run_queue_async(struct request_queue *q); 797extern void blk_run_queue_async(struct request_queue *q);
850extern int blk_rq_map_user(struct request_queue *, struct request *, 798extern int blk_rq_map_user(struct request_queue *, struct request *,
@@ -933,7 +881,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
933 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) 881 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
934 return q->limits.max_hw_sectors; 882 return q->limits.max_hw_sectors;
935 883
936 if (!q->limits.chunk_sectors) 884 if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
937 return blk_queue_get_max_sectors(q, rq->cmd_flags); 885 return blk_queue_get_max_sectors(q, rq->cmd_flags);
938 886
939 return min(blk_max_size_offset(q, blk_rq_pos(rq)), 887 return min(blk_max_size_offset(q, blk_rq_pos(rq)),
@@ -1054,6 +1002,7 @@ bool __must_check blk_get_queue(struct request_queue *);
1054struct request_queue *blk_alloc_queue(gfp_t); 1002struct request_queue *blk_alloc_queue(gfp_t);
1055struct request_queue *blk_alloc_queue_node(gfp_t, int); 1003struct request_queue *blk_alloc_queue_node(gfp_t, int);
1056extern void blk_put_queue(struct request_queue *); 1004extern void blk_put_queue(struct request_queue *);
1005extern void blk_set_queue_dying(struct request_queue *);
1057 1006
1058/* 1007/*
1059 * block layer runtime pm functions 1008 * block layer runtime pm functions
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 0995c2de8162..f589222bfa87 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -357,12 +357,12 @@ extern void *alloc_large_system_hash(const char *tablename,
357/* Only NUMA needs hash distribution. 64bit NUMA architectures have 357/* Only NUMA needs hash distribution. 64bit NUMA architectures have
358 * sufficient vmalloc space. 358 * sufficient vmalloc space.
359 */ 359 */
360#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT) 360#ifdef CONFIG_NUMA
361#define HASHDIST_DEFAULT 1 361#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
362extern int hashdist; /* Distribute hashes across NUMA nodes? */
362#else 363#else
363#define HASHDIST_DEFAULT 0 364#define hashdist (0)
364#endif 365#endif
365extern int hashdist; /* Distribute hashes across NUMA nodes? */
366 366
367 367
368#endif /* _LINUX_BOOTMEM_H */ 368#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 86c12c93e3cf..8fdcb783197d 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
2#define _LINUX_BH_H 2#define _LINUX_BH_H
3 3
4#include <linux/preempt.h> 4#include <linux/preempt.h>
5#include <linux/preempt_mask.h>
6 5
7#ifdef CONFIG_TRACE_IRQFLAGS 6#ifdef CONFIG_TRACE_IRQFLAGS
8extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); 7extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d5cda067115a..4383476a0d48 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -105,7 +105,8 @@ struct bpf_verifier_ops {
105 */ 105 */
106 bool (*is_valid_access)(int off, int size, enum bpf_access_type type); 106 bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
107 107
108 u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, 108 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
109 int src_reg, int ctx_off,
109 struct bpf_insn *insn); 110 struct bpf_insn *insn);
110}; 111};
111 112
@@ -123,15 +124,41 @@ struct bpf_prog_aux {
123 const struct bpf_verifier_ops *ops; 124 const struct bpf_verifier_ops *ops;
124 struct bpf_map **used_maps; 125 struct bpf_map **used_maps;
125 struct bpf_prog *prog; 126 struct bpf_prog *prog;
126 struct work_struct work; 127 union {
128 struct work_struct work;
129 struct rcu_head rcu;
130 };
127}; 131};
128 132
133struct bpf_array {
134 struct bpf_map map;
135 u32 elem_size;
136 /* 'ownership' of prog_array is claimed by the first program that
 137 * is going to use this map or by the first program whose FD is stored
138 * in the map to make sure that all callers and callees have the same
139 * prog_type and JITed flag
140 */
141 enum bpf_prog_type owner_prog_type;
142 bool owner_jited;
143 union {
144 char value[0] __aligned(8);
145 struct bpf_prog *prog[0] __aligned(8);
146 };
147};
148#define MAX_TAIL_CALL_CNT 32
149
150u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
151void bpf_prog_array_map_clear(struct bpf_map *map);
152bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
153const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
154
129#ifdef CONFIG_BPF_SYSCALL 155#ifdef CONFIG_BPF_SYSCALL
130void bpf_register_prog_type(struct bpf_prog_type_list *tl); 156void bpf_register_prog_type(struct bpf_prog_type_list *tl);
131void bpf_register_map_type(struct bpf_map_type_list *tl); 157void bpf_register_map_type(struct bpf_map_type_list *tl);
132 158
133struct bpf_prog *bpf_prog_get(u32 ufd); 159struct bpf_prog *bpf_prog_get(u32 ufd);
134void bpf_prog_put(struct bpf_prog *prog); 160void bpf_prog_put(struct bpf_prog *prog);
161void bpf_prog_put_rcu(struct bpf_prog *prog);
135 162
136struct bpf_map *bpf_map_get(struct fd f); 163struct bpf_map *bpf_map_get(struct fd f);
137void bpf_map_put(struct bpf_map *map); 164void bpf_map_put(struct bpf_map *map);
@@ -160,5 +187,10 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
160 187
161extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 188extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
162extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 189extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
190extern const struct bpf_func_proto bpf_tail_call_proto;
191extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
192extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
193extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
194extern const struct bpf_func_proto bpf_get_current_comm_proto;
163 195
164#endif /* _LINUX_BPF_H */ 196#endif /* _LINUX_BPF_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 656da2a12ffe..697ca7795bd9 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
1#ifndef _LINUX_BRCMPHY_H 1#ifndef _LINUX_BRCMPHY_H
2#define _LINUX_BRCMPHY_H 2#define _LINUX_BRCMPHY_H
3 3
4#include <linux/phy.h>
5
6/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
7 * to configure the switch internal registers via MDIO accesses.
8 */
9#define BRCM_PSEUDO_PHY_ADDR 30
10
4#define PHY_ID_BCM50610 0x0143bd60 11#define PHY_ID_BCM50610 0x0143bd60
5#define PHY_ID_BCM50610M 0x0143bd70 12#define PHY_ID_BCM50610M 0x0143bd70
6#define PHY_ID_BCM5241 0x0143bc30 13#define PHY_ID_BCM5241 0x0143bc30
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b9cb94c3102a..e7da0aa65b2d 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -774,6 +774,31 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
774} 774}
775 775
776/** 776/**
777 * task_get_css - find and get the css for (task, subsys)
778 * @task: the target task
779 * @subsys_id: the target subsystem ID
780 *
781 * Find the css for the (@task, @subsys_id) combination, increment a
 782 * reference on it and return it. This function is guaranteed to return a
783 * valid css.
784 */
785static inline struct cgroup_subsys_state *
786task_get_css(struct task_struct *task, int subsys_id)
787{
788 struct cgroup_subsys_state *css;
789
790 rcu_read_lock();
791 while (true) {
792 css = task_css(task, subsys_id);
793 if (likely(css_tryget_online(css)))
794 break;
795 cpu_relax();
796 }
797 rcu_read_unlock();
798 return css;
799}
800
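An illustrative caller (not from the patch), pinning a task's blkio css across a possibly-sleeping region; the function name is hypothetical:

	static void example_use_blkcg_css(struct task_struct *task)
	{
		struct cgroup_subsys_state *css;

		css = task_get_css(task, blkio_cgrp_id);	/* never NULL */
		/* ... may sleep while using css ... */
		css_put(css);		/* drop the reference taken above */
	}
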
801/**
777 * task_css_is_root - test whether a task belongs to the root css 802 * task_css_is_root - test whether a task belongs to the root css
778 * @task: the target task 803 * @task: the target task
779 * @subsys_id: the target subsystem ID 804 * @subsys_id: the target subsystem ID
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 68c16a6bedb3..0df4a51e1a78 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -306,6 +306,20 @@ void devm_clk_put(struct device *dev, struct clk *clk);
306 * @clk: clock source 306 * @clk: clock source
307 * @rate: desired clock rate in Hz 307 * @rate: desired clock rate in Hz
308 * 308 *
309 * This answers the question "if I were to pass @rate to clk_set_rate(),
310 * what clock rate would I end up with?" without changing the hardware
311 * in any way. In other words:
312 *
313 * rate = clk_round_rate(clk, r);
314 *
315 * and:
316 *
317 * clk_set_rate(clk, r);
318 * rate = clk_get_rate(clk);
319 *
320 * are equivalent except the former does not modify the clock hardware
321 * in any way.
322 *
309 * Returns rounded clock rate in Hz, or negative errno. 323 * Returns rounded clock rate in Hz, or negative errno.
310 */ 324 */
311long clk_round_rate(struct clk *clk, unsigned long rate); 325long clk_round_rate(struct clk *clk, unsigned long rate);
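An illustrative consumer of the equivalence documented above (not from the patch): probe the achievable rate first, then commit to it. The 48 MHz target and helper name are hypothetical:

	static void example_pick_rate(struct clk *clk)
	{
		long rounded = clk_round_rate(clk, 48000000);	/* query only */

		if (rounded > 0 && clk_set_rate(clk, rounded) == 0)
			pr_info("clk running at %lu Hz\n", clk_get_rate(clk));
	}
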
@@ -471,19 +485,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
471 clk_unprepare(clk); 485 clk_unprepare(clk);
472} 486}
473 487
474/**
475 * clk_add_alias - add a new clock alias
476 * @alias: name for clock alias
477 * @alias_dev_name: device name
478 * @id: platform specific clock name
479 * @dev: device
480 *
481 * Allows using generic clock names for drivers by adding a new alias.
482 * Assumes clkdev, see clkdev.h for more info.
483 */
484int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
485 struct device *dev);
486
487struct device_node; 488struct device_node;
488struct of_phandle_args; 489struct of_phandle_args;
489 490
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 94bad77eeb4a..a240b18e86fa 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -22,6 +22,7 @@ struct clk_lookup {
22 const char *dev_id; 22 const char *dev_id;
23 const char *con_id; 23 const char *con_id;
24 struct clk *clk; 24 struct clk *clk;
25 struct clk_hw *clk_hw;
25}; 26};
26 27
27#define CLKDEV_INIT(d, n, c) \ 28#define CLKDEV_INIT(d, n, c) \
@@ -37,8 +38,11 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
37void clkdev_add(struct clk_lookup *cl); 38void clkdev_add(struct clk_lookup *cl);
38void clkdev_drop(struct clk_lookup *cl); 39void clkdev_drop(struct clk_lookup *cl);
39 40
41struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
42 const char *dev_fmt, ...);
43
40void clkdev_add_table(struct clk_lookup *, size_t); 44void clkdev_add_table(struct clk_lookup *, size_t);
41int clk_add_alias(const char *, const char *, char *, struct device *); 45int clk_add_alias(const char *, const char *, const char *, struct device *);
42 46
43int clk_register_clkdev(struct clk *, const char *, const char *, ...); 47int clk_register_clkdev(struct clk *, const char *, const char *, ...);
44int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); 48int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 96c280b2c263..597a1e836f22 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -37,12 +37,15 @@ enum clock_event_mode {
37 * reached from DETACHED or SHUTDOWN. 37 * reached from DETACHED or SHUTDOWN.
38 * ONESHOT: Device is programmed to generate event only once. Can be reached 38 * ONESHOT: Device is programmed to generate event only once. Can be reached
39 * from DETACHED or SHUTDOWN. 39 * from DETACHED or SHUTDOWN.
40 * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
41 * stopped.
40 */ 42 */
41enum clock_event_state { 43enum clock_event_state {
42 CLOCK_EVT_STATE_DETACHED, 44 CLOCK_EVT_STATE_DETACHED,
43 CLOCK_EVT_STATE_SHUTDOWN, 45 CLOCK_EVT_STATE_SHUTDOWN,
44 CLOCK_EVT_STATE_PERIODIC, 46 CLOCK_EVT_STATE_PERIODIC,
45 CLOCK_EVT_STATE_ONESHOT, 47 CLOCK_EVT_STATE_ONESHOT,
48 CLOCK_EVT_STATE_ONESHOT_STOPPED,
46}; 49};
47 50
48/* 51/*
@@ -84,12 +87,13 @@ enum clock_event_state {
84 * @mult: nanosecond to cycles multiplier 87 * @mult: nanosecond to cycles multiplier
85 * @shift: nanoseconds to cycles divisor (power of two) 88 * @shift: nanoseconds to cycles divisor (power of two)
86 * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE 89 * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
87 * @state: current state of the device, assigned by the core code 90 * @state_use_accessors:current state of the device, assigned by the core code
88 * @features: features 91 * @features: features
89 * @retries: number of forced programming retries 92 * @retries: number of forced programming retries
90 * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. 93 * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
91 * @set_state_periodic: switch state to periodic, if !set_mode 94 * @set_state_periodic: switch state to periodic, if !set_mode
92 * @set_state_oneshot: switch state to oneshot, if !set_mode 95 * @set_state_oneshot: switch state to oneshot, if !set_mode
96 * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
93 * @set_state_shutdown: switch state to shutdown, if !set_mode 97 * @set_state_shutdown: switch state to shutdown, if !set_mode
94 * @tick_resume: resume clkevt device, if !set_mode 98 * @tick_resume: resume clkevt device, if !set_mode
95 * @broadcast: function to broadcast events 99 * @broadcast: function to broadcast events
@@ -113,7 +117,7 @@ struct clock_event_device {
113 u32 mult; 117 u32 mult;
114 u32 shift; 118 u32 shift;
115 enum clock_event_mode mode; 119 enum clock_event_mode mode;
116 enum clock_event_state state; 120 enum clock_event_state state_use_accessors;
117 unsigned int features; 121 unsigned int features;
118 unsigned long retries; 122 unsigned long retries;
119 123
@@ -121,11 +125,12 @@ struct clock_event_device {
121 * State transition callback(s): Only one of the two groups should be 125 * State transition callback(s): Only one of the two groups should be
122 * defined: 126 * defined:
123 * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME. 127 * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
124 * - set_state_{shutdown|periodic|oneshot}(), tick_resume(). 128 * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
125 */ 129 */
126 void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); 130 void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
127 int (*set_state_periodic)(struct clock_event_device *); 131 int (*set_state_periodic)(struct clock_event_device *);
128 int (*set_state_oneshot)(struct clock_event_device *); 132 int (*set_state_oneshot)(struct clock_event_device *);
133 int (*set_state_oneshot_stopped)(struct clock_event_device *);
129 int (*set_state_shutdown)(struct clock_event_device *); 134 int (*set_state_shutdown)(struct clock_event_device *);
130 int (*tick_resume)(struct clock_event_device *); 135 int (*tick_resume)(struct clock_event_device *);
131 136
@@ -144,6 +149,32 @@ struct clock_event_device {
144 struct module *owner; 149 struct module *owner;
145} ____cacheline_aligned; 150} ____cacheline_aligned;
146 151
152/* Helpers to verify state of a clockevent device */
153static inline bool clockevent_state_detached(struct clock_event_device *dev)
154{
155 return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
156}
157
158static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
159{
160 return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
161}
162
163static inline bool clockevent_state_periodic(struct clock_event_device *dev)
164{
165 return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
166}
167
168static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
169{
170 return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
171}
172
173static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
174{
175 return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
176}
177
147/* 178/*
148 * Calculate a multiplication factor for scaled math, which is used to convert 179 * Calculate a multiplication factor for scaled math, which is used to convert
149 * nanoseconds based values to clock ticks: 180 * nanoseconds based values to clock ticks:
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d27d0152271f..278dd279a7a8 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -181,7 +181,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
181 181
182extern int clocksource_unregister(struct clocksource*); 182extern int clocksource_unregister(struct clocksource*);
183extern void clocksource_touch_watchdog(void); 183extern void clocksource_touch_watchdog(void);
184extern struct clocksource* clocksource_get_next(void);
185extern void clocksource_change_rating(struct clocksource *cs, int rating); 184extern void clocksource_change_rating(struct clocksource *cs, int rating);
186extern void clocksource_suspend(void); 185extern void clocksource_suspend(void);
187extern void clocksource_resume(void); 186extern void clocksource_resume(void);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 867722591be2..05be2352fef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -250,7 +250,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
250 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 250 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
251 251
252#define WRITE_ONCE(x, val) \ 252#define WRITE_ONCE(x, val) \
253 ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; }) 253 ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
254
255/**
256 * READ_ONCE_CTRL - Read a value heading a control dependency
257 * @x: The value to be read, heading the control dependency
258 *
259 * Control dependencies are tricky. See Documentation/memory-barriers.txt
260 * for important information on how to use them. Note that in many cases,
261 * use of smp_load_acquire() will be much simpler. Control dependencies
262 * should be avoided except on the hottest of hotpaths.
263 */
264#define READ_ONCE_CTRL(x) \
265({ \
266 typeof(x) __val = READ_ONCE(x); \
267 smp_read_barrier_depends(); /* Enforce control dependency. */ \
268 __val; \
269})
254 270
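An illustrative, deliberately tiny control-dependency pairing with hypothetical variables (not from the patch): the dependent store cannot be reordered before the marked load.

	static int consumer_ready;
	static int shared_msg;

	static void example_publish(void)
	{
		if (READ_ONCE_CTRL(consumer_ready))
			WRITE_ONCE(shared_msg, 42);	/* ordered after the load */
	}
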
255#endif /* __KERNEL__ */ 271#endif /* __KERNEL__ */
256 272
@@ -450,7 +466,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
450 * with an explicit memory barrier or atomic instruction that provides the 466 * with an explicit memory barrier or atomic instruction that provides the
451 * required ordering. 467 * required ordering.
452 * 468 *
453 * If possible use READ_ONCE/ASSIGN_ONCE instead. 469 * If possible use READ_ONCE()/WRITE_ONCE() instead.
454 */ 470 */
455#define __ACCESS_ONCE(x) ({ \ 471#define __ACCESS_ONCE(x) ({ \
456 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ 472 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 34025df61829..c9e5c57e4edf 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -71,7 +71,6 @@ static inline char *config_item_name(struct config_item * item)
71 return item->ci_name; 71 return item->ci_name;
72} 72}
73 73
74extern void config_item_init(struct config_item *);
75extern void config_item_init_type_name(struct config_item *item, 74extern void config_item_init_type_name(struct config_item *item,
76 const char *name, 75 const char *name,
77 struct config_item_type *type); 76 struct config_item_type *type);
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 2821838256b4..b96bd299966f 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -14,8 +14,6 @@ extern void context_tracking_enter(enum ctx_state state);
14extern void context_tracking_exit(enum ctx_state state); 14extern void context_tracking_exit(enum ctx_state state);
15extern void context_tracking_user_enter(void); 15extern void context_tracking_user_enter(void);
16extern void context_tracking_user_exit(void); 16extern void context_tracking_user_exit(void);
17extern void __context_tracking_task_switch(struct task_struct *prev,
18 struct task_struct *next);
19 17
20static inline void user_enter(void) 18static inline void user_enter(void)
21{ 19{
@@ -51,19 +49,11 @@ static inline void exception_exit(enum ctx_state prev_ctx)
51 } 49 }
52} 50}
53 51
54static inline void context_tracking_task_switch(struct task_struct *prev,
55 struct task_struct *next)
56{
57 if (context_tracking_is_enabled())
58 __context_tracking_task_switch(prev, next);
59}
60#else 52#else
61static inline void user_enter(void) { } 53static inline void user_enter(void) { }
62static inline void user_exit(void) { } 54static inline void user_exit(void) { }
63static inline enum ctx_state exception_enter(void) { return 0; } 55static inline enum ctx_state exception_enter(void) { return 0; }
64static inline void exception_exit(enum ctx_state prev_ctx) { } 56static inline void exception_exit(enum ctx_state prev_ctx) { }
65static inline void context_tracking_task_switch(struct task_struct *prev,
66 struct task_struct *next) { }
67#endif /* !CONFIG_CONTEXT_TRACKING */ 57#endif /* !CONFIG_CONTEXT_TRACKING */
68 58
69 59
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 6b7b96a32b75..678ecdf90cf6 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -12,6 +12,7 @@ struct context_tracking {
12 * may be further optimized using static keys. 12 * may be further optimized using static keys.
13 */ 13 */
14 bool active; 14 bool active;
15 int recursion;
15 enum ctx_state { 16 enum ctx_state {
16 CONTEXT_KERNEL = 0, 17 CONTEXT_KERNEL = 0,
17 CONTEXT_USER, 18 CONTEXT_USER,
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index bd955270d5aa..c156f5082758 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -28,6 +28,9 @@
28#include <linux/thermal.h> 28#include <linux/thermal.h>
29#include <linux/cpumask.h> 29#include <linux/cpumask.h>
30 30
31typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
32 unsigned long voltage, u32 *power);
33
31#ifdef CONFIG_CPU_THERMAL 34#ifdef CONFIG_CPU_THERMAL
32/** 35/**
33 * cpufreq_cooling_register - function to create cpufreq cooling device. 36 * cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -36,6 +39,10 @@
36struct thermal_cooling_device * 39struct thermal_cooling_device *
37cpufreq_cooling_register(const struct cpumask *clip_cpus); 40cpufreq_cooling_register(const struct cpumask *clip_cpus);
38 41
42struct thermal_cooling_device *
43cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
44 u32 capacitance, get_static_t plat_static_func);
45
39/** 46/**
40 * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. 47 * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
41 * @np: a valid struct device_node to the cooling device device tree node. 48 * @np: a valid struct device_node to the cooling device device tree node.
@@ -45,6 +52,12 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus);
45struct thermal_cooling_device * 52struct thermal_cooling_device *
46of_cpufreq_cooling_register(struct device_node *np, 53of_cpufreq_cooling_register(struct device_node *np,
47 const struct cpumask *clip_cpus); 54 const struct cpumask *clip_cpus);
55
56struct thermal_cooling_device *
57of_cpufreq_power_cooling_register(struct device_node *np,
58 const struct cpumask *clip_cpus,
59 u32 capacitance,
60 get_static_t plat_static_func);
48#else 61#else
49static inline struct thermal_cooling_device * 62static inline struct thermal_cooling_device *
50of_cpufreq_cooling_register(struct device_node *np, 63of_cpufreq_cooling_register(struct device_node *np,
@@ -52,6 +65,15 @@ of_cpufreq_cooling_register(struct device_node *np,
52{ 65{
53 return ERR_PTR(-ENOSYS); 66 return ERR_PTR(-ENOSYS);
54} 67}
68
69static inline struct thermal_cooling_device *
70of_cpufreq_power_cooling_register(struct device_node *np,
71 const struct cpumask *clip_cpus,
72 u32 capacitance,
73 get_static_t plat_static_func)
74{
75 return NULL;
76}
55#endif 77#endif
56 78
57/** 79/**
@@ -68,11 +90,28 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus)
68 return ERR_PTR(-ENOSYS); 90 return ERR_PTR(-ENOSYS);
69} 91}
70static inline struct thermal_cooling_device * 92static inline struct thermal_cooling_device *
93cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
94 u32 capacitance, get_static_t plat_static_func)
95{
96 return NULL;
97}
98
99static inline struct thermal_cooling_device *
71of_cpufreq_cooling_register(struct device_node *np, 100of_cpufreq_cooling_register(struct device_node *np,
72 const struct cpumask *clip_cpus) 101 const struct cpumask *clip_cpus)
73{ 102{
74 return ERR_PTR(-ENOSYS); 103 return ERR_PTR(-ENOSYS);
75} 104}
105
106static inline struct thermal_cooling_device *
107of_cpufreq_power_cooling_register(struct device_node *np,
108 const struct cpumask *clip_cpus,
109 u32 capacitance,
110 get_static_t plat_static_func)
111{
112 return NULL;
113}
114
76static inline 115static inline
77void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 116void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
78{ 117{
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 2ee4888c1f47..29ad97c34fd5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -65,7 +65,9 @@ struct cpufreq_policy {
65 65
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 67 should set cpufreq */
68 unsigned int cpu; /* cpu nr of CPU managing this policy */ 68 unsigned int cpu; /* cpu managing this policy, must be online */
69 unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */
70
69 struct clk *clk; 71 struct clk *clk;
70 struct cpufreq_cpuinfo cpuinfo;/* see above */ 72 struct cpufreq_cpuinfo cpuinfo;/* see above */
71 73
@@ -80,6 +82,7 @@ struct cpufreq_policy {
80 struct cpufreq_governor *governor; /* see below */ 82 struct cpufreq_governor *governor; /* see below */
81 void *governor_data; 83 void *governor_data;
82 bool governor_enabled; /* governor start/stop flag */ 84 bool governor_enabled; /* governor start/stop flag */
85 char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
83 86
84 struct work_struct update; /* if update_policy() needs to be 87 struct work_struct update; /* if update_policy() needs to be
85 * called, but you're in IRQ context */ 88 * called, but you're in IRQ context */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e89254796..d075d34279df 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -151,10 +151,6 @@ extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
-extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-				      struct cpuidle_device *dev);
-extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
-				struct cpuidle_device *dev);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
@@ -190,16 +186,28 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
+static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+	struct cpuidle_device *dev) {return NULL; }
+#endif
+
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+				      struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev);
+#else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
 {return -ENODEV; }
-static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
-	struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
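
A hedged sketch of what this split enables: with the freeze helpers now guarded by CONFIG_CPU_IDLE && CONFIG_SUSPEND, a suspend-to-idle caller can try the freeze path and, on -ENODEV from the stubs, fall back to the newly exported default idle routine. The exact fallback policy lives in kernel/sched/idle.c, not in this header; the function below is hypothetical.

static void example_freeze_idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	/* stubbed to -ENODEV when either CONFIG option is off */
	if (cpuidle_enter_freeze(drv, dev) < 0)
		default_idle_call();	/* assumed architecture fallback */
}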
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index 84920f3cc83e..a9953c762eee 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -3,7 +3,7 @@
  *
  * Implements the standard CRC ITU-T V.41:
  *   Width 16
- *   Poly  0x0x1021 (x^16 + x^12 + x^15 + 1)
+ *   Poly  0x1021 (x^16 + x^12 + x^15 + 1)
  *   Init  0
  *
  * This source code is licensed under the GNU General Public License,
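
For reference, a bit-at-a-time rendering of the CRC this header documents (poly 0x1021, init 0, MSB first). The in-tree crc_itu_t() in lib/crc-itu-t.c is table-driven but computes the same function; this sketch is for illustration only.

static u16 crc_itu_t_bitwise(u16 crc, const u8 *buf, size_t len)
{
	while (len--) {
		int i;

		crc ^= (u16)*buf++ << 8;	/* feed next byte, MSB first */
		for (i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021
					     : crc << 1;
	}
	return crc;
}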
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 10df5d2d093a..81ef938b0a8e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -53,6 +53,7 @@
 #define CRYPTO_ALG_TYPE_SHASH		0x00000009
 #define CRYPTO_ALG_TYPE_AHASH		0x0000000a
 #define CRYPTO_ALG_TYPE_RNG		0x0000000c
+#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
 #define CRYPTO_ALG_TYPE_PCOMPRESS	0x0000000f
 
 #define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
@@ -101,6 +102,12 @@
 #define CRYPTO_ALG_INTERNAL		0x00002000
 
 /*
+ * Temporary flag used to prevent legacy AEAD implementations from
+ * being used by user-space.
+ */
+#define CRYPTO_ALG_AEAD_NEW		0x00004000
+
+/*
  * Transform masks and values (for crt_flags).
  */
 #define CRYPTO_TFM_REQ_MASK		0x000fff00
@@ -138,9 +145,9 @@ struct crypto_async_request;
 struct crypto_aead;
 struct crypto_blkcipher;
 struct crypto_hash;
-struct crypto_rng;
 struct crypto_tfm;
 struct crypto_type;
+struct aead_request;
 struct aead_givcrypt_request;
 struct skcipher_givcrypt_request;
 
@@ -175,32 +182,6 @@ struct ablkcipher_request {
	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-/**
- * struct aead_request - AEAD request
- * @base: Common attributes for async crypto requests
- * @assoclen: Length in bytes of associated data for authentication
- * @cryptlen: Length of data to be encrypted or decrypted
- * @iv: Initialisation vector
- * @assoc: Associated data
- * @src: Source data
- * @dst: Destination data
- * @__ctx: Start of private context data
- */
-struct aead_request {
-	struct crypto_async_request base;
-
-	unsigned int assoclen;
-	unsigned int cryptlen;
-
-	u8 *iv;
-
-	struct scatterlist *assoc;
-	struct scatterlist *src;
-	struct scatterlist *dst;
-
-	void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
 struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
@@ -294,7 +275,7 @@ struct ablkcipher_alg {
 };
 
 /**
- * struct aead_alg - AEAD cipher definition
+ * struct old_aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
  *		 transformation. A transformation may support smaller tag sizes.
  *		 As the authentication tag is a message digest to ensure the
@@ -319,7 +300,7 @@ struct ablkcipher_alg {
  * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
  * mandatory and must be filled.
  */
-struct aead_alg {
+struct old_aead_alg {
	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
		      unsigned int keylen);
	int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
@@ -426,40 +407,12 @@ struct compress_alg {
			      unsigned int slen, u8 *dst, unsigned int *dlen);
 };
 
-/**
- * struct rng_alg - random number generator definition
- * @rng_make_random: The function defined by this variable obtains a random
- *		     number. The random number generator transform must generate
- *		     the random number out of the context provided with this
- *		     call.
- * @rng_reset: Reset of the random number generator by clearing the entire state.
- *	       With the invocation of this function call, the random number
- *	       generator shall completely reinitialize its state. If the random
- *	       number generator requires a seed for setting up a new state,
- *	       the seed must be provided by the consumer while invoking this
- *	       function. The required size of the seed is defined with
- *	       @seedsize .
- * @seedsize: The seed size required for a random number generator
- *	      initialization defined with this variable. Some random number
- *	      generators like the SP800-90A DRBG does not require a seed as the
- *	      seeding is implemented internally without the need of support by
- *	      the consumer. In this case, the seed size is set to zero.
- */
-struct rng_alg {
-	int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
-			       unsigned int dlen);
-	int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-
-	unsigned int seedsize;
-};
-
 
 #define cra_ablkcipher	cra_u.ablkcipher
 #define cra_aead	cra_u.aead
 #define cra_blkcipher	cra_u.blkcipher
 #define cra_cipher	cra_u.cipher
 #define cra_compress	cra_u.compress
-#define cra_rng		cra_u.rng
 
 /**
  * struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -505,7 +458,7 @@ struct rng_alg {
  *	      transformation algorithm.
  * @cra_type: Type of the cryptographic transformation. This is a pointer to
  *	      struct crypto_type, which implements callbacks common for all
- *	      trasnformation types. There are multiple options:
+ *	      transformation types. There are multiple options:
  *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
  *	      &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
  *	      This field might be empty. In that case, there are no common
@@ -555,11 +508,10 @@ struct crypto_alg {
 
	union {
		struct ablkcipher_alg ablkcipher;
-		struct aead_alg aead;
+		struct old_aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
-		struct rng_alg rng;
	} cra_u;
 
	int (*cra_init)(struct crypto_tfm *tfm);
@@ -567,7 +519,7 @@ struct crypto_alg {
	void (*cra_destroy)(struct crypto_alg *alg);
 
	struct module *cra_module;
-};
+} CRYPTO_MINALIGN_ATTR;
 
 /*
  * Algorithm registration interface.
@@ -602,21 +554,6 @@ struct ablkcipher_tfm {
	unsigned int reqsize;
 };
 
-struct aead_tfm {
-	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
-		      unsigned int keylen);
-	int (*encrypt)(struct aead_request *req);
-	int (*decrypt)(struct aead_request *req);
-	int (*givencrypt)(struct aead_givcrypt_request *req);
-	int (*givdecrypt)(struct aead_givcrypt_request *req);
-
-	struct crypto_aead *base;
-
-	unsigned int ivsize;
-	unsigned int authsize;
-	unsigned int reqsize;
-};
-
 struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -655,19 +592,11 @@ struct compress_tfm {
			      u8 *dst, unsigned int *dlen);
 };
 
-struct rng_tfm {
-	int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
-			      unsigned int dlen);
-	int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-};
-
 #define crt_ablkcipher	crt_u.ablkcipher
-#define crt_aead	crt_u.aead
 #define crt_blkcipher	crt_u.blkcipher
 #define crt_cipher	crt_u.cipher
 #define crt_hash	crt_u.hash
 #define crt_compress	crt_u.compress
-#define crt_rng	crt_u.rng
 
 struct crypto_tfm {
 
@@ -675,12 +604,10 @@ struct crypto_tfm {
 
	union {
		struct ablkcipher_tfm ablkcipher;
-		struct aead_tfm aead;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct hash_tfm hash;
		struct compress_tfm compress;
-		struct rng_tfm rng;
	} crt_u;
 
	void (*exit)(struct crypto_tfm *tfm);
@@ -694,10 +621,6 @@ struct crypto_ablkcipher {
	struct crypto_tfm base;
 };
 
-struct crypto_aead {
-	struct crypto_tfm base;
-};
-
 struct crypto_blkcipher {
	struct crypto_tfm base;
 };
@@ -714,10 +637,6 @@ struct crypto_hash {
	struct crypto_tfm base;
 };
 
-struct crypto_rng {
-	struct crypto_tfm base;
-};
-
 enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
@@ -1194,400 +1113,6 @@ static inline void ablkcipher_request_set_crypt(
 }
 
 /**
- * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
- *
- * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
- * (listed as type "aead" in /proc/crypto)
- *
- * The most prominent examples for this type of encryption is GCM and CCM.
- * However, the kernel supports other types of AEAD ciphers which are defined
- * with the following cipher string:
- *
- *	authenc(keyed message digest, block cipher)
- *
- * For example: authenc(hmac(sha256), cbc(aes))
- *
- * The example code provided for the asynchronous block cipher operation
- * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addtion, for the AEAD
- * operation, the aead_request_set_assoc function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
- * operation is that the caller should explicitly check for -EBADMSG of the
- * crypto_aead_decrypt. That error indicates an authentication error, i.e.
- * a breach in the integrity of the message. In essence, that -EBADMSG error
- * code is the key bonus an AEAD cipher has over "standard" block chaining
- * modes.
- */
-
-static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
-{
-	return (struct crypto_aead *)tfm;
-}
-
-/**
- * crypto_alloc_aead() - allocate AEAD cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- *	      AEAD cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for an AEAD. The returned struct
- * crypto_aead is the cipher handle that is required for any subsequent
- * API invocation for that AEAD.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- *	   of an error, PTR_ERR() returns the error code.
- */
-struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
-
-static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
-{
-	return &tfm->base;
-}
-
-/**
- * crypto_free_aead() - zeroize and free aead handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_aead(struct crypto_aead *tfm)
-{
-	crypto_free_tfm(crypto_aead_tfm(tfm));
-}
-
-static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
-{
-	return &crypto_aead_tfm(tfm)->crt_aead;
-}
-
-/**
- * crypto_aead_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the aead referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
-{
-	return crypto_aead_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_aead_authsize() - obtain maximum authentication data size
- * @tfm: cipher handle
- *
- * The maximum size of the authentication data for the AEAD cipher referenced
- * by the AEAD cipher handle is returned. The authentication data size may be
- * zero if the cipher implements a hard-coded maximum.
- *
- * The authentication data may also be known as "tag value".
- *
- * Return: authentication data size / tag size in bytes
- */
-static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
-{
-	return crypto_aead_crt(tfm)->authsize;
-}
-
-/**
- * crypto_aead_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the AEAD referenced with the cipher handle is returned.
- * The caller may use that information to allocate appropriate memory for the
- * data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
-{
-	return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
-}
-
-static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
-{
-	return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
-}
-
-static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
-{
-	return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
-}
-
-static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
-{
-	crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
-}
-
-static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
-{
-	crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
-}
-
-/**
- * crypto_aead_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the AEAD referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
-				     unsigned int keylen)
-{
-	struct aead_tfm *crt = crypto_aead_crt(tfm);
-
-	return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_aead_setauthsize() - set authentication data size
- * @tfm: cipher handle
- * @authsize: size of the authentication data / tag in bytes
- *
- * Set the authentication data size / tag size. AEAD requires an authentication
- * tag (or MAC) in addition to the associated data.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
-
-static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
-{
-	return __crypto_aead_cast(req->base.tfm);
-}
-
-/**
- * crypto_aead_encrypt() - encrypt plaintext
- * @req: reference to the aead_request handle that holds all information
- *	 needed to perform the cipher operation
- *
- * Encrypt plaintext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The encryption operation creates the authentication data /
- *		  tag. That data is concatenated with the created ciphertext.
- *		  The ciphertext memory size is therefore the given number of
- *		  block cipher blocks + the size defined by the
- *		  crypto_aead_setauthsize invocation. The caller must ensure
- *		  that sufficient memory is available for the ciphertext and
- *		  the authentication tag.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_aead_encrypt(struct aead_request *req)
-{
-	return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
-}
-
-/**
- * crypto_aead_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- *	 needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the aead_request handle. That data structure
- * and how it is filled with data is discussed with the aead_request_*
- * functions.
- *
- * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
- *		  authentication data / tag. That authentication data / tag
- *		  must have the size defined by the crypto_aead_setauthsize
- *		  invocation.
- *
- *
- * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
- *	   cipher operation performs the authentication of the data during the
- *	   decryption operation. Therefore, the function returns this error if
- *	   the authentication of the ciphertext was unsuccessful (i.e. the
- *	   integrity of the ciphertext or the associated data was violated);
- *	   < 0 if an error occurred.
- */
-static inline int crypto_aead_decrypt(struct aead_request *req)
-{
-	if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
-		return -EINVAL;
-
-	return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
-}
-
-/**
- * DOC: Asynchronous AEAD Request Handle
- *
- * The aead_request data structure contains all pointers to data required for
- * the AEAD cipher operation. This includes the cipher handle (which can be
- * used by multiple aead_request instances), pointer to plaintext and
- * ciphertext, asynchronous callback function, etc. It acts as a handle to the
- * aead_request_* API calls in a similar way as AEAD handle to the
- * crypto_aead_* API calls.
- */
-
-/**
- * crypto_aead_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
-	return crypto_aead_crt(tfm)->reqsize;
-}
-
-/**
- * aead_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing aead handle in the request
- * data structure with a different one.
- */
-static inline void aead_request_set_tfm(struct aead_request *req,
-					struct crypto_aead *tfm)
-{
-	req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
-}
-
-/**
- * aead_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the AEAD
- * encrypt and decrypt API calls. During the allocation, the provided aead
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- *	   of an error, PTR_ERR() returns the error code.
- */
-static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
-						      gfp_t gfp)
-{
-	struct aead_request *req;
-
-	req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
-
-	if (likely(req))
-		aead_request_set_tfm(req, tfm);
-
-	return req;
-}
-
-/**
- * aead_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void aead_request_free(struct aead_request *req)
-{
-	kzfree(req);
-}
-
-/**
- * aead_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- *	   increase the wait queue beyond the initial maximum size;
- *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- *	  crypto API, but provided to the callback function for it to use. Here,
- *	  the caller can provide a reference to memory the callback function can
- *	  operate on. As the callback function is invoked asynchronously to the
- *	  related functionality, it may need to access data structures of the
- *	  related functionality which can be referenced using this pointer. The
- *	  callback function can access the memory via the "data" field in the
- *	  crypto_async_request data structure provided to the callback function.
- *
- * Setting the callback function that is triggered once the cipher operation
- * completes
- *
- * The callback function is registered with the aead_request handle and
- * must comply with the following template
- *
- *	void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void aead_request_set_callback(struct aead_request *req,
-					     u32 flags,
-					     crypto_completion_t compl,
-					     void *data)
-{
-	req->base.complete = compl;
-	req->base.data = data;
-	req->base.flags = flags;
-}
-
-/**
- * aead_request_set_crypt - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @cryptlen: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- *	by crypto_aead_ivsize()
- *
- * Setting the source data and destination data scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- *
- * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
- *		  the caller must concatenate the ciphertext followed by the
- *		  authentication tag and provide the entire data stream to the
- *		  decryption operation (i.e. the data length used for the
- *		  initialization of the scatterlist and the data length for the
- *		  decryption operation is identical). For encryption, however,
- *		  the authentication tag is created while encrypting the data.
- *		  The destination buffer must hold sufficient space for the
- *		  ciphertext and the authentication tag while the encryption
- *		  invocation must only point to the plaintext data size. The
- *		  following code snippet illustrates the memory usage
- *		  buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
- *		  sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
- *		  aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
- */
-static inline void aead_request_set_crypt(struct aead_request *req,
-					  struct scatterlist *src,
-					  struct scatterlist *dst,
-					  unsigned int cryptlen, u8 *iv)
-{
-	req->src = src;
-	req->dst = dst;
-	req->cryptlen = cryptlen;
-	req->iv = iv;
-}
-
-/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * For encryption, the memory is filled with the associated data. For
- * decryption, the memory must point to the associated data.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
-					  struct scatterlist *assoc,
-					  unsigned int assoclen)
-{
-	req->assoc = assoc;
-	req->assoclen = assoclen;
-}
-
-/**
  * DOC: Synchronous Block Cipher API
  *
  * The synchronous block cipher API is used with the ciphers of type
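
A self-contained sketch of the legacy flow that the documentation removed above described (the API itself moves out of this header, e.g. to crypto/aead.h, rather than disappearing). Algorithm name, key, nonce, lengths and error handling are illustrative assumptions only.

static int example_gcm_encrypt(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req = NULL;
	struct scatterlist sg, asg;
	u8 key[16] = { 0 };		/* hypothetical all-zero key */
	u8 iv[12] = { 0 };		/* hypothetical GCM nonce */
	u8 assoc[8] = { 0 };		/* sketch only: real sg buffers must not live on the stack */
	unsigned int ptlen = 64;
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	/* ciphertext = plaintext + tag, per the removed encrypt note */
	buf = kzalloc(ptlen + crypto_aead_authsize(tfm), GFP_KERNEL);
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, ptlen + crypto_aead_authsize(tfm));
	sg_init_one(&asg, assoc, sizeof(assoc));

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_assoc(req, &asg, sizeof(assoc));

	err = crypto_aead_encrypt(req);	/* async tfms may return -EINPROGRESS */

out:
	aead_request_free(req);
	kfree(buf);
	crypto_free_aead(tfm);
	return err;
}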
diff --git a/include/linux/cryptouser.h b/include/linux/cryptouser.h
deleted file mode 100644
index 4abf2ea6a887..000000000000
--- a/include/linux/cryptouser.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Crypto user configuration API.
- *
- * Copyright (C) 2011 secunet Security Networks AG
- * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/* Netlink configuration messages.  */
-enum {
-	CRYPTO_MSG_BASE = 0x10,
-	CRYPTO_MSG_NEWALG = 0x10,
-	CRYPTO_MSG_DELALG,
-	CRYPTO_MSG_UPDATEALG,
-	CRYPTO_MSG_GETALG,
-	__CRYPTO_MSG_MAX
-};
-#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
-#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
-
-#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME
-
-/* Netlink message attributes.  */
-enum crypto_attr_type_t {
-	CRYPTOCFGA_UNSPEC,
-	CRYPTOCFGA_PRIORITY_VAL,	/* __u32 */
-	CRYPTOCFGA_REPORT_LARVAL,	/* struct crypto_report_larval */
-	CRYPTOCFGA_REPORT_HASH,		/* struct crypto_report_hash */
-	CRYPTOCFGA_REPORT_BLKCIPHER,	/* struct crypto_report_blkcipher */
-	CRYPTOCFGA_REPORT_AEAD,		/* struct crypto_report_aead */
-	CRYPTOCFGA_REPORT_COMPRESS,	/* struct crypto_report_comp */
-	CRYPTOCFGA_REPORT_RNG,		/* struct crypto_report_rng */
-	CRYPTOCFGA_REPORT_CIPHER,	/* struct crypto_report_cipher */
-	__CRYPTOCFGA_MAX
-
-#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
-};
-
-struct crypto_user_alg {
-	char cru_name[CRYPTO_MAX_ALG_NAME];
-	char cru_driver_name[CRYPTO_MAX_ALG_NAME];
-	char cru_module_name[CRYPTO_MAX_ALG_NAME];
-	__u32 cru_type;
-	__u32 cru_mask;
-	__u32 cru_refcnt;
-	__u32 cru_flags;
-};
-
-struct crypto_report_larval {
-	char type[CRYPTO_MAX_NAME];
-};
-
-struct crypto_report_hash {
-	char type[CRYPTO_MAX_NAME];
-	unsigned int blocksize;
-	unsigned int digestsize;
-};
-
-struct crypto_report_cipher {
-	char type[CRYPTO_MAX_ALG_NAME];
-	unsigned int blocksize;
-	unsigned int min_keysize;
-	unsigned int max_keysize;
-};
-
-struct crypto_report_blkcipher {
-	char type[CRYPTO_MAX_NAME];
-	char geniv[CRYPTO_MAX_NAME];
-	unsigned int blocksize;
-	unsigned int min_keysize;
-	unsigned int max_keysize;
-	unsigned int ivsize;
-};
-
-struct crypto_report_aead {
-	char type[CRYPTO_MAX_NAME];
-	char geniv[CRYPTO_MAX_NAME];
-	unsigned int blocksize;
-	unsigned int maxauthsize;
-	unsigned int ivsize;
-};
-
-struct crypto_report_comp {
-	char type[CRYPTO_MAX_NAME];
-};
-
-struct crypto_report_rng {
-	char type[CRYPTO_MAX_NAME];
-	unsigned int seedsize;
-};
-
-#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
-			       sizeof(struct crypto_report_blkcipher))
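
A hedged userspace sketch of the interface this header defines (the file is relocated rather than dropped): sending a CRYPTO_MSG_GETALG query over the crypto netlink family. The NETLINK_CRYPTO protocol constant comes from <linux/netlink.h>, and the reply parsing of CRYPTOCFGA_* attributes is left out; all of that is an assumption beyond what this header shows.

static int example_crypto_getalg(const char *name)
{
	struct {
		struct nlmsghdr nh;
		struct crypto_user_alg cru;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = sizeof(req);
	req.nh.nlmsg_type = CRYPTO_MSG_GETALG;
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.cru.cru_name, name, sizeof(req.cru.cru_name) - 1);

	if (send(fd, &req, sizeof(req), 0) < 0) {
		close(fd);
		return -1;
	}
	/* a real client would recv() and walk the CRYPTOCFGA_* attributes */
	close(fd);
	return 0;
}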
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index cb25af461054..420311bcee38 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,7 +45,6 @@ extern struct dentry *arch_debugfs_dir;
 
 /* declared over in file.c */
 extern const struct file_operations debugfs_file_operations;
-extern const struct inode_operations debugfs_link_operations;
 
 struct dentry *debugfs_create_file(const char *name, umode_t mode,
				   struct dentry *parent, void *data,
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 52456aa566a0..e1043f79122f 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,8 +11,8 @@
 #ifndef LINUX_DMAPOOL_H
 #define LINUX_DMAPOOL_H
 
+#include <linux/scatterlist.h>
 #include <asm/io.h>
-#include <asm/scatterlist.h>
 
 struct device;
 
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 30624954dec5..e9bc9292bd3a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -185,33 +185,85 @@ static inline int dmar_device_remove(void *handle)
 
 struct irte {
	union {
+		/* Shared between remapped and posted mode*/
		struct {
-			__u64	present		: 1,
-				fpd		: 1,
-				dst_mode	: 1,
-				redir_hint	: 1,
-				trigger_mode	: 1,
-				dlvry_mode	: 3,
-				avail		: 4,
-				__reserved_1	: 4,
-				vector		: 8,
-				__reserved_2	: 8,
-				dest_id		: 32;
+			__u64	present		: 1,  /*   0      */
+				fpd		: 1,  /*   1      */
+				__res0		: 6,  /*   2 -  6 */
+				avail		: 4,  /*   8 - 11 */
+				__res1		: 3,  /*  12 - 14 */
+				pst		: 1,  /*  15      */
+				vector		: 8,  /*  16 - 23 */
+				__res2		: 40; /*  24 - 63 */
+		};
+
+		/* Remapped mode */
+		struct {
+			__u64	r_present	: 1,  /*   0      */
+				r_fpd		: 1,  /*   1      */
+				dst_mode	: 1,  /*   2      */
+				redir_hint	: 1,  /*   3      */
+				trigger_mode	: 1,  /*   4      */
+				dlvry_mode	: 3,  /*   5 -  7 */
+				r_avail		: 4,  /*   8 - 11 */
+				r_res0		: 4,  /*  12 - 15 */
+				r_vector	: 8,  /*  16 - 23 */
+				r_res1		: 8,  /*  24 - 31 */
+				dest_id		: 32; /*  32 - 63 */
+		};
+
+		/* Posted mode */
+		struct {
+			__u64	p_present	: 1,  /*   0      */
+				p_fpd		: 1,  /*   1      */
+				p_res0		: 6,  /*   2 -  7 */
+				p_avail		: 4,  /*   8 - 11 */
+				p_res1		: 2,  /*  12 - 13 */
+				p_urgent	: 1,  /*  14      */
+				p_pst		: 1,  /*  15      */
+				p_vector	: 8,  /*  16 - 23 */
+				p_res2		: 14, /*  24 - 37 */
+				pda_l		: 26; /*  38 - 63 */
		};
		__u64 low;
	};
 
	union {
+		/* Shared between remapped and posted mode*/
		struct {
-			__u64	sid		: 16,
-				sq		: 2,
-				svt		: 2,
-				__reserved_3	: 44;
+			__u64	sid		: 16,  /* 64 - 79  */
+				sq		: 2,   /* 80 - 81  */
+				svt		: 2,   /* 82 - 83  */
+				__res3		: 44;  /* 84 - 127 */
+		};
+
+		/* Posted mode*/
+		struct {
+			__u64	p_sid		: 16,  /* 64 - 79  */
+				p_sq		: 2,   /* 80 - 81  */
+				p_svt		: 2,   /* 82 - 83  */
+				p_res3		: 12,  /* 84 - 95  */
+				pda_h		: 32;  /* 96 - 127 */
		};
		__u64 high;
	};
 };
 
+static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
+{
+	dst->present	= src->present;
+	dst->fpd	= src->fpd;
+	dst->avail	= src->avail;
+	dst->pst	= src->pst;
+	dst->vector	= src->vector;
+	dst->sid	= src->sid;
+	dst->sq		= src->sq;
+	dst->svt	= src->svt;
+}
+
+#define PDA_LOW_BIT    26
+#define PDA_HIGH_BIT   32
+
 enum {
	IRQ_REMAP_XAPIC_MODE,
	IRQ_REMAP_X2APIC_MODE,
@@ -227,6 +279,7 @@ extern void dmar_msi_read(int irq, struct msi_msg *msg);
 extern void dmar_msi_write(int irq, struct msi_msg *msg);
 extern int dmar_set_interrupt(struct intel_iommu *iommu);
 extern irqreturn_t dmar_fault(int irq, void *dev_id);
-extern int arch_setup_dmar_msi(unsigned int irq);
+extern int dmar_alloc_hwirq(int id, int node, void *arg);
+extern void dmar_free_hwirq(int irq);
 
 #endif /* __DMAR_H__ */
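
An illustrative sketch (not from the patch) of how the new posted-mode fields fit together: copy the bits both formats share with dmar_copy_shared_irte(), then fill the split posted-interrupt descriptor address. The descriptor is 64-byte aligned, so address bits 5:0 are implied zero; bits 31:6 go into pda_l (PDA_LOW_BIT wide) and bits 63:32 into pda_h. The function itself is hypothetical.

static void example_irte_set_posted(struct irte *irte, struct irte *shared,
				    u64 pi_desc_addr, u8 vector)
{
	struct irte pi = {};

	dmar_copy_shared_irte(&pi, shared);	/* present, fpd, sid, sq, svt */
	pi.pst = 1;				/* select the posted format */
	pi.p_urgent = 0;
	pi.p_vector = vector;
	pi.pda_l = (pi_desc_addr >> 6) & ((1ULL << PDA_LOW_BIT) - 1);
	pi.pda_h = pi_desc_addr >> (6 + PDA_LOW_BIT);	/* == addr >> 32 */
	*irte = pi;
}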
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index f820f0a336c9..5055ac34142d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -2,6 +2,7 @@
 #define __DMI_H__
 
 #include <linux/list.h>
+#include <linux/kobject.h>
 #include <linux/mod_devicetable.h>
 
 /* enum dmi_field is in mod_devicetable.h */
@@ -74,7 +75,7 @@ struct dmi_header {
	u8 type;
	u8 length;
	u16 handle;
-};
+} __packed;
 
 struct dmi_device {
	struct list_head list;
@@ -93,6 +94,7 @@ struct dmi_dev_onboard {
	int devfn;
 };
 
+extern struct kobject *dmi_kobj;
 extern int dmi_check_system(const struct dmi_system_id *list);
 const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
 extern const char * dmi_get_system_info(int field);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index af5be0368dec..5f19efe4eb3f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -96,6 +96,8 @@ typedef struct {
 #define EFI_MEMORY_WP		((u64)0x0000000000001000ULL)	/* write-protect */
 #define EFI_MEMORY_RP		((u64)0x0000000000002000ULL)	/* read-protect */
 #define EFI_MEMORY_XP		((u64)0x0000000000004000ULL)	/* execute-protect */
+#define EFI_MEMORY_MORE_RELIABLE \
+				((u64)0x0000000000010000ULL)	/* higher reliability */
 #define EFI_MEMORY_RUNTIME	((u64)0x8000000000000000ULL)	/* range requires runtime mapping */
 #define EFI_MEMORY_DESCRIPTOR_VERSION	1
 
@@ -583,6 +585,9 @@ void efi_native_runtime_setup(void);
 #define EFI_FILE_INFO_ID \
	EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
 
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
+	EFI_GUID( 0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+
 #define EFI_FILE_SYSTEM_GUID \
	EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
 
@@ -823,6 +828,7 @@ extern struct efi {
	unsigned long fw_vendor;	/* fw_vendor */
	unsigned long runtime;		/* runtime table */
	unsigned long config_table;	/* config tables */
+	unsigned long esrt;		/* ESRT table */
	efi_get_time_t *get_time;
	efi_set_time_t *set_time;
	efi_get_wakeup_time_t *get_wakeup_time;
@@ -864,6 +870,7 @@ extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if pos
 extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
 extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+extern void efi_find_mirror(void);
 #else
 static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
@@ -875,6 +882,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 extern int efi_config_init(efi_config_table_type_t *arch_tables);
+#ifdef CONFIG_EFI_ESRT
+extern void __init efi_esrt_init(void);
+#else
+static inline void efi_esrt_init(void) { }
+#endif
 extern int efi_config_parse_tables(void *config_tables, int count, int sz,
				   efi_config_table_type_t *arch_tables);
 extern u64 efi_get_iobase (void);
@@ -882,12 +894,15 @@ extern u32 efi_mem_type (unsigned long phys_addr);
 extern u64 efi_mem_attributes (unsigned long phys_addr);
 extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int __init efi_uart_console_only (void);
+extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
		struct resource *data_resource, struct resource *bss_resource);
 extern void efi_get_time(struct timespec *now);
 extern void efi_reserve_boot_services(void);
 extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
 extern struct efi_memory_map memmap;
+extern struct kobject *efi_kobj;
 
 extern int efi_reboot_quirk_mode;
 extern bool efi_poweroff_required(void);
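
A hedged sketch of what a scan for the new EFI_MEMORY_MORE_RELIABLE attribute looks like over the exported memory map; the real efi_find_mirror() additionally reports the ranges to the memory allocator rather than just counting them.

static u64 example_mirrored_bytes(void)
{
	u64 mirrored = 0;
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;

		if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
			mirrored += md->num_pages << EFI_PAGE_SHIFT;
	}
	return mirrored;
}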
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 45a91474487d..638b324f0291 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -39,6 +39,7 @@ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct reques
 typedef int (elevator_init_fn) (struct request_queue *,
				struct elevator_type *e);
 typedef void (elevator_exit_fn) (struct elevator_queue *);
+typedef void (elevator_registered_fn) (struct request_queue *);
 
 struct elevator_ops
 {
@@ -68,6 +69,7 @@ struct elevator_ops
 
	elevator_init_fn *elevator_init_fn;
	elevator_exit_fn *elevator_exit_fn;
+	elevator_registered_fn *elevator_registered_fn;
 };
 
 #define ELV_NAME_MAX	(16)
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 606563ef8a72..9012f8775208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -110,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr)
  */
 static inline bool is_multicast_ether_addr(const u8 *addr)
 {
-	return 0x01 & addr[0];
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	u32 a = *(const u32 *)addr;
+#else
+	u16 a = *(const u16 *)addr;
+#endif
+#ifdef __BIG_ENDIAN
+	return 0x01 & (a >> ((sizeof(a) * 8) - 8));
+#else
+	return 0x01 & a;
+#endif
+}
+
+static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#ifdef __BIG_ENDIAN
+	return 0x01 & ((*(const u64 *)addr) >> 56);
+#else
+	return 0x01 & (*(const u64 *)addr);
+#endif
+#else
+	return is_multicast_ether_addr(addr);
+#endif
 }
 
 /**
@@ -169,6 +191,24 @@ static inline bool is_valid_ether_addr(const u8 *addr)
 }
 
 /**
+ * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
+ * @proto: Ethertype/length value to be tested
+ *
+ * Check that the value from the Ethertype/length field is a valid Ethertype.
+ *
+ * Return true if the valid is an 802.3 supported Ethertype.
+ */
+static inline bool eth_proto_is_802_3(__be16 proto)
+{
+#ifndef __BIG_ENDIAN
+	/* if CPU is little endian mask off bits representing LSB */
+	proto &= htons(0xFF00);
+#endif
+	/* cast both to u16 and compare since LSB can be ignored */
+	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
+}
+
+/**
  * eth_random_addr - Generate software assigned random Ethernet address
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
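
For comparison, straightforward reference versions of the two optimized helpers added above. Both read the same way semantically: the multicast test checks the I/G bit (bit 0 of the first octet), and the 802.3 test can ignore the Ethertype's low byte because the threshold 0x0600 has a zero low byte. These sketches are illustrative, not part of the patch.

static inline bool is_multicast_ether_addr_ref(const u8 *addr)
{
	return addr[0] & 0x01;	/* byte access, no endian or alignment tricks */
}

static inline bool eth_proto_is_802_3_ref(__be16 proto)
{
	return ntohs(proto) >= ETH_P_802_3_MIN;	/* plain host-order compare */
}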
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 591f8c3ef410..920408a21ffd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -50,6 +50,8 @@
 #define MAX_ACTIVE_NODE_LOGS	8
 #define MAX_ACTIVE_DATA_LOGS	8
 
+#define VERSION_LEN	256
+
 /*
  * For superblock
  */
@@ -86,6 +88,12 @@ struct f2fs_super_block {
	__le32 extension_count;		/* # of extensions below */
	__u8 extension_list[F2FS_MAX_EXTENSION][8];	/* extension array */
	__le32 cp_payload;
+	__u8 version[VERSION_LEN];	/* the kernel version */
+	__u8 init_version[VERSION_LEN];	/* the initial kernel version */
+	__le32 feature;			/* defined features */
+	__u8 encryption_level;		/* versioning level for encryption */
+	__u8 encrypt_pw_salt[16];	/* Salt used for string2key algorithm */
+	__u8 reserved[871];		/* valid reserved region */
 } __packed;
 
 /*
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fa11b3a367be..17724f6ea983 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -207,6 +207,16 @@ struct bpf_prog_aux;
		.off   = OFF,					\
		.imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
+	((struct bpf_insn) {					\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
@@ -267,6 +277,14 @@ struct bpf_prog_aux;
		.off   = 0,					\
		.imm   = 0 })
 
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K)					\
+	((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF)				\
+	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
 #define bytes_to_bpf_size(bytes)				\
 ({								\
	int bpf_size = -EINVAL;					\
@@ -360,12 +378,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
 void bpf_prog_free(struct bpf_prog *fp);
 
-int bpf_convert_filter(struct sock_filter *prog, int len,
-		       struct bpf_insn *new_prog, int *new_len);
-
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
@@ -377,14 +392,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
	__bpf_prog_free(fp);
 }
 
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+				       unsigned int flen);
+
 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+			      bpf_aux_classic_check_t trans);
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		   unsigned int len);
 
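
An illustrative use of the new BPF_STX_XADD() macro: a tiny eBPF instruction array that atomically adds R2 to the 64-bit word at R1+0 and returns 0, in the style of in-kernel test programs. The array name is hypothetical.

static const struct bpf_insn example_xadd_prog[] = {
	/* lock *(u64 *)(R1 + 0) += R2 */
	BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
	/* R0 = 0; return */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};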
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 8293262401de..e65ef959546c 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -6,16 +6,16 @@
 #include <linux/bitops.h>
 
 struct frontswap_ops {
-	void (*init)(unsigned);
-	int (*store)(unsigned, pgoff_t, struct page *);
-	int (*load)(unsigned, pgoff_t, struct page *);
-	void (*invalidate_page)(unsigned, pgoff_t);
-	void (*invalidate_area)(unsigned);
+	void (*init)(unsigned); /* this swap type was just swapon'ed */
+	int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
+	int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
+	void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
+	void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
+	struct frontswap_ops *next; /* private pointer to next ops */
 };
 
 extern bool frontswap_enabled;
-extern struct frontswap_ops *
-	frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_register_ops(struct frontswap_ops *ops);
 extern void frontswap_shrink(unsigned long);
 extern unsigned long frontswap_curr_pages(void);
 extern void frontswap_writethrough(bool);
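
A skeleton of a backend under the changed registration contract: frontswap_register_ops() no longer returns the previously registered ops, because backends are now chained internally through the new ->next member. All names below are hypothetical placeholders.

static void example_init(unsigned type) { /* set up per-swap-type state */ }
static int example_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* reject: page stays on the normal swap path */
}
static int example_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing stored here */
}
static void example_invalidate_page(unsigned type, pgoff_t offset) { }
static void example_invalidate_area(unsigned type) { }

static struct frontswap_ops example_frontswap_ops = {
	.init			= example_init,
	.store			= example_store,
	.load			= example_load,
	.invalidate_page	= example_invalidate_page,
	.invalidate_area	= example_invalidate_area,
	/* .next is managed by the frontswap core */
};

static int __init example_frontswap_setup(void)
{
	frontswap_register_ops(&example_frontswap_ops);
	return 0;
}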
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 35ec87e490b1..e351da4a934f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -35,10 +35,10 @@
 #include <uapi/linux/fs.h>
 
 struct backing_dev_info;
+struct bdi_writeback;
 struct export_operations;
 struct hd_geometry;
 struct iovec;
-struct nameidata;
 struct kiocb;
 struct kobject;
 struct pipe_inode_info;
@@ -635,6 +635,14 @@ struct inode {
 
	struct hlist_node	i_hash;
	struct list_head	i_wb_list;	/* backing dev IO list */
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct bdi_writeback	*i_wb;		/* the associated cgroup wb */
+
+	/* foreign inode detection, see wbc_detach_inode() */
+	int			i_wb_frn_winner;
+	u16			i_wb_frn_avg_time;
+	u16			i_wb_frn_history;
+#endif
	struct list_head	i_lru;		/* inode LRU list */
	struct list_head	i_sb_list;
	union {
@@ -656,6 +664,7 @@ struct inode {
		struct pipe_inode_info	*i_pipe;
		struct block_device	*i_bdev;
		struct cdev		*i_cdev;
+		char			*i_link;
	};
 
	__u32			i_generation;
@@ -1232,6 +1241,8 @@ struct mm_struct;
 #define UMOUNT_NOFOLLOW	0x00000008	/* Don't follow symlink on umount */
 #define UMOUNT_UNUSED	0x80000000	/* Flag guaranteed to be unused */
 
+/* sb->s_iflags */
+#define SB_I_CGROUPWB	0x00000001	/* cgroup-aware writeback enabled */
 
 /* Possible states of 'frozen' field */
 enum {
@@ -1270,6 +1281,7 @@ struct super_block {
	const struct quotactl_ops	*s_qcop;
	const struct export_operations *s_export_op;
	unsigned long		s_flags;
+	unsigned long		s_iflags;	/* internal SB_I_* flags */
	unsigned long		s_magic;
	struct dentry		*s_root;
	struct rw_semaphore	s_umount;
@@ -1607,12 +1619,12 @@ struct file_operations {
 
 struct inode_operations {
	struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
-	void * (*follow_link) (struct dentry *, struct nameidata *);
+	const char * (*follow_link) (struct dentry *, void **);
	int (*permission) (struct inode *, int);
	struct posix_acl * (*get_acl)(struct inode *, int);
 
	int (*readlink) (struct dentry *, char __user *,int);
-	void (*put_link) (struct dentry *, struct nameidata *, void *);
+	void (*put_link) (struct inode *, void *);
 
	int (*create) (struct inode *,struct dentry *, umode_t, bool);
	int (*link) (struct dentry *,struct inode *,struct dentry *);
@@ -1806,6 +1818,11 @@ struct super_operations {
  *
  * I_DIO_WAKEUP		Never set.  Only used as a key for wait_on_bit().
  *
+ * I_WB_SWITCH		Cgroup bdi_writeback switching in progress.  Used to
+ *			synchronize competing switching instances and to tell
+ *			wb stat updates to grab mapping->tree_lock.  See
+ *			inode_switch_wb_work_fn() for details.
+ *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
 #define I_DIRTY_SYNC		(1 << 0)
@@ -1825,6 +1842,7 @@ struct super_operations {
 #define I_DIRTY_TIME		(1 << 11)
 #define __I_DIRTY_TIME_EXPIRED	12
 #define I_DIRTY_TIME_EXPIRED	(1 << __I_DIRTY_TIME_EXPIRED)
+#define I_WB_SWITCH		(1 << 13)
 
 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
 #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
@@ -1879,6 +1897,7 @@ enum file_time_flags {
	S_VERSION = 8,
 };
 
+extern bool atime_needs_update(const struct path *, struct inode *);
 extern void touch_atime(const struct path *);
 static inline void file_accessed(struct file *file)
 {
@@ -2240,7 +2259,13 @@ extern struct super_block *freeze_bdev(struct block_device *);
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
 extern int fsync_bdev(struct block_device *);
-extern int sb_is_blkdev_sb(struct super_block *sb);
+
+extern struct super_block *blockdev_superblock;
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+	return sb == blockdev_superblock;
+}
 #else
 static inline void bd_forget(struct inode *inode) {}
 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2279,6 +2304,9 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
					      void *holder);
 extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+extern int __blkdev_reread_part(struct block_device *bdev);
+extern int blkdev_reread_part(struct block_device *bdev);
+
 #ifdef CONFIG_SYSFS
 extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
 extern void bd_unlink_disk_holder(struct block_device *bdev,
@@ -2704,13 +2732,14 @@ extern const struct file_operations generic_ro_fops;
 
 extern int readlink_copy(char __user *, int, const char *);
 extern int page_readlink(struct dentry *, char __user *, int);
2707extern void *page_follow_link_light(struct dentry *, struct nameidata *); 2735extern const char *page_follow_link_light(struct dentry *, void **);
2708extern void page_put_link(struct dentry *, struct nameidata *, void *); 2736extern void page_put_link(struct inode *, void *);
2709extern int __page_symlink(struct inode *inode, const char *symname, int len, 2737extern int __page_symlink(struct inode *inode, const char *symname, int len,
2710 int nofs); 2738 int nofs);
2711extern int page_symlink(struct inode *inode, const char *symname, int len); 2739extern int page_symlink(struct inode *inode, const char *symname, int len);
2712extern const struct inode_operations page_symlink_inode_operations; 2740extern const struct inode_operations page_symlink_inode_operations;
2713extern void kfree_put_link(struct dentry *, struct nameidata *, void *); 2741extern void kfree_put_link(struct inode *, void *);
2742extern void free_page_put_link(struct inode *, void *);
2714extern int generic_readlink(struct dentry *, char __user *, int); 2743extern int generic_readlink(struct dentry *, char __user *, int);
2715extern void generic_fillattr(struct inode *, struct kstat *); 2744extern void generic_fillattr(struct inode *, struct kstat *);
2716int vfs_getattr_nosec(struct path *path, struct kstat *stat); 2745int vfs_getattr_nosec(struct path *path, struct kstat *stat);
@@ -2721,6 +2750,8 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes);
2721void inode_sub_bytes(struct inode *inode, loff_t bytes); 2750void inode_sub_bytes(struct inode *inode, loff_t bytes);
2722loff_t inode_get_bytes(struct inode *inode); 2751loff_t inode_get_bytes(struct inode *inode);
2723void inode_set_bytes(struct inode *inode, loff_t bytes); 2752void inode_set_bytes(struct inode *inode, loff_t bytes);
2753const char *simple_follow_link(struct dentry *, void **);
2754extern const struct inode_operations simple_symlink_inode_operations;
2724 2755
2725extern int iterate_dir(struct file *, struct dir_context *); 2756extern int iterate_dir(struct file *, struct dir_context *);
2726 2757
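The new i_link union member pairs with simple_follow_link()/simple_symlink_inode_operations above: a filesystem whose target string lives as long as the inode no longer needs a custom ->follow_link() at all. A sketch, with example_init_symlink() being hypothetical:

        #include <linux/fs.h>

        static void example_init_symlink(struct inode *inode, char *target)
        {
                inode->i_link = target;                         /* new union member */
                inode->i_op = &simple_symlink_inode_operations; /* serves i_link */
        }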
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h

index 0f313f93c586..65a517dd32f7 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -84,8 +84,6 @@ struct fsnotify_fname;
84 * Each group must define these ops. The fsnotify infrastructure will call 84 * Each group must define these ops. The fsnotify infrastructure will call
85 * these operations for each relevant group. 85 * these operations for each relevant group.
86 * 86 *
87 * should_send_event - given a group, inode, and mask this function determines
88 * if the group is interested in this event.
89 * handle_event - main call for a group to handle an fs event 87 * handle_event - main call for a group to handle an fs event
90 * free_group_priv - called when a group refcnt hits 0 to clean up the private union 88 * free_group_priv - called when a group refcnt hits 0 to clean up the private union
91 * freeing_mark - called when a mark is being destroyed for some reason. The group 89 * freeing_mark - called when a mark is being destroyed for some reason. The group
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 15928f0647e4..6ba7cf23748f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -368,6 +368,11 @@ extern void free_pages(unsigned long addr, unsigned int order);
368extern void free_hot_cold_page(struct page *page, bool cold); 368extern void free_hot_cold_page(struct page *page, bool cold);
369extern void free_hot_cold_page_list(struct list_head *list, bool cold); 369extern void free_hot_cold_page_list(struct list_head *list, bool cold);
370 370
371struct page_frag_cache;
372extern void *__alloc_page_frag(struct page_frag_cache *nc,
373 unsigned int fragsz, gfp_t gfp_mask);
374extern void __free_page_frag(void *addr);
375
371extern void __free_kmem_pages(struct page *page, unsigned int order); 376extern void __free_kmem_pages(struct page *page, unsigned int order);
372extern void free_kmem_pages(unsigned long addr, unsigned int order); 377extern void free_kmem_pages(unsigned long addr, unsigned int order);
373 378
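The page-fragment helpers carve small allocations out of a cached compound page; this is the allocator behind netdev_alloc_frag(). A sketch assuming the struct page_frag_cache definition (it lives in mm_types.h) is visible; the cache and wrappers are hypothetical:

        #include <linux/gfp.h>
        #include <linux/mm_types.h>

        static struct page_frag_cache example_cache;

        static void *example_alloc_frag(unsigned int size)
        {
                return __alloc_page_frag(&example_cache, size, GFP_ATOMIC);
        }

        static void example_free_frag(void *data)
        {
                __free_page_frag(data); /* drops the fragment's page reference */
        }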
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index ab81339a8590..d12b5d566e4b 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -196,13 +196,6 @@ static inline int gpio_export_link(struct device *dev, const char *name,
196 return -EINVAL; 196 return -EINVAL;
197} 197}
198 198
199static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
200{
201 /* GPIO can never have been requested */
202 WARN_ON(1);
203 return -EINVAL;
204}
205
206static inline void gpio_unexport(unsigned gpio) 199static inline void gpio_unexport(unsigned gpio)
207{ 200{
208 /* GPIO can never have been exported */ 201 /* GPIO can never have been exported */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 3a7c9ffd5ab9..fd098169fe87 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -100,24 +100,25 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
100/* Value get/set from non-sleeping context */ 100/* Value get/set from non-sleeping context */
101int gpiod_get_value(const struct gpio_desc *desc); 101int gpiod_get_value(const struct gpio_desc *desc);
102void gpiod_set_value(struct gpio_desc *desc, int value); 102void gpiod_set_value(struct gpio_desc *desc, int value);
103void gpiod_set_array(unsigned int array_size, 103void gpiod_set_array_value(unsigned int array_size,
104 struct gpio_desc **desc_array, int *value_array); 104 struct gpio_desc **desc_array, int *value_array);
105int gpiod_get_raw_value(const struct gpio_desc *desc); 105int gpiod_get_raw_value(const struct gpio_desc *desc);
106void gpiod_set_raw_value(struct gpio_desc *desc, int value); 106void gpiod_set_raw_value(struct gpio_desc *desc, int value);
107void gpiod_set_raw_array(unsigned int array_size, 107void gpiod_set_raw_array_value(unsigned int array_size,
108 struct gpio_desc **desc_array, int *value_array); 108 struct gpio_desc **desc_array,
109 int *value_array);
109 110
110/* Value get/set from sleeping context */ 111/* Value get/set from sleeping context */
111int gpiod_get_value_cansleep(const struct gpio_desc *desc); 112int gpiod_get_value_cansleep(const struct gpio_desc *desc);
112void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); 113void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
113void gpiod_set_array_cansleep(unsigned int array_size, 114void gpiod_set_array_value_cansleep(unsigned int array_size,
114 struct gpio_desc **desc_array, 115 struct gpio_desc **desc_array,
115 int *value_array); 116 int *value_array);
116int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); 117int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
117void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); 118void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
118void gpiod_set_raw_array_cansleep(unsigned int array_size, 119void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
119 struct gpio_desc **desc_array, 120 struct gpio_desc **desc_array,
120 int *value_array); 121 int *value_array);
121 122
122int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); 123int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
123 124
@@ -304,9 +305,9 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
304 /* GPIO can never have been requested */ 305 /* GPIO can never have been requested */
305 WARN_ON(1); 306 WARN_ON(1);
306} 307}
307static inline void gpiod_set_array(unsigned int array_size, 308static inline void gpiod_set_array_value(unsigned int array_size,
308 struct gpio_desc **desc_array, 309 struct gpio_desc **desc_array,
309 int *value_array) 310 int *value_array)
310{ 311{
311 /* GPIO can never have been requested */ 312 /* GPIO can never have been requested */
312 WARN_ON(1); 313 WARN_ON(1);
@@ -322,9 +323,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
322 /* GPIO can never have been requested */ 323 /* GPIO can never have been requested */
323 WARN_ON(1); 324 WARN_ON(1);
324} 325}
325static inline void gpiod_set_raw_array(unsigned int array_size, 326static inline void gpiod_set_raw_array_value(unsigned int array_size,
326 struct gpio_desc **desc_array, 327 struct gpio_desc **desc_array,
327 int *value_array) 328 int *value_array)
328{ 329{
329 /* GPIO can never have been requested */ 330 /* GPIO can never have been requested */
330 WARN_ON(1); 331 WARN_ON(1);
@@ -341,7 +342,7 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
341 /* GPIO can never have been requested */ 342 /* GPIO can never have been requested */
342 WARN_ON(1); 343 WARN_ON(1);
343} 344}
344static inline void gpiod_set_array_cansleep(unsigned int array_size, 345static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
345 struct gpio_desc **desc_array, 346 struct gpio_desc **desc_array,
346 int *value_array) 347 int *value_array)
347{ 348{
@@ -360,7 +361,7 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
360 /* GPIO can never have been requested */ 361 /* GPIO can never have been requested */
361 WARN_ON(1); 362 WARN_ON(1);
362} 363}
363static inline void gpiod_set_raw_array_cansleep(unsigned int array_size, 364static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
364 struct gpio_desc **desc_array, 365 struct gpio_desc **desc_array,
365 int *value_array) 366 int *value_array)
366{ 367{
@@ -449,7 +450,6 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
449int gpiod_export(struct gpio_desc *desc, bool direction_may_change); 450int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
450int gpiod_export_link(struct device *dev, const char *name, 451int gpiod_export_link(struct device *dev, const char *name,
451 struct gpio_desc *desc); 452 struct gpio_desc *desc);
452int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
453void gpiod_unexport(struct gpio_desc *desc); 453void gpiod_unexport(struct gpio_desc *desc);
454 454
455#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ 455#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
@@ -466,11 +466,6 @@ static inline int gpiod_export_link(struct device *dev, const char *name,
466 return -ENOSYS; 466 return -ENOSYS;
467} 467}
468 468
469static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
470{
471 return -ENOSYS;
472}
473
474static inline void gpiod_unexport(struct gpio_desc *desc) 469static inline void gpiod_unexport(struct gpio_desc *desc)
475{ 470{
476} 471}
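The array setters were only renamed (gpiod_set_array() and friends gained a _value infix); the calling convention is unchanged. A hypothetical helper driving ndescs descriptors to one level:

        #include <linux/gpio/consumer.h>

        static void example_set_all(struct gpio_desc **descs, unsigned int ndescs,
                                    int value)
        {
                int values[8];          /* sketch assumes ndescs <= 8 */
                unsigned int i;

                for (i = 0; i < ndescs; i++)
                        values[i] = value;
                gpiod_set_array_value(ndescs, descs, values);
        }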
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index f1b36593ec9f..cc7ec129b329 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -20,6 +20,7 @@ struct seq_file;
20 * struct gpio_chip - abstract a GPIO controller 20 * struct gpio_chip - abstract a GPIO controller
21 * @label: for diagnostics 21 * @label: for diagnostics
22 * @dev: optional device providing the GPIOs 22 * @dev: optional device providing the GPIOs
23 * @cdev: class device used by sysfs interface (may be NULL)
23 * @owner: helps prevent removal of modules exporting active GPIOs 24 * @owner: helps prevent removal of modules exporting active GPIOs
24 * @list: links gpio_chips together for traversal 25 * @list: links gpio_chips together for traversal
25 * @request: optional hook for chip-specific activation, such as 26 * @request: optional hook for chip-specific activation, such as
@@ -41,8 +42,12 @@ struct seq_file;
41 * @dbg_show: optional routine to show contents in debugfs; default code 42 * @dbg_show: optional routine to show contents in debugfs; default code
42 * will be used when this is omitted, but custom code can show extra 43 * will be used when this is omitted, but custom code can show extra
43 * state (such as pullup/pulldown configuration). 44 * state (such as pullup/pulldown configuration).
44 * @base: identifies the first GPIO number handled by this chip; or, if 45 * @base: identifies the first GPIO number handled by this chip;
45 * negative during registration, requests dynamic ID allocation. 46 * or, if negative during registration, requests dynamic ID allocation.
47 * DEPRECATION: providing anything non-negative and nailing the base
48 * offset of GPIO chips is deprecated. Please pass -1 as base to
49 * let gpiolib select the chip base in all possible cases. We want to
50 * get rid of the static GPIO number space in the long run.
46 * @ngpio: the number of GPIOs handled by this controller; the last GPIO 51 * @ngpio: the number of GPIOs handled by this controller; the last GPIO
47 * handled is (base + ngpio - 1). 52 * handled is (base + ngpio - 1).
48 * @desc: array of ngpio descriptors. Private. 53 * @desc: array of ngpio descriptors. Private.
@@ -57,7 +62,6 @@ struct seq_file;
57 * implies that if the chip supports IRQs, these IRQs need to be threaded 62 * implies that if the chip supports IRQs, these IRQs need to be threaded
58 * as the chip access may sleep when e.g. reading out the IRQ status 63 * as the chip access may sleep when e.g. reading out the IRQ status
59 * registers. 64 * registers.
60 * @exported: flags if the gpiochip is exported for use from sysfs. Private.
61 * @irq_not_threaded: flag must be set if @can_sleep is set but the 65 * @irq_not_threaded: flag must be set if @can_sleep is set but the
62 * IRQs don't need to be threaded 66 * IRQs don't need to be threaded
63 * 67 *
@@ -74,6 +78,7 @@ struct seq_file;
74struct gpio_chip { 78struct gpio_chip {
75 const char *label; 79 const char *label;
76 struct device *dev; 80 struct device *dev;
81 struct device *cdev;
77 struct module *owner; 82 struct module *owner;
78 struct list_head list; 83 struct list_head list;
79 84
@@ -109,7 +114,6 @@ struct gpio_chip {
109 const char *const *names; 114 const char *const *names;
110 bool can_sleep; 115 bool can_sleep;
111 bool irq_not_threaded; 116 bool irq_not_threaded;
112 bool exported;
113 117
114#ifdef CONFIG_GPIOLIB_IRQCHIP 118#ifdef CONFIG_GPIOLIB_IRQCHIP
115 /* 119 /*
@@ -121,6 +125,7 @@ struct gpio_chip {
121 unsigned int irq_base; 125 unsigned int irq_base;
122 irq_flow_handler_t irq_handler; 126 irq_flow_handler_t irq_handler;
123 unsigned int irq_default_type; 127 unsigned int irq_default_type;
128 int irq_parent;
124#endif 129#endif
125 130
126#if defined(CONFIG_OF_GPIO) 131#if defined(CONFIG_OF_GPIO)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f4af03404b97..dfd59d6bc6f0 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,7 +1,7 @@
1#ifndef LINUX_HARDIRQ_H 1#ifndef LINUX_HARDIRQ_H
2#define LINUX_HARDIRQ_H 2#define LINUX_HARDIRQ_H
3 3
4#include <linux/preempt_mask.h> 4#include <linux/preempt.h>
5#include <linux/lockdep.h> 5#include <linux/lockdep.h>
6#include <linux/ftrace_irq.h> 6#include <linux/ftrace_irq.h>
7#include <linux/vtime.h> 7#include <linux/vtime.h>
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 176b43670e5d..f17980de2662 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -815,6 +815,8 @@ void hid_disconnect(struct hid_device *hid);
815const struct hid_device_id *hid_match_id(struct hid_device *hdev, 815const struct hid_device_id *hid_match_id(struct hid_device *hdev,
816 const struct hid_device_id *id); 816 const struct hid_device_id *id);
817s32 hid_snto32(__u32 value, unsigned n); 817s32 hid_snto32(__u32 value, unsigned n);
818__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
819 unsigned offset, unsigned n);
818 820
819/** 821/**
820 * hid_device_io_start - enable HID input during probe, remove 822 * hid_device_io_start - enable HID input during probe, remove
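hid_field_extract() is now exported for drivers that parse raw reports themselves; offset and n are in bits, with n capped at 32. A hypothetical one-liner pulling an 8-bit field at bit offset 16:

        #include <linux/hid.h>

        static u32 example_read_field(struct hid_device *hdev, u8 *report)
        {
                return hid_field_extract(hdev, report, 16, 8);
        }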
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 9286a46b7d69..6aefcd0031a6 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
65 65
66static inline void *kmap_atomic(struct page *page) 66static inline void *kmap_atomic(struct page *page)
67{ 67{
68 preempt_disable();
68 pagefault_disable(); 69 pagefault_disable();
69 return page_address(page); 70 return page_address(page);
70} 71}
@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
73static inline void __kunmap_atomic(void *addr) 74static inline void __kunmap_atomic(void *addr)
74{ 75{
75 pagefault_enable(); 76 pagefault_enable();
77 preempt_enable();
76} 78}
77 79
78#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) 80#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
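With preempt_disable() folded in, a kmap_atomic() section is now an explicitly non-preemptible region even on !CONFIG_HIGHMEM builds. The usual pairing, as a sketch:

        #include <linux/highmem.h>
        #include <linux/string.h>

        static void example_zero_page(struct page *page)
        {
                void *addr = kmap_atomic(page); /* disables preemption and pagefaults */

                memset(addr, 0, PAGE_SIZE);     /* no sleeping allowed in here */
                kunmap_atomic(addr);            /* re-enables both */
        }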
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 05f6df1fdf5b..76dd4f0da5ca 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -53,34 +53,25 @@ enum hrtimer_restart {
53 * 53 *
54 * 0x00 inactive 54 * 0x00 inactive
55 * 0x01 enqueued into rbtree 55 * 0x01 enqueued into rbtree
56 * 0x02 callback function running
57 * 0x04 timer is migrated to another cpu
58 * 56 *
59 * Special cases: 57 * The callback state is not part of the timer->state because clearing it would
60 * 0x03 callback function running and enqueued 58 * mean touching the timer after the callback, this makes it impossible to free
61 * (was requeued on another CPU) 59 * the timer from the callback function.
62 * 0x05 timer was migrated on CPU hotunplug
63 * 60 *
64 * The "callback function running and enqueued" status is only possible on 61 * Therefore we track the callback state in:
65 * SMP. It happens for example when a posix timer expired and the callback 62 *
63 * timer->base->cpu_base->running == timer
64 *
65 * On SMP it is possible to have a "callback function running and enqueued"
66 * status. It happens for example when a posix timer expired and the callback
66 * queued a signal. Between dropping the lock which protects the posix timer 67 * queued a signal. Between dropping the lock which protects the posix timer
67 * and reacquiring the base lock of the hrtimer, another CPU can deliver the 68 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
68 * signal and rearm the timer. We have to preserve the callback running state, 69 * signal and rearm the timer.
69 * as otherwise the timer could be removed before the softirq code finishes the
70 * the handling of the timer.
71 *
72 * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
73 * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
74 * also affects HRTIMER_STATE_MIGRATE where the preservation is not
75 * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
76 * enqueued on the new cpu.
77 * 70 *
78 * All state transitions are protected by cpu_base->lock. 71 * All state transitions are protected by cpu_base->lock.
79 */ 72 */
80#define HRTIMER_STATE_INACTIVE 0x00 73#define HRTIMER_STATE_INACTIVE 0x00
81#define HRTIMER_STATE_ENQUEUED 0x01 74#define HRTIMER_STATE_ENQUEUED 0x01
82#define HRTIMER_STATE_CALLBACK 0x02
83#define HRTIMER_STATE_MIGRATE 0x04
84 75
85/** 76/**
86 * struct hrtimer - the basic hrtimer structure 77 * struct hrtimer - the basic hrtimer structure
@@ -130,6 +121,12 @@ struct hrtimer_sleeper {
130 struct task_struct *task; 121 struct task_struct *task;
131}; 122};
132 123
124#ifdef CONFIG_64BIT
125# define HRTIMER_CLOCK_BASE_ALIGN 64
126#else
127# define HRTIMER_CLOCK_BASE_ALIGN 32
128#endif
129
133/** 130/**
134 * struct hrtimer_clock_base - the timer base for a specific clock 131 * struct hrtimer_clock_base - the timer base for a specific clock
135 * @cpu_base: per cpu clock base 132 * @cpu_base: per cpu clock base
@@ -137,9 +134,7 @@ struct hrtimer_sleeper {
137 * timer to a base on another cpu. 134 * timer to a base on another cpu.
138 * @clockid: clock id for per_cpu support 135 * @clockid: clock id for per_cpu support
139 * @active: red black tree root node for the active timers 136 * @active: red black tree root node for the active timers
140 * @resolution: the resolution of the clock, in nanoseconds
141 * @get_time: function to retrieve the current time of the clock 137 * @get_time: function to retrieve the current time of the clock
142 * @softirq_time: the time when running the hrtimer queue in the softirq
143 * @offset: offset of this clock to the monotonic base 138 * @offset: offset of this clock to the monotonic base
144 */ 139 */
145struct hrtimer_clock_base { 140struct hrtimer_clock_base {
@@ -147,11 +142,9 @@ struct hrtimer_clock_base {
147 int index; 142 int index;
148 clockid_t clockid; 143 clockid_t clockid;
149 struct timerqueue_head active; 144 struct timerqueue_head active;
150 ktime_t resolution;
151 ktime_t (*get_time)(void); 145 ktime_t (*get_time)(void);
152 ktime_t softirq_time;
153 ktime_t offset; 146 ktime_t offset;
154}; 147} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
155 148
156enum hrtimer_base_type { 149enum hrtimer_base_type {
157 HRTIMER_BASE_MONOTONIC, 150 HRTIMER_BASE_MONOTONIC,
@@ -165,11 +158,16 @@ enum hrtimer_base_type {
165 * struct hrtimer_cpu_base - the per cpu clock bases 158 * struct hrtimer_cpu_base - the per cpu clock bases
166 * @lock: lock protecting the base and associated clock bases 159 * @lock: lock protecting the base and associated clock bases
167 * and timers 160 * and timers
161 * @seq: seqcount around __run_hrtimer
162 * @running: pointer to the currently running hrtimer
168 * @cpu: cpu number 163 * @cpu: cpu number
169 * @active_bases: Bitfield to mark bases with active timers 164 * @active_bases: Bitfield to mark bases with active timers
170 * @clock_was_set: Indicates that clock was set from irq context. 165 * @clock_was_set_seq: Sequence counter of clock was set events
166 * @migration_enabled: The migration of hrtimers to other cpus is enabled
167 * @nohz_active: The nohz functionality is enabled
171 * @expires_next: absolute time of the next event which was scheduled 168 * @expires_next: absolute time of the next event which was scheduled
172 * via clock_set_next_event() 169 * via clock_set_next_event()
170 * @next_timer: Pointer to the first expiring timer
173 * @in_hrtirq: hrtimer_interrupt() is currently executing 171 * @in_hrtirq: hrtimer_interrupt() is currently executing
174 * @hres_active: State of high resolution mode 172 * @hres_active: State of high resolution mode
175 * @hang_detected: The last hrtimer interrupt detected a hang 173 * @hang_detected: The last hrtimer interrupt detected a hang
@@ -178,27 +176,38 @@ enum hrtimer_base_type {
178 * @nr_hangs: Total number of hrtimer interrupt hangs 176 * @nr_hangs: Total number of hrtimer interrupt hangs
179 * @max_hang_time: Maximum time spent in hrtimer_interrupt 177 * @max_hang_time: Maximum time spent in hrtimer_interrupt
180 * @clock_base: array of clock bases for this cpu 178 * @clock_base: array of clock bases for this cpu
179 *
180 * Note: next_timer is just an optimization for __remove_hrtimer().
181 * Do not dereference the pointer because it is not reliable on
182 * cross cpu removals.
181 */ 183 */
182struct hrtimer_cpu_base { 184struct hrtimer_cpu_base {
183 raw_spinlock_t lock; 185 raw_spinlock_t lock;
186 seqcount_t seq;
187 struct hrtimer *running;
184 unsigned int cpu; 188 unsigned int cpu;
185 unsigned int active_bases; 189 unsigned int active_bases;
186 unsigned int clock_was_set; 190 unsigned int clock_was_set_seq;
191 bool migration_enabled;
192 bool nohz_active;
187#ifdef CONFIG_HIGH_RES_TIMERS 193#ifdef CONFIG_HIGH_RES_TIMERS
194 unsigned int in_hrtirq : 1,
195 hres_active : 1,
196 hang_detected : 1;
188 ktime_t expires_next; 197 ktime_t expires_next;
189 int in_hrtirq; 198 struct hrtimer *next_timer;
190 int hres_active; 199 unsigned int nr_events;
191 int hang_detected; 200 unsigned int nr_retries;
192 unsigned long nr_events; 201 unsigned int nr_hangs;
193 unsigned long nr_retries; 202 unsigned int max_hang_time;
194 unsigned long nr_hangs;
195 ktime_t max_hang_time;
196#endif 203#endif
197 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; 204 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
198}; 205} ____cacheline_aligned;
199 206
200static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) 207static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
201{ 208{
209 BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
210
202 timer->node.expires = time; 211 timer->node.expires = time;
203 timer->_softexpires = time; 212 timer->_softexpires = time;
204} 213}
@@ -262,19 +271,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
262 return ktime_sub(timer->node.expires, timer->base->get_time()); 271 return ktime_sub(timer->node.expires, timer->base->get_time());
263} 272}
264 273
265#ifdef CONFIG_HIGH_RES_TIMERS
266struct clock_event_device;
267
268extern void hrtimer_interrupt(struct clock_event_device *dev);
269
270/*
271 * In high resolution mode the time reference must be read accurate
272 */
273static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) 274static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
274{ 275{
275 return timer->base->get_time(); 276 return timer->base->get_time();
276} 277}
277 278
279#ifdef CONFIG_HIGH_RES_TIMERS
280struct clock_event_device;
281
282extern void hrtimer_interrupt(struct clock_event_device *dev);
283
278static inline int hrtimer_is_hres_active(struct hrtimer *timer) 284static inline int hrtimer_is_hres_active(struct hrtimer *timer)
279{ 285{
280 return timer->base->cpu_base->hres_active; 286 return timer->base->cpu_base->hres_active;
@@ -295,21 +301,16 @@ extern void hrtimer_peek_ahead_timers(void);
295 301
296extern void clock_was_set_delayed(void); 302extern void clock_was_set_delayed(void);
297 303
304extern unsigned int hrtimer_resolution;
305
298#else 306#else
299 307
300# define MONOTONIC_RES_NSEC LOW_RES_NSEC 308# define MONOTONIC_RES_NSEC LOW_RES_NSEC
301# define KTIME_MONOTONIC_RES KTIME_LOW_RES 309# define KTIME_MONOTONIC_RES KTIME_LOW_RES
302 310
303static inline void hrtimer_peek_ahead_timers(void) { } 311#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
304 312
305/* 313static inline void hrtimer_peek_ahead_timers(void) { }
306 * In non high resolution mode the time reference is taken from
307 * the base softirq time variable.
308 */
309static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
310{
311 return timer->base->softirq_time;
312}
313 314
314static inline int hrtimer_is_hres_active(struct hrtimer *timer) 315static inline int hrtimer_is_hres_active(struct hrtimer *timer)
315{ 316{
@@ -353,49 +354,47 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
353#endif 354#endif
354 355
355/* Basic timer operations: */ 356/* Basic timer operations: */
356extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, 357extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
357 const enum hrtimer_mode mode);
358extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
359 unsigned long range_ns, const enum hrtimer_mode mode); 358 unsigned long range_ns, const enum hrtimer_mode mode);
360extern int 359
361__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, 360/**
362 unsigned long delta_ns, 361 * hrtimer_start - (re)start an hrtimer on the current CPU
363 const enum hrtimer_mode mode, int wakeup); 362 * @timer: the timer to be added
363 * @tim: expiry time
364 * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
365 * relative (HRTIMER_MODE_REL)
366 */
367static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
368 const enum hrtimer_mode mode)
369{
370 hrtimer_start_range_ns(timer, tim, 0, mode);
371}
364 372
365extern int hrtimer_cancel(struct hrtimer *timer); 373extern int hrtimer_cancel(struct hrtimer *timer);
366extern int hrtimer_try_to_cancel(struct hrtimer *timer); 374extern int hrtimer_try_to_cancel(struct hrtimer *timer);
367 375
368static inline int hrtimer_start_expires(struct hrtimer *timer, 376static inline void hrtimer_start_expires(struct hrtimer *timer,
369 enum hrtimer_mode mode) 377 enum hrtimer_mode mode)
370{ 378{
371 unsigned long delta; 379 unsigned long delta;
372 ktime_t soft, hard; 380 ktime_t soft, hard;
373 soft = hrtimer_get_softexpires(timer); 381 soft = hrtimer_get_softexpires(timer);
374 hard = hrtimer_get_expires(timer); 382 hard = hrtimer_get_expires(timer);
375 delta = ktime_to_ns(ktime_sub(hard, soft)); 383 delta = ktime_to_ns(ktime_sub(hard, soft));
376 return hrtimer_start_range_ns(timer, soft, delta, mode); 384 hrtimer_start_range_ns(timer, soft, delta, mode);
377} 385}
378 386
379static inline int hrtimer_restart(struct hrtimer *timer) 387static inline void hrtimer_restart(struct hrtimer *timer)
380{ 388{
381 return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); 389 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
382} 390}
383 391
384/* Query timers: */ 392/* Query timers: */
385extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); 393extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
386extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
387 394
388extern ktime_t hrtimer_get_next_event(void); 395extern u64 hrtimer_get_next_event(void);
389 396
390/* 397extern bool hrtimer_active(const struct hrtimer *timer);
391 * A timer is active, when it is enqueued into the rbtree or the
392 * callback function is running or it's in the state of being migrated
393 * to another cpu.
394 */
395static inline int hrtimer_active(const struct hrtimer *timer)
396{
397 return timer->state != HRTIMER_STATE_INACTIVE;
398}
399 398
400/* 399/*
401 * Helper function to check, whether the timer is on one of the queues 400 * Helper function to check, whether the timer is on one of the queues
@@ -411,14 +410,29 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
411 */ 410 */
412static inline int hrtimer_callback_running(struct hrtimer *timer) 411static inline int hrtimer_callback_running(struct hrtimer *timer)
413{ 412{
414 return timer->state & HRTIMER_STATE_CALLBACK; 413 return timer->base->cpu_base->running == timer;
415} 414}
416 415
417/* Forward a hrtimer so it expires after now: */ 416/* Forward a hrtimer so it expires after now: */
418extern u64 417extern u64
419hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); 418hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
420 419
421/* Forward a hrtimer so it expires after the hrtimer's current now */ 420/**
421 * hrtimer_forward_now - forward the timer expiry so it expires after now
422 * @timer: hrtimer to forward
423 * @interval: the interval to forward
424 *
425 * Forward the timer expiry so it will expire after the current time
426 * of the hrtimer clock base. Returns the number of overruns.
427 *
428 * Can be safely called from the callback function of @timer. If
429 * called from other contexts @timer must neither be enqueued nor
430 * running the callback and the caller needs to take care of
431 * serialization.
432 *
433 * Note: This only updates the timer expiry value and does not requeue
434 * the timer.
435 */
422static inline u64 hrtimer_forward_now(struct hrtimer *timer, 436static inline u64 hrtimer_forward_now(struct hrtimer *timer,
423 ktime_t interval) 437 ktime_t interval)
424{ 438{
@@ -443,7 +457,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
443 457
444/* Soft interrupt function to run the hrtimer queues: */ 458/* Soft interrupt function to run the hrtimer queues: */
445extern void hrtimer_run_queues(void); 459extern void hrtimer_run_queues(void);
446extern void hrtimer_run_pending(void);
447 460
448/* Bootup initialization: */ 461/* Bootup initialization: */
449extern void __init hrtimers_init(void); 462extern void __init hrtimers_init(void);
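The start functions return void now and the callback-running state moved into cpu_base->running, but the usual periodic pattern is unchanged: rearm with hrtimer_forward_now() and return HRTIMER_RESTART. A hypothetical sketch:

        #include <linux/hrtimer.h>
        #include <linux/ktime.h>

        static enum hrtimer_restart example_cb(struct hrtimer *t)
        {
                /* ... periodic work ... */
                hrtimer_forward_now(t, ms_to_ktime(100));       /* push expiry past now */
                return HRTIMER_RESTART;                         /* requeue on return */
        }

        static void example_arm(struct hrtimer *t)
        {
                hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                t->function = example_cb;
                hrtimer_start(t, ms_to_ktime(100), HRTIMER_MODE_REL);
        }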
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
index 70a1dbbf2093..d4a527e58434 100644
--- a/include/linux/htirq.h
+++ b/include/linux/htirq.h
@@ -1,24 +1,38 @@
1#ifndef LINUX_HTIRQ_H 1#ifndef LINUX_HTIRQ_H
2#define LINUX_HTIRQ_H 2#define LINUX_HTIRQ_H
3 3
4struct pci_dev;
5struct irq_data;
6
4struct ht_irq_msg { 7struct ht_irq_msg {
5 u32 address_lo; /* low 32 bits of the ht irq message */ 8 u32 address_lo; /* low 32 bits of the ht irq message */
6 u32 address_hi; /* high 32 bits of the ht irq message */ 9 u32 address_hi; /* high 32 bits of the ht irq message */
7}; 10};
8 11
12typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
13 struct ht_irq_msg *msg);
14
15struct ht_irq_cfg {
16 struct pci_dev *dev;
17 /* Update callback used to cope with buggy hardware */
18 ht_irq_update_t *update;
19 unsigned pos;
20 unsigned idx;
21 struct ht_irq_msg msg;
22};
23
9/* Helper functions.. */ 24/* Helper functions.. */
10void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 25void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
11void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 26void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
12struct irq_data;
13void mask_ht_irq(struct irq_data *data); 27void mask_ht_irq(struct irq_data *data);
14void unmask_ht_irq(struct irq_data *data); 28void unmask_ht_irq(struct irq_data *data);
15 29
16/* The arch hook for getting things started */ 30/* The arch hook for getting things started */
17int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); 31int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
32 ht_irq_update_t *update);
33void arch_teardown_ht_irq(unsigned int irq);
18 34
19/* For drivers of buggy hardware */ 35/* For drivers of buggy hardware */
20typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
21 struct ht_irq_msg *msg);
22int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update); 36int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
23 37
24#endif /* LINUX_HTIRQ_H */ 38#endif /* LINUX_HTIRQ_H */
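The update hook, now also carried in the public struct ht_irq_cfg, lets drivers of buggy hardware rewrite the HT irq message before it is programmed. A hypothetical fixup for a device that ignores the high address bits:

        #include <linux/htirq.h>
        #include <linux/pci.h>

        static void example_fixup(struct pci_dev *dev, int irq,
                                  struct ht_irq_msg *msg)
        {
                msg->address_hi = 0;    /* device decodes only the low bits */
        }

        static int example_create(struct pci_dev *dev)
        {
                return __ht_create_irq(dev, 0, example_fixup);  /* idx 0 */
        }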
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 0bc03f100d04..9ad7828d9d34 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -675,6 +675,7 @@ struct twl4030_power_data {
675 struct twl4030_resconfig *board_config; 675 struct twl4030_resconfig *board_config;
676#define TWL4030_RESCONFIG_UNDEF ((u8)-1) 676#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
677 bool use_poweroff; /* Board is wired for TWL poweroff */ 677 bool use_poweroff; /* Board is wired for TWL poweroff */
678 bool ac_charger_quirk; /* Disable AC charger on board */
678}; 679};
679 680
680extern int twl4030_remove_script(u8 flags); 681extern int twl4030_remove_script(u8 flags);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 93b5ca754b5b..a633898f36ac 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -39,6 +39,19 @@
39 39
40struct device; 40struct device;
41 41
42/* IDE-specific values for req->cmd_type */
43enum ata_cmd_type_bits {
44 REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1,
45 REQ_TYPE_ATA_PC,
46 REQ_TYPE_ATA_SENSE, /* sense request */
47 REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */
48 REQ_TYPE_ATA_PM_RESUME, /* resume request */
49};
50
51#define ata_pm_request(rq) \
52 ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
53 (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
54
42/* Error codes returned in rq->errors to the higher part of the driver. */ 55/* Error codes returned in rq->errors to the higher part of the driver. */
43enum { 56enum {
44 IDE_DRV_ERROR_GENERAL = 101, 57 IDE_DRV_ERROR_GENERAL = 101,
@@ -1314,6 +1327,19 @@ struct ide_port_info {
1314 u8 udma_mask; 1327 u8 udma_mask;
1315}; 1328};
1316 1329
1330/*
1331 * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
1332 * requests.
1333 */
1334struct ide_pm_state {
1335 /* PM state machine step value, currently driver specific */
1336 int pm_step;
1337 /* requested PM state value (S1, S2, S3, S4, ...) */
1338 u32 pm_state;
1339 void* data; /* for driver use */
1340};
1341
1342
1317int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); 1343int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
1318int ide_pci_init_two(struct pci_dev *, struct pci_dev *, 1344int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
1319 const struct ide_port_info *, void *); 1345 const struct ide_port_info *, void *);
@@ -1551,4 +1577,5 @@ static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
1551#define ide_host_for_each_port(i, port, host) \ 1577#define ide_host_for_each_port(i, port, host) \
1552 for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) 1578 for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
1553 1579
1580
1554#endif /* _IDE_H */ 1581#endif /* _IDE_H */
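ata_pm_request() folds the two new PM request types into one test. A sketch of a dispatch-side check, assuming (as the ide-pm code does) that the struct ide_pm_state rides along in rq->special:

        #include <linux/blkdev.h>
        #include <linux/ide.h>

        static void example_handle(struct request *rq)
        {
                if (ata_pm_request(rq)) {
                        struct ide_pm_state *pm = rq->special;

                        pr_debug("PM request at step %d\n", pm->pm_step);
                }
        }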
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 8872ca103d06..1dc1f4ed4001 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -225,15 +225,13 @@ static inline bool ieee802154_is_valid_psdu_len(const u8 len)
225 * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid 225 * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
226 * @addr: extended addr to check 226 * @addr: extended addr to check
227 */ 227 */
228static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) 228static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
229{ 229{
230 /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff 230 /* Bail out if the address is all zero, or if the group
231 * is used internally as extended to short address broadcast mapping. 231 * address bit is set.
232 * This is currently a workaround because neighbor discovery can't
233 * deal with short addresses types right now.
234 */ 232 */
235 return ((addr != cpu_to_le64(0x0000000000000000ULL)) && 233 return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
236 (addr != cpu_to_le64(0xffffffffffffffffULL))); 234 !(addr & cpu_to_le64(0x0100000000000000ULL)));
237} 235}
238 236
239/** 237/**
@@ -244,9 +242,9 @@ static inline void ieee802154_random_extended_addr(__le64 *addr)
244{ 242{
245 get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); 243 get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
246 244
247 /* toggle some bit if we hit an invalid extended addr */ 245 /* clear the group bit, and set the locally administered bit */
248 if (!ieee802154_is_valid_extended_addr(*addr)) 246 ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;
249 ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; 247 ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;
250} 248}
251 249
252#endif /* LINUX_IEEE802154_H */ 250#endif /* LINUX_IEEE802154_H */
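Both helpers manipulate the first octet of the canonical EUI-64, stored in the last byte of the little-endian __le64: bit 0 is the group/individual bit, bit 1 the universal/local bit. The same logic byte-wise, as a hypothetical helper:

        #include <linux/ieee802154.h>

        static void example_make_local_unicast(u8 *eui64)
        {
                eui64[IEEE802154_EXTENDED_ADDR_LEN - 1] &= ~0x01;       /* clear group bit */
                eui64[IEEE802154_EXTENDED_ADDR_LEN - 1] |= 0x02;        /* set local bit */
        }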
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index da4929927f69..ae5d0d22955d 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -5,6 +5,15 @@
5 5
6 6
7/* We don't want this structure exposed to user space */ 7/* We don't want this structure exposed to user space */
8struct ifla_vf_stats {
9 __u64 rx_packets;
10 __u64 tx_packets;
11 __u64 rx_bytes;
12 __u64 tx_bytes;
13 __u64 broadcast;
14 __u64 multicast;
15};
16
8struct ifla_vf_info { 17struct ifla_vf_info {
9 __u32 vf; 18 __u32 vf;
10 __u8 mac[32]; 19 __u8 mac[32];
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6f6929ea8a0c..a4ccc3122f93 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -29,7 +29,7 @@ struct macvtap_queue;
29 * Maximum times a macvtap device can be opened. This can be used to 29 * Maximum times a macvtap device can be opened. This can be used to
30 * configure the number of receive queue, e.g. for multiqueue virtio. 30 * configure the number of receive queue, e.g. for multiqueue virtio.
31 */ 31 */
32#define MAX_MACVTAP_QUEUES 16 32#define MAX_MACVTAP_QUEUES 256
33 33
34#define MACVLAN_MC_FILTER_BITS 8 34#define MACVLAN_MC_FILTER_BITS 8
35#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) 35#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 66a7d7600f43..b49cf923becc 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -74,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po)
74struct module; 74struct module;
75 75
76struct pppox_proto { 76struct pppox_proto {
77 int (*create)(struct net *net, struct socket *sock); 77 int (*create)(struct net *net, struct socket *sock, int kern);
78 int (*ioctl)(struct socket *sock, unsigned int cmd, 78 int (*ioctl)(struct socket *sock, unsigned int cmd,
79 unsigned long arg); 79 unsigned long arg);
80 struct module *owner; 80 struct module *owner;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 920e4457ce6e..67ce5bd3b56a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
416/** 416/**
417 * __vlan_get_tag - get the VLAN ID that is part of the payload 417 * __vlan_get_tag - get the VLAN ID that is part of the payload
418 * @skb: skbuff to query 418 * @skb: skbuff to query
419 * @vlan_tci: buffer to store vlaue 419 * @vlan_tci: buffer to store value
420 * 420 *
421 * Returns error if the skb is not of VLAN type 421 * Returns error if the skb is not of VLAN type
422 */ 422 */
@@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
435/** 435/**
436 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] 436 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
437 * @skb: skbuff to query 437 * @skb: skbuff to query
438 * @vlan_tci: buffer to store vlaue 438 * @vlan_tci: buffer to store value
439 * 439 *
440 * Returns error if @skb->vlan_tci is not set correctly 440 * Returns error if @skb->vlan_tci is not set correctly
441 */ 441 */
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
456/** 456/**
457 * vlan_get_tag - get the VLAN ID from the skb 457 * vlan_get_tag - get the VLAN ID from the skb
458 * @skb: skbuff to query 458 * @skb: skbuff to query
459 * @vlan_tci: buffer to store vlaue 459 * @vlan_tci: buffer to store value
460 * 460 *
461 * Returns error if the skb is not VLAN tagged 461 * Returns error if the skb is not VLAN tagged
462 */ 462 */
@@ -539,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
539 */ 539 */
540 540
541 proto = vhdr->h_vlan_encapsulated_proto; 541 proto = vhdr->h_vlan_encapsulated_proto;
542 if (ntohs(proto) >= ETH_P_802_3_MIN) { 542 if (eth_proto_is_802_3(proto)) {
543 skb->protocol = proto; 543 skb->protocol = proto;
544 return; 544 return;
545 } 545 }
@@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
628 return features; 628 return features;
629} 629}
630 630
631/**
632 * compare_vlan_header - Compare two vlan headers
633 * @h1: Pointer to vlan header
634 * @h2: Pointer to vlan header
635 *
636 * Compare two vlan headers, returns 0 if equal.
637 *
638 * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
639 */
640static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
641 const struct vlan_hdr *h2)
642{
643#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
644 return *(u32 *)h1 ^ *(u32 *)h2;
645#else
646 return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
647 ((__force u32)h1->h_vlan_encapsulated_proto ^
648 (__force u32)h2->h_vlan_encapsulated_proto);
649#endif
650}
631#endif /* !(_LINUX_IF_VLAN_H_) */ 651#endif /* !(_LINUX_IF_VLAN_H_) */
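compare_vlan_header() follows the zero-means-equal convention of memcmp(). A hypothetical flow-matching check:

        #include <linux/if_vlan.h>

        static bool example_same_flow(const struct vlan_hdr *h,
                                      const struct vlan_hdr *cached)
        {
                return compare_vlan_header(h, cached) == 0;
        }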
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 2c677afeea47..193ad488d3e2 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -130,5 +130,6 @@ extern void ip_mc_unmap(struct in_device *);
130extern void ip_mc_remap(struct in_device *); 130extern void ip_mc_remap(struct in_device *);
131extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); 131extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
132extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); 132extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
133int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
133 134
134#endif 135#endif
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index ac48b10c9395..0e707f0c1a3e 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -24,6 +24,7 @@ struct inet_diag_handler {
24 struct inet_diag_msg *r, 24 struct inet_diag_msg *r,
25 void *info); 25 void *info);
26 __u16 idiag_type; 26 __u16 idiag_type;
27 __u16 idiag_info_size;
27}; 28};
28 29
29struct inet_connection_sock; 30struct inet_connection_sock;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0a21fbefdfbe..a4328cea376a 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -120,6 +120,9 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
120 || (!IN_DEV_FORWARD(in_dev) && \ 120 || (!IN_DEV_FORWARD(in_dev) && \
121 IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) 121 IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
122 122
123#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
124 IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
125
123#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) 126#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
124#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) 127#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
125#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) 128#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 696d22312b31..bb9b075f0eb0 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -50,9 +50,8 @@ extern struct fs_struct init_fs;
50 .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ 50 .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
51 .rlim = INIT_RLIMITS, \ 51 .rlim = INIT_RLIMITS, \
52 .cputimer = { \ 52 .cputimer = { \
53 .cputime = INIT_CPUTIME, \ 53 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
54 .running = 0, \ 54 .running = 0, \
55 .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
56 }, \ 55 }, \
57 .cred_guard_mutex = \ 56 .cred_guard_mutex = \
58 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 57 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 796ef9645827..d9a366d24e3b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -87,6 +87,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
87/* 87/*
88 * Decoding Capability Register 88 * Decoding Capability Register
89 */ 89 */
90#define cap_pi_support(c) (((c) >> 59) & 1)
90#define cap_read_drain(c) (((c) >> 55) & 1) 91#define cap_read_drain(c) (((c) >> 55) & 1)
91#define cap_write_drain(c) (((c) >> 54) & 1) 92#define cap_write_drain(c) (((c) >> 54) & 1)
92#define cap_max_amask_val(c) (((c) >> 48) & 0x3f) 93#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
@@ -115,13 +116,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
115 * Extended Capability Register 116 * Extended Capability Register
116 */ 117 */
117 118
119#define ecap_pasid(e) ((e >> 40) & 0x1)
118#define ecap_pss(e) ((e >> 35) & 0x1f) 120#define ecap_pss(e) ((e >> 35) & 0x1f)
119#define ecap_eafs(e) ((e >> 34) & 0x1) 121#define ecap_eafs(e) ((e >> 34) & 0x1)
120#define ecap_nwfs(e) ((e >> 33) & 0x1) 122#define ecap_nwfs(e) ((e >> 33) & 0x1)
121#define ecap_srs(e) ((e >> 31) & 0x1) 123#define ecap_srs(e) ((e >> 31) & 0x1)
122#define ecap_ers(e) ((e >> 30) & 0x1) 124#define ecap_ers(e) ((e >> 30) & 0x1)
123#define ecap_prs(e) ((e >> 29) & 0x1) 125#define ecap_prs(e) ((e >> 29) & 0x1)
124#define ecap_pasid(e) ((e >> 28) & 0x1) 126/* PASID support used to be on bit 28 */
125#define ecap_dis(e) ((e >> 27) & 0x1) 127#define ecap_dis(e) ((e >> 27) & 0x1)
126#define ecap_nest(e) ((e >> 26) & 0x1) 128#define ecap_nest(e) ((e >> 26) & 0x1)
127#define ecap_mts(e) ((e >> 25) & 0x1) 129#define ecap_mts(e) ((e >> 25) & 0x1)
@@ -295,9 +297,12 @@ struct q_inval {
295/* 1MB - maximum possible interrupt remapping table size */ 297/* 1MB - maximum possible interrupt remapping table size */
296#define INTR_REMAP_PAGE_ORDER 8 298#define INTR_REMAP_PAGE_ORDER 8
297#define INTR_REMAP_TABLE_REG_SIZE 0xf 299#define INTR_REMAP_TABLE_REG_SIZE 0xf
300#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
298 301
299#define INTR_REMAP_TABLE_ENTRIES 65536 302#define INTR_REMAP_TABLE_ENTRIES 65536
300 303
304struct irq_domain;
305
301struct ir_table { 306struct ir_table {
302 struct irte *base; 307 struct irte *base;
303 unsigned long *bitmap; 308 unsigned long *bitmap;
@@ -319,6 +324,9 @@ enum {
319 MAX_SR_DMAR_REGS 324 MAX_SR_DMAR_REGS
320}; 325};
321 326
327#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
328#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
329
322struct intel_iommu { 330struct intel_iommu {
323 void __iomem *reg; /* Pointer to hardware regs, virtual addr */ 331 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
324 u64 reg_phys; /* physical address of hw register set */ 332 u64 reg_phys; /* physical address of hw register set */
@@ -347,9 +355,12 @@ struct intel_iommu {
347 355
348#ifdef CONFIG_IRQ_REMAP 356#ifdef CONFIG_IRQ_REMAP
349 struct ir_table *ir_table; /* Interrupt remapping info */ 357 struct ir_table *ir_table; /* Interrupt remapping info */
358 struct irq_domain *ir_domain;
359 struct irq_domain *ir_msi_domain;
350#endif 360#endif
351 struct device *iommu_dev; /* IOMMU-sysfs device */ 361 struct device *iommu_dev; /* IOMMU-sysfs device */
352 int node; 362 int node;
363 u32 flags; /* Software defined flags */
353}; 364};
354 365
355static inline void __iommu_flush_cache( 366static inline void __iommu_flush_cache(
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 950ae4501826..be7e75c945e9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -413,7 +413,8 @@ enum
413 BLOCK_IOPOLL_SOFTIRQ, 413 BLOCK_IOPOLL_SOFTIRQ,
414 TASKLET_SOFTIRQ, 414 TASKLET_SOFTIRQ,
415 SCHED_SOFTIRQ, 415 SCHED_SOFTIRQ,
416 HRTIMER_SOFTIRQ, 416 HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
417 numbering. Sigh! */
417 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ 418 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
418 419
419 NR_SOFTIRQS 420 NR_SOFTIRQS
@@ -592,10 +593,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
592 clockid_t which_clock, enum hrtimer_mode mode); 593 clockid_t which_clock, enum hrtimer_mode mode);
593 594
594static inline 595static inline
595int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, 596void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
596 const enum hrtimer_mode mode) 597 const enum hrtimer_mode mode)
597{ 598{
598 return hrtimer_start(&ttimer->timer, time, mode); 599 hrtimer_start(&ttimer->timer, time, mode);
599} 600}
600 601
601static inline 602static inline
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 657fab4efab3..c27dde7215b5 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -141,6 +141,7 @@ static inline void __iomem *
141io_mapping_map_atomic_wc(struct io_mapping *mapping, 141io_mapping_map_atomic_wc(struct io_mapping *mapping,
142 unsigned long offset) 142 unsigned long offset)
143{ 143{
144 preempt_disable();
144 pagefault_disable(); 145 pagefault_disable();
145 return ((char __force __iomem *) mapping) + offset; 146 return ((char __force __iomem *) mapping) + offset;
146} 147}
@@ -149,6 +150,7 @@ static inline void
149io_mapping_unmap_atomic(void __iomem *vaddr) 150io_mapping_unmap_atomic(void __iomem *vaddr)
150{ 151{
151 pagefault_enable(); 152 pagefault_enable();
153 preempt_enable();
152} 154}
153 155
154/* Non-atomic map/unmap */ 156/* Non-atomic map/unmap */
diff --git a/include/linux/io.h b/include/linux/io.h
index 986f2bffea1e..fb5a99800e77 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -19,6 +19,7 @@
19#define _LINUX_IO_H 19#define _LINUX_IO_H
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/init.h>
22#include <asm/io.h> 23#include <asm/io.h>
23#include <asm/page.h> 24#include <asm/page.h>
24 25
@@ -111,6 +112,13 @@ static inline void arch_phys_wc_del(int handle)
111} 112}
112 113
113#define arch_phys_wc_add arch_phys_wc_add 114#define arch_phys_wc_add arch_phys_wc_add
115#ifndef arch_phys_wc_index
116static inline int arch_phys_wc_index(int handle)
117{
118 return -1;
119}
120#define arch_phys_wc_index arch_phys_wc_index
121#endif
114#endif 122#endif
115 123
116#endif /* _LINUX_IO_H */ 124#endif /* _LINUX_IO_H */
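arch_phys_wc_index() maps a write-combining cookie from arch_phys_wc_add() back to an MTRR index; the new fallback returns -1 where no such handle exists. A hypothetical sketch:

        #include <linux/io.h>
        #include <linux/printk.h>

        static void example_report_wc(unsigned long base, unsigned long size)
        {
                int handle = arch_phys_wc_add(base, size);

                pr_info("WC handle %d, MTRR index %d\n",
                        handle, arch_phys_wc_index(handle));
        }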
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0546b8710ce3..dc767f7c3704 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -114,6 +114,20 @@ enum iommu_attr {
114 DOMAIN_ATTR_MAX, 114 DOMAIN_ATTR_MAX,
115}; 115};
116 116
117/**
118 * struct iommu_dm_region - descriptor for a direct mapped memory region
119 * @list: Linked list pointers
120 * @start: System physical start address of the region
121 * @length: Length of the region in bytes
122 * @prot: IOMMU Protection flags (READ/WRITE/...)
123 */
124struct iommu_dm_region {
125 struct list_head list;
126 phys_addr_t start;
127 size_t length;
128 int prot;
129};
130
117#ifdef CONFIG_IOMMU_API 131#ifdef CONFIG_IOMMU_API
118 132
119/** 133/**
@@ -159,6 +173,10 @@ struct iommu_ops {
159 int (*domain_set_attr)(struct iommu_domain *domain, 173 int (*domain_set_attr)(struct iommu_domain *domain,
160 enum iommu_attr attr, void *data); 174 enum iommu_attr attr, void *data);
161 175
176 /* Request/Free a list of direct mapping requirements for a device */
177 void (*get_dm_regions)(struct device *dev, struct list_head *list);
178 void (*put_dm_regions)(struct device *dev, struct list_head *list);
179
162 /* Window handling functions */ 180 /* Window handling functions */
163 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, 181 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
164 phys_addr_t paddr, u64 size, int prot); 182 phys_addr_t paddr, u64 size, int prot);
@@ -193,6 +211,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
193 struct device *dev); 211 struct device *dev);
194extern void iommu_detach_device(struct iommu_domain *domain, 212extern void iommu_detach_device(struct iommu_domain *domain,
195 struct device *dev); 213 struct device *dev);
214extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
196extern int iommu_map(struct iommu_domain *domain, unsigned long iova, 215extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
197 phys_addr_t paddr, size_t size, int prot); 216 phys_addr_t paddr, size_t size, int prot);
198extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, 217extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -204,6 +223,10 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
204extern void iommu_set_fault_handler(struct iommu_domain *domain, 223extern void iommu_set_fault_handler(struct iommu_domain *domain,
205 iommu_fault_handler_t handler, void *token); 224 iommu_fault_handler_t handler, void *token);
206 225
226extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
227extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
228extern int iommu_request_dm_for_dev(struct device *dev);
229
207extern int iommu_attach_group(struct iommu_domain *domain, 230extern int iommu_attach_group(struct iommu_domain *domain,
208 struct iommu_group *group); 231 struct iommu_group *group);
209extern void iommu_detach_group(struct iommu_domain *domain, 232extern void iommu_detach_group(struct iommu_domain *domain,
@@ -227,6 +250,7 @@ extern int iommu_group_unregister_notifier(struct iommu_group *group,
227 struct notifier_block *nb); 250 struct notifier_block *nb);
228extern int iommu_group_id(struct iommu_group *group); 251extern int iommu_group_id(struct iommu_group *group);
229extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); 252extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
253extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
230 254
231extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, 255extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
232 void *data); 256 void *data);
@@ -332,6 +356,11 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
332{ 356{
333} 357}
334 358
359static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
360{
361 return NULL;
362}
363
335static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, 364static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
336 phys_addr_t paddr, int gfp_order, int prot) 365 phys_addr_t paddr, int gfp_order, int prot)
337{ 366{
@@ -373,6 +402,21 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
373{ 402{
374} 403}
375 404
405static inline void iommu_get_dm_regions(struct device *dev,
406 struct list_head *list)
407{
408}
409
410static inline void iommu_put_dm_regions(struct device *dev,
411 struct list_head *list)
412{
413}
414
415static inline int iommu_request_dm_for_dev(struct device *dev)
416{
417 return -ENODEV;
418}
419
376static inline int iommu_attach_group(struct iommu_domain *domain, 420static inline int iommu_attach_group(struct iommu_domain *domain,
377 struct iommu_group *group) 421 struct iommu_group *group)
378{ 422{
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62c6901cab55..812149160d3b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -126,13 +126,21 @@ struct msi_desc;
126struct irq_domain; 126struct irq_domain;
127 127
128/** 128/**
129 * struct irq_data - per irq and irq chip data passed down to chip functions 129 * struct irq_common_data - per irq data shared by all irqchips
130 * @state_use_accessors: status information for irq chip functions.
131 * Use accessor functions to deal with it
132 */
133struct irq_common_data {
134 unsigned int state_use_accessors;
135};
136
137/**
138 * struct irq_data - per irq chip data passed down to chip functions
130 * @mask: precomputed bitmask for accessing the chip registers 139 * @mask: precomputed bitmask for accessing the chip registers
131 * @irq: interrupt number 140 * @irq: interrupt number
132 * @hwirq: hardware interrupt number, local to the interrupt domain 141 * @hwirq: hardware interrupt number, local to the interrupt domain
133 * @node: node index useful for balancing 142 * @node: node index useful for balancing
 134 * @state_use_accessors: status information for irq chip functions. 143 * @common: pointer to data shared by all irqchips
135 * Use accessor functions to deal with it
136 * @chip: low level interrupt hardware access 144 * @chip: low level interrupt hardware access
137 * @domain: Interrupt translation domain; responsible for mapping 145 * @domain: Interrupt translation domain; responsible for mapping
138 * between hwirq number and linux irq number. 146 * between hwirq number and linux irq number.
@@ -153,7 +161,7 @@ struct irq_data {
153 unsigned int irq; 161 unsigned int irq;
154 unsigned long hwirq; 162 unsigned long hwirq;
155 unsigned int node; 163 unsigned int node;
156 unsigned int state_use_accessors; 164 struct irq_common_data *common;
157 struct irq_chip *chip; 165 struct irq_chip *chip;
158 struct irq_domain *domain; 166 struct irq_domain *domain;
159#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 167#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
@@ -166,7 +174,7 @@ struct irq_data {
166}; 174};
167 175
168/* 176/*
169 * Bit masks for irq_data.state 177 * Bit masks for irq_common_data.state_use_accessors
170 * 178 *
171 * IRQD_TRIGGER_MASK - Mask for the trigger type bits 179 * IRQD_TRIGGER_MASK - Mask for the trigger type bits
172 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending 180 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
@@ -198,34 +206,36 @@ enum {
198 IRQD_WAKEUP_ARMED = (1 << 19), 206 IRQD_WAKEUP_ARMED = (1 << 19),
199}; 207};
200 208
209#define __irqd_to_state(d) ((d)->common->state_use_accessors)
210
201static inline bool irqd_is_setaffinity_pending(struct irq_data *d) 211static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
202{ 212{
203 return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; 213 return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
204} 214}
205 215
206static inline bool irqd_is_per_cpu(struct irq_data *d) 216static inline bool irqd_is_per_cpu(struct irq_data *d)
207{ 217{
208 return d->state_use_accessors & IRQD_PER_CPU; 218 return __irqd_to_state(d) & IRQD_PER_CPU;
209} 219}
210 220
211static inline bool irqd_can_balance(struct irq_data *d) 221static inline bool irqd_can_balance(struct irq_data *d)
212{ 222{
213 return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); 223 return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
214} 224}
215 225
216static inline bool irqd_affinity_was_set(struct irq_data *d) 226static inline bool irqd_affinity_was_set(struct irq_data *d)
217{ 227{
218 return d->state_use_accessors & IRQD_AFFINITY_SET; 228 return __irqd_to_state(d) & IRQD_AFFINITY_SET;
219} 229}
220 230
221static inline void irqd_mark_affinity_was_set(struct irq_data *d) 231static inline void irqd_mark_affinity_was_set(struct irq_data *d)
222{ 232{
223 d->state_use_accessors |= IRQD_AFFINITY_SET; 233 __irqd_to_state(d) |= IRQD_AFFINITY_SET;
224} 234}
225 235
226static inline u32 irqd_get_trigger_type(struct irq_data *d) 236static inline u32 irqd_get_trigger_type(struct irq_data *d)
227{ 237{
228 return d->state_use_accessors & IRQD_TRIGGER_MASK; 238 return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
229} 239}
230 240
231/* 241/*
@@ -233,43 +243,43 @@ static inline u32 irqd_get_trigger_type(struct irq_data *d)
233 */ 243 */
234static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) 244static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
235{ 245{
236 d->state_use_accessors &= ~IRQD_TRIGGER_MASK; 246 __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
237 d->state_use_accessors |= type & IRQD_TRIGGER_MASK; 247 __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
238} 248}
239 249
240static inline bool irqd_is_level_type(struct irq_data *d) 250static inline bool irqd_is_level_type(struct irq_data *d)
241{ 251{
242 return d->state_use_accessors & IRQD_LEVEL; 252 return __irqd_to_state(d) & IRQD_LEVEL;
243} 253}
244 254
245static inline bool irqd_is_wakeup_set(struct irq_data *d) 255static inline bool irqd_is_wakeup_set(struct irq_data *d)
246{ 256{
247 return d->state_use_accessors & IRQD_WAKEUP_STATE; 257 return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
248} 258}
249 259
250static inline bool irqd_can_move_in_process_context(struct irq_data *d) 260static inline bool irqd_can_move_in_process_context(struct irq_data *d)
251{ 261{
252 return d->state_use_accessors & IRQD_MOVE_PCNTXT; 262 return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
253} 263}
254 264
255static inline bool irqd_irq_disabled(struct irq_data *d) 265static inline bool irqd_irq_disabled(struct irq_data *d)
256{ 266{
257 return d->state_use_accessors & IRQD_IRQ_DISABLED; 267 return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
258} 268}
259 269
260static inline bool irqd_irq_masked(struct irq_data *d) 270static inline bool irqd_irq_masked(struct irq_data *d)
261{ 271{
262 return d->state_use_accessors & IRQD_IRQ_MASKED; 272 return __irqd_to_state(d) & IRQD_IRQ_MASKED;
263} 273}
264 274
265static inline bool irqd_irq_inprogress(struct irq_data *d) 275static inline bool irqd_irq_inprogress(struct irq_data *d)
266{ 276{
267 return d->state_use_accessors & IRQD_IRQ_INPROGRESS; 277 return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
268} 278}
269 279
270static inline bool irqd_is_wakeup_armed(struct irq_data *d) 280static inline bool irqd_is_wakeup_armed(struct irq_data *d)
271{ 281{
272 return d->state_use_accessors & IRQD_WAKEUP_ARMED; 282 return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
273} 283}
274 284
275 285
@@ -280,12 +290,12 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d)
280 */ 290 */
281static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) 291static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
282{ 292{
283 d->state_use_accessors |= IRQD_IRQ_INPROGRESS; 293 __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS;
284} 294}
285 295
286static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) 296static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
287{ 297{
288 d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; 298 __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS;
289} 299}
290 300
291static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 301static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -327,6 +337,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
327 * @irq_write_msi_msg: optional to write message content for MSI 337 * @irq_write_msi_msg: optional to write message content for MSI
328 * @irq_get_irqchip_state: return the internal state of an interrupt 338 * @irq_get_irqchip_state: return the internal state of an interrupt
329 * @irq_set_irqchip_state: set the internal state of a interrupt 339 * @irq_set_irqchip_state: set the internal state of a interrupt
340 * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
330 * @flags: chip specific flags 341 * @flags: chip specific flags
331 */ 342 */
332struct irq_chip { 343struct irq_chip {
@@ -369,6 +380,8 @@ struct irq_chip {
369 int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); 380 int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
370 int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); 381 int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
371 382
383 int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
384
372 unsigned long flags; 385 unsigned long flags;
373}; 386};
374 387
@@ -422,6 +435,7 @@ extern void irq_cpu_online(void);
422extern void irq_cpu_offline(void); 435extern void irq_cpu_offline(void);
423extern int irq_set_affinity_locked(struct irq_data *data, 436extern int irq_set_affinity_locked(struct irq_data *data,
424 const struct cpumask *cpumask, bool force); 437 const struct cpumask *cpumask, bool force);
438extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
425 439
426#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 440#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
427void irq_move_irq(struct irq_data *data); 441void irq_move_irq(struct irq_data *data);
@@ -458,6 +472,8 @@ extern void handle_nested_irq(unsigned int irq);
458 472
459extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); 473extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
460#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 474#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
475extern void irq_chip_enable_parent(struct irq_data *data);
476extern void irq_chip_disable_parent(struct irq_data *data);
461extern void irq_chip_ack_parent(struct irq_data *data); 477extern void irq_chip_ack_parent(struct irq_data *data);
462extern int irq_chip_retrigger_hierarchy(struct irq_data *data); 478extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
463extern void irq_chip_mask_parent(struct irq_data *data); 479extern void irq_chip_mask_parent(struct irq_data *data);
@@ -467,6 +483,8 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
467 const struct cpumask *dest, 483 const struct cpumask *dest,
468 bool force); 484 bool force);
469extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); 485extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
486extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
487 void *vcpu_info);
470#endif 488#endif
471 489
472/* Handling of unhandled and spurious interrupts: */ 490/* Handling of unhandled and spurious interrupts: */
@@ -517,6 +535,15 @@ irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
517 __irq_set_handler(irq, handle, 1, NULL); 535 __irq_set_handler(irq, handle, 1, NULL);
518} 536}
519 537
538/*
539 * Set a highlevel chained flow handler and its data for a given IRQ.
540 * (a chained handler is automatically enabled and set to
541 * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
542 */
543void
544irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
545 void *data);
546
520void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); 547void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
521 548
522static inline void irq_set_status_flags(unsigned int irq, unsigned long set) 549static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -624,6 +651,23 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
624 return d ? irqd_get_trigger_type(d) : 0; 651 return d ? irqd_get_trigger_type(d) : 0;
625} 652}
626 653
654static inline int irq_data_get_node(struct irq_data *d)
655{
656 return d->node;
657}
658
659static inline struct cpumask *irq_get_affinity_mask(int irq)
660{
661 struct irq_data *d = irq_get_irq_data(irq);
662
663 return d ? d->affinity : NULL;
664}
665
666static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
667{
668 return d->affinity;
669}
670
627unsigned int arch_dynirq_lower_bound(unsigned int from); 671unsigned int arch_dynirq_lower_bound(unsigned int from);
628 672
629int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 673int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
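
For the new vCPU affinity hook, a hierarchy-aware irqchip would typically just forward to its parent using the helper added above. A sketch with a hypothetical chip name:

    static struct irq_chip my_msi_chip = {
        .name                   = "my-msi",
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_set_vcpu_affinity  = irq_chip_set_vcpu_affinity_parent,
    };

Callers reach the hook through irq_set_vcpu_affinity(irq, vcpu_info), which resolves the irq_data for the interrupt and walks down the chip chain.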
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dd1109fb241e..c52d1480f272 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -17,7 +17,7 @@ struct pt_regs;
17 17
18/** 18/**
19 * struct irq_desc - interrupt descriptor 19 * struct irq_desc - interrupt descriptor
20 * @irq_data: per irq and chip data passed down to chip functions 20 * @irq_common_data: per irq and chip data passed down to chip functions
21 * @kstat_irqs: irq stats per cpu 21 * @kstat_irqs: irq stats per cpu
22 * @handle_irq: highlevel irq-events handler 22 * @handle_irq: highlevel irq-events handler
23 * @preflow_handler: handler called before the flow handler (currently used by sparc) 23 * @preflow_handler: handler called before the flow handler (currently used by sparc)
@@ -47,6 +47,7 @@ struct pt_regs;
47 * @name: flow handler name for /proc/interrupts output 47 * @name: flow handler name for /proc/interrupts output
48 */ 48 */
49struct irq_desc { 49struct irq_desc {
50 struct irq_common_data irq_common_data;
50 struct irq_data irq_data; 51 struct irq_data irq_data;
51 unsigned int __percpu *kstat_irqs; 52 unsigned int __percpu *kstat_irqs;
52 irq_flow_handler_t handle_irq; 53 irq_flow_handler_t handle_irq;
@@ -93,6 +94,15 @@ struct irq_desc {
93extern struct irq_desc irq_desc[NR_IRQS]; 94extern struct irq_desc irq_desc[NR_IRQS];
94#endif 95#endif
95 96
97static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
98{
99#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
100 return irq_to_desc(data->irq);
101#else
102 return container_of(data, struct irq_desc, irq_data);
103#endif
104}
105
96static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) 106static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
97{ 107{
98 return &desc->irq_data; 108 return &desc->irq_data;
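
Why irq_data_to_desc() needs the #ifdef: with hierarchical domains an interrupt carries a chain of irq_data objects and only the top-level one is embedded in the descriptor, so container_of() is only safe in the non-hierarchy build. A hypothetical use:

    static bool my_irq_is_level(struct irq_data *data)
    {
        struct irq_desc *desc = irq_data_to_desc(data);

        return irqd_is_level_type(&desc->irq_data);
    }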
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 676d7306a360..744ac0ec98eb 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -258,6 +258,10 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
258/* V2 interfaces to support hierarchy IRQ domains. */ 258/* V2 interfaces to support hierarchy IRQ domains. */
259extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, 259extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
260 unsigned int virq); 260 unsigned int virq);
261extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
262 irq_hw_number_t hwirq, struct irq_chip *chip,
263 void *chip_data, irq_flow_handler_t handler,
264 void *handler_data, const char *handler_name);
261#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 265#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
262extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, 266extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
263 unsigned int flags, unsigned int size, 267 unsigned int flags, unsigned int size,
@@ -281,10 +285,6 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
281 irq_hw_number_t hwirq, 285 irq_hw_number_t hwirq,
282 struct irq_chip *chip, 286 struct irq_chip *chip,
283 void *chip_data); 287 void *chip_data);
284extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
285 irq_hw_number_t hwirq, struct irq_chip *chip,
286 void *chip_data, irq_flow_handler_t handler,
287 void *handler_data, const char *handler_name);
288extern void irq_domain_reset_irq_data(struct irq_data *irq_data); 288extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
289extern void irq_domain_free_irqs_common(struct irq_domain *domain, 289extern void irq_domain_free_irqs_common(struct irq_domain *domain,
290 unsigned int virq, 290 unsigned int virq,
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 20e7f78041c8..edb640ae9a94 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
1035int jbd2_journal_next_log_block(journal_t *, unsigned long long *); 1035int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
1036int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid, 1036int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
1037 unsigned long *block); 1037 unsigned long *block);
1038void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); 1038int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1039void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); 1039void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1040 1040
1041/* Commit management */ 1041/* Commit management */
@@ -1157,7 +1157,7 @@ extern int jbd2_journal_recover (journal_t *journal);
1157extern int jbd2_journal_wipe (journal_t *, int); 1157extern int jbd2_journal_wipe (journal_t *, int);
1158extern int jbd2_journal_skip_recovery (journal_t *); 1158extern int jbd2_journal_skip_recovery (journal_t *);
1159extern void jbd2_journal_update_sb_errno(journal_t *); 1159extern void jbd2_journal_update_sb_errno(journal_t *);
1160extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t, 1160extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
1161 unsigned long, int); 1161 unsigned long, int);
1162extern void __jbd2_journal_abort_hard (journal_t *); 1162extern void __jbd2_journal_abort_hard (journal_t *);
1163extern void jbd2_journal_abort (journal_t *, int); 1163extern void jbd2_journal_abort (journal_t *, int);
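
Since both update-log-tail helpers can now fail, callers are expected to propagate the error rather than continue on a broken journal. A hedged caller sketch, with journal, tid and block assumed in scope and WRITE_FUA as the write flag:

    int ret;

    ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
    if (ret)
        jbd2_journal_abort(journal, ret);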
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c367cbdf73ab..535fd3bb1ba8 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -7,6 +7,7 @@
7#include <linux/time.h> 7#include <linux/time.h>
8#include <linux/timex.h> 8#include <linux/timex.h>
9#include <asm/param.h> /* for HZ */ 9#include <asm/param.h> /* for HZ */
10#include <generated/timeconst.h>
10 11
11/* 12/*
12 * The following defines establish the engineering parameters of the PLL 13 * The following defines establish the engineering parameters of the PLL
@@ -288,8 +289,133 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
288 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; 289 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
289} 290}
290 291
291extern unsigned long msecs_to_jiffies(const unsigned int m); 292extern unsigned long __msecs_to_jiffies(const unsigned int m);
292extern unsigned long usecs_to_jiffies(const unsigned int u); 293#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
294/*
295 * HZ is equal to or smaller than 1000, and 1000 is a nice round
296 * multiple of HZ, divide with the factor between them, but round
297 * upwards:
298 */
299static inline unsigned long _msecs_to_jiffies(const unsigned int m)
300{
301 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
302}
303#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
304/*
305 * HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
306 * simply multiply with the factor between them.
307 *
308 * But first make sure the multiplication result cannot overflow:
309 */
310static inline unsigned long _msecs_to_jiffies(const unsigned int m)
311{
312 if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
313 return MAX_JIFFY_OFFSET;
314 return m * (HZ / MSEC_PER_SEC);
315}
316#else
317/*
318 * Generic case - multiply, round and divide. But first check that if
319 * we are doing a net multiplication, that we wouldn't overflow:
320 */
321static inline unsigned long _msecs_to_jiffies(const unsigned int m)
322{
323 if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
324 return MAX_JIFFY_OFFSET;
325
326 return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
327}
328#endif
329/**
330 * msecs_to_jiffies: - convert milliseconds to jiffies
331 * @m: time in milliseconds
332 *
333 * conversion is done as follows:
334 *
335 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
336 *
337 * - 'too large' values [that would result in larger than
338 * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
339 *
340 * - all other values are converted to jiffies by either multiplying
341 * the input value by a factor or dividing it with a factor and
342 * handling any 32-bit overflows.
343 * for the details see __msecs_to_jiffies()
344 *
345 * msecs_to_jiffies() checks for the passed in value being a constant
346 * via __builtin_constant_p() allowing gcc to eliminate most of the
347 * code, __msecs_to_jiffies() is called if the value passed does not
348 * allow constant folding and the actual conversion must be done at
349 * runtime.
350 * the HZ range specific helpers _msecs_to_jiffies() are called both
351 * directly here and from __msecs_to_jiffies() in the case where
352 * constant folding is not possible.
353 */
354static inline unsigned long msecs_to_jiffies(const unsigned int m)
355{
356 if (__builtin_constant_p(m)) {
357 if ((int)m < 0)
358 return MAX_JIFFY_OFFSET;
359 return _msecs_to_jiffies(m);
360 } else {
361 return __msecs_to_jiffies(m);
362 }
363}
364
365extern unsigned long __usecs_to_jiffies(const unsigned int u);
366#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
367static inline unsigned long _usecs_to_jiffies(const unsigned int u)
368{
369 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
370}
371#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
372static inline unsigned long _usecs_to_jiffies(const unsigned int u)
373{
374 return u * (HZ / USEC_PER_SEC);
375}
376#else
379static inline unsigned long _usecs_to_jiffies(const unsigned int u)
380{
381 return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
382 >> USEC_TO_HZ_SHR32;
383}
384#endif
385
386/**
387 * usecs_to_jiffies: - convert microseconds to jiffies
388 * @u: time in microseconds
389 *
390 * conversion is done as follows:
391 *
392 * - 'too large' values [that would result in larger than
393 * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
394 *
395 * - all other values are converted to jiffies by either multiplying
396 * the input value by a factor or dividing it with a factor and
397 * handling any 32-bit overflows as for msecs_to_jiffies.
398 *
399 * usecs_to_jiffies() checks for the passed in value being a constant
400 * via __builtin_constant_p() allowing gcc to eliminate most of the
401 * code, __usecs_to_jiffies() is called if the value passed does not
402 * allow constant folding and the actual conversion must be done at
403 * runtime.
404 * the HZ range specific helpers _usecs_to_jiffies() are called both
 405 * directly here and from __usecs_to_jiffies() in the case where
406 * constant folding is not possible.
407 */
408static inline unsigned long usecs_to_jiffies(const unsigned int u)
409{
410 if (__builtin_constant_p(u)) {
411 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
412 return MAX_JIFFY_OFFSET;
413 return _usecs_to_jiffies(u);
414 } else {
415 return __usecs_to_jiffies(u);
416 }
417}
418
293extern unsigned long timespec_to_jiffies(const struct timespec *value); 419extern unsigned long timespec_to_jiffies(const struct timespec *value);
294extern void jiffies_to_timespec(const unsigned long jiffies, 420extern void jiffies_to_timespec(const unsigned long jiffies,
295 struct timespec *value); 421 struct timespec *value);
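
The effect of the constant/variable split in msecs_to_jiffies() and usecs_to_jiffies(), sketched below (delay is an assumed runtime variable):

    unsigned long a = msecs_to_jiffies(100);   /* folded to a constant at compile time */
    unsigned long b = msecs_to_jiffies(delay); /* calls out-of-line __msecs_to_jiffies() */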
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3a5b48e52a9e..060dd7b61c6d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -244,7 +244,8 @@ static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
244 244
245#if defined(CONFIG_MMU) && \ 245#if defined(CONFIG_MMU) && \
246 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) 246 (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
247void might_fault(void); 247#define might_fault() __might_fault(__FILE__, __LINE__)
248void __might_fault(const char *file, int line);
248#else 249#else
249static inline void might_fault(void) { } 250static inline void might_fault(void) { }
250#endif 251#endif
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index e705467ddb47..d0a1f99e24e3 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -28,7 +28,8 @@
28extern void kmemleak_init(void) __ref; 28extern void kmemleak_init(void) __ref;
29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
30 gfp_t gfp) __ref; 30 gfp_t gfp) __ref;
31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref; 31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
32 gfp_t gfp) __ref;
32extern void kmemleak_free(const void *ptr) __ref; 33extern void kmemleak_free(const void *ptr) __ref;
33extern void kmemleak_free_part(const void *ptr, size_t size) __ref; 34extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
34extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; 35extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
@@ -71,7 +72,8 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
71 gfp_t gfp) 72 gfp_t gfp)
72{ 73{
73} 74}
74static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) 75static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
76 gfp_t gfp)
75{ 77{
76} 78}
77static inline void kmemleak_free(const void *ptr) 79static inline void kmemleak_free(const void *ptr)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ad45054309a0..9564fd78c547 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -44,6 +44,10 @@
44/* Two fragments for cross MMIO pages. */ 44/* Two fragments for cross MMIO pages. */
45#define KVM_MAX_MMIO_FRAGMENTS 2 45#define KVM_MAX_MMIO_FRAGMENTS 2
46 46
47#ifndef KVM_ADDRESS_SPACE_NUM
48#define KVM_ADDRESS_SPACE_NUM 1
49#endif
50
47/* 51/*
48 * For the normal pfn, the highest 12 bits should be zero, 52 * For the normal pfn, the highest 12 bits should be zero,
49 * so we can mask bit 62 ~ bit 52 to indicate the error pfn, 53 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
@@ -134,6 +138,7 @@ static inline bool is_error_page(struct page *page)
134#define KVM_REQ_ENABLE_IBS 23 138#define KVM_REQ_ENABLE_IBS 23
135#define KVM_REQ_DISABLE_IBS 24 139#define KVM_REQ_DISABLE_IBS 24
136#define KVM_REQ_APIC_PAGE_RELOAD 25 140#define KVM_REQ_APIC_PAGE_RELOAD 25
141#define KVM_REQ_SMI 26
137 142
138#define KVM_USERSPACE_IRQ_SOURCE_ID 0 143#define KVM_USERSPACE_IRQ_SOURCE_ID 0
139#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 144#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -230,6 +235,7 @@ struct kvm_vcpu {
230 235
231 int fpu_active; 236 int fpu_active;
232 int guest_fpu_loaded, guest_xcr0_loaded; 237 int guest_fpu_loaded, guest_xcr0_loaded;
238 unsigned char fpu_counter;
233 wait_queue_head_t wq; 239 wait_queue_head_t wq;
234 struct pid *pid; 240 struct pid *pid;
235 int sigset_active; 241 int sigset_active;
@@ -329,6 +335,13 @@ struct kvm_kernel_irq_routing_entry {
329#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) 335#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
330#endif 336#endif
331 337
338#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
339static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
340{
341 return 0;
342}
343#endif
344
332/* 345/*
333 * Note: 346 * Note:
334 * memslots are not sorted by id anymore, please use id_to_memslot() 347 * memslots are not sorted by id anymore, please use id_to_memslot()
@@ -347,7 +360,7 @@ struct kvm {
347 spinlock_t mmu_lock; 360 spinlock_t mmu_lock;
348 struct mutex slots_lock; 361 struct mutex slots_lock;
349 struct mm_struct *mm; /* userspace tied to this vm */ 362 struct mm_struct *mm; /* userspace tied to this vm */
350 struct kvm_memslots *memslots; 363 struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
351 struct srcu_struct srcu; 364 struct srcu_struct srcu;
352 struct srcu_struct irq_srcu; 365 struct srcu_struct irq_srcu;
353#ifdef CONFIG_KVM_APIC_ARCHITECTURE 366#ifdef CONFIG_KVM_APIC_ARCHITECTURE
@@ -462,13 +475,25 @@ void kvm_exit(void);
462void kvm_get_kvm(struct kvm *kvm); 475void kvm_get_kvm(struct kvm *kvm);
463void kvm_put_kvm(struct kvm *kvm); 476void kvm_put_kvm(struct kvm *kvm);
464 477
465static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) 478static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
466{ 479{
467 return rcu_dereference_check(kvm->memslots, 480 return rcu_dereference_check(kvm->memslots[as_id],
468 srcu_read_lock_held(&kvm->srcu) 481 srcu_read_lock_held(&kvm->srcu)
469 || lockdep_is_held(&kvm->slots_lock)); 482 || lockdep_is_held(&kvm->slots_lock));
470} 483}
471 484
485static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
486{
487 return __kvm_memslots(kvm, 0);
488}
489
490static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
491{
492 int as_id = kvm_arch_vcpu_memslots_id(vcpu);
493
494 return __kvm_memslots(vcpu->kvm, as_id);
495}
496
472static inline struct kvm_memory_slot * 497static inline struct kvm_memory_slot *
473id_to_memslot(struct kvm_memslots *slots, int id) 498id_to_memslot(struct kvm_memslots *slots, int id)
474{ 499{
@@ -500,21 +525,22 @@ enum kvm_mr_change {
500}; 525};
501 526
502int kvm_set_memory_region(struct kvm *kvm, 527int kvm_set_memory_region(struct kvm *kvm,
503 struct kvm_userspace_memory_region *mem); 528 const struct kvm_userspace_memory_region *mem);
504int __kvm_set_memory_region(struct kvm *kvm, 529int __kvm_set_memory_region(struct kvm *kvm,
505 struct kvm_userspace_memory_region *mem); 530 const struct kvm_userspace_memory_region *mem);
506void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 531void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
507 struct kvm_memory_slot *dont); 532 struct kvm_memory_slot *dont);
508int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 533int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
509 unsigned long npages); 534 unsigned long npages);
510void kvm_arch_memslots_updated(struct kvm *kvm); 535void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
511int kvm_arch_prepare_memory_region(struct kvm *kvm, 536int kvm_arch_prepare_memory_region(struct kvm *kvm,
512 struct kvm_memory_slot *memslot, 537 struct kvm_memory_slot *memslot,
513 struct kvm_userspace_memory_region *mem, 538 const struct kvm_userspace_memory_region *mem,
514 enum kvm_mr_change change); 539 enum kvm_mr_change change);
515void kvm_arch_commit_memory_region(struct kvm *kvm, 540void kvm_arch_commit_memory_region(struct kvm *kvm,
516 struct kvm_userspace_memory_region *mem, 541 const struct kvm_userspace_memory_region *mem,
517 const struct kvm_memory_slot *old, 542 const struct kvm_memory_slot *old,
543 const struct kvm_memory_slot *new,
518 enum kvm_mr_change change); 544 enum kvm_mr_change change);
519bool kvm_largepages_enabled(void); 545bool kvm_largepages_enabled(void);
520void kvm_disable_largepages(void); 546void kvm_disable_largepages(void);
@@ -524,8 +550,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
524void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 550void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
525 struct kvm_memory_slot *slot); 551 struct kvm_memory_slot *slot);
526 552
527int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, 553int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
528 int nr_pages); 554 struct page **pages, int nr_pages);
529 555
530struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 556struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
531unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 557unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
@@ -538,13 +564,13 @@ void kvm_release_page_dirty(struct page *page);
538void kvm_set_page_accessed(struct page *page); 564void kvm_set_page_accessed(struct page *page);
539 565
540pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); 566pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
541pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
542 bool write_fault, bool *writable);
543pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); 567pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
544pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, 568pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
545 bool *writable); 569 bool *writable);
546pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); 570pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
547pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); 571pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
572pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
573 bool *async, bool write_fault, bool *writable);
548 574
549void kvm_release_pfn_clean(pfn_t pfn); 575void kvm_release_pfn_clean(pfn_t pfn);
550void kvm_set_pfn_dirty(pfn_t pfn); 576void kvm_set_pfn_dirty(pfn_t pfn);
@@ -573,6 +599,25 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
573unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); 599unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
574void mark_page_dirty(struct kvm *kvm, gfn_t gfn); 600void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
575 601
602struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
603struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
604pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
605pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
606struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
607unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
608unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
609int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
610 int len);
611int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
612 unsigned long len);
613int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
614 unsigned long len);
615int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
616 int offset, int len);
617int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
618 unsigned long len);
619void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
620
576void kvm_vcpu_block(struct kvm_vcpu *vcpu); 621void kvm_vcpu_block(struct kvm_vcpu *vcpu);
577void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 622void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
578int kvm_vcpu_yield_to(struct kvm_vcpu *target); 623int kvm_vcpu_yield_to(struct kvm_vcpu *target);
@@ -762,16 +807,10 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
762} 807}
763#endif 808#endif
764 809
765static inline void kvm_guest_enter(void) 810/* must be called with irqs disabled */
811static inline void __kvm_guest_enter(void)
766{ 812{
767 unsigned long flags;
768
769 BUG_ON(preemptible());
770
771 local_irq_save(flags);
772 guest_enter(); 813 guest_enter();
773 local_irq_restore(flags);
774
775 /* KVM does not hold any references to rcu protected data when it 814 /* KVM does not hold any references to rcu protected data when it
776 * switches CPU into a guest mode. In fact switching to a guest mode 815 * switches CPU into a guest mode. In fact switching to a guest mode
777 * is very similar to exiting to userspace from rcu point of view. In 816 * is very similar to exiting to userspace from rcu point of view. In
@@ -783,12 +822,27 @@ static inline void kvm_guest_enter(void)
783 rcu_virt_note_context_switch(smp_processor_id()); 822 rcu_virt_note_context_switch(smp_processor_id());
784} 823}
785 824
825/* must be called with irqs disabled */
826static inline void __kvm_guest_exit(void)
827{
828 guest_exit();
829}
830
831static inline void kvm_guest_enter(void)
832{
833 unsigned long flags;
834
835 local_irq_save(flags);
836 __kvm_guest_enter();
837 local_irq_restore(flags);
838}
839
786static inline void kvm_guest_exit(void) 840static inline void kvm_guest_exit(void)
787{ 841{
788 unsigned long flags; 842 unsigned long flags;
789 843
790 local_irq_save(flags); 844 local_irq_save(flags);
791 guest_exit(); 845 __kvm_guest_exit();
792 local_irq_restore(flags); 846 local_irq_restore(flags);
793} 847}
794 848
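
The split lets arch vcpu_run loops, which already execute with interrupts disabled, skip the redundant save/restore that kvm_guest_enter()/kvm_guest_exit() always paid. A sketch of the intended fast path:

    local_irq_disable();
    __kvm_guest_enter();   /* irqs are off, as the comment requires */
    /* ... enter and run the guest ... */
    __kvm_guest_exit();
    local_irq_enable();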
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 931da7e917cf..1b47a185c2f0 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -28,6 +28,7 @@ struct kvm_run;
28struct kvm_userspace_memory_region; 28struct kvm_userspace_memory_region;
29struct kvm_vcpu; 29struct kvm_vcpu;
30struct kvm_vcpu_init; 30struct kvm_vcpu_init;
31struct kvm_memslots;
31 32
32enum kvm_mr_change; 33enum kvm_mr_change;
33 34
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0081f000e34b..c92ebd100d9b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -52,10 +52,15 @@ struct lglock {
52 static struct lglock name = { .lock = &name ## _lock } 52 static struct lglock name = { .lock = &name ## _lock }
53 53
54void lg_lock_init(struct lglock *lg, char *name); 54void lg_lock_init(struct lglock *lg, char *name);
55
55void lg_local_lock(struct lglock *lg); 56void lg_local_lock(struct lglock *lg);
56void lg_local_unlock(struct lglock *lg); 57void lg_local_unlock(struct lglock *lg);
57void lg_local_lock_cpu(struct lglock *lg, int cpu); 58void lg_local_lock_cpu(struct lglock *lg, int cpu);
58void lg_local_unlock_cpu(struct lglock *lg, int cpu); 59void lg_local_unlock_cpu(struct lglock *lg, int cpu);
60
61void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
62void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
63
59void lg_global_lock(struct lglock *lg); 64void lg_global_lock(struct lglock *lg);
60void lg_global_unlock(struct lglock *lg); 65void lg_global_unlock(struct lglock *lg);
61 66
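
lg_double_lock()/lg_double_unlock() take two per-CPU locks of one lglock in a stable internal order, so callers need not sort the CPU numbers themselves. A hedged sketch with assumed my_lglock, src_cpu and dst_cpu:

    lg_double_lock(&my_lglock, src_cpu, dst_cpu);
    /* ... move items between the two CPUs' per-cpu data ... */
    lg_double_unlock(&my_lglock, src_cpu, dst_cpu);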
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 28aeae46f355..36ce37bcc963 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -134,7 +134,6 @@ enum {
134 ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, 134 ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
135 135
136 ATA_SHT_EMULATED = 1, 136 ATA_SHT_EMULATED = 1,
137 ATA_SHT_CMD_PER_LUN = 1,
138 ATA_SHT_THIS_ID = -1, 137 ATA_SHT_THIS_ID = -1,
139 ATA_SHT_USE_CLUSTERING = 1, 138 ATA_SHT_USE_CLUSTERING = 1,
140 139
@@ -431,6 +430,7 @@ enum {
431 ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ 430 ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
432 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
433 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ 432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
434 434
435 /* DMA mask for user DMA control: User visible values; DO NOT 435 /* DMA mask for user DMA control: User visible values; DO NOT
436 renumber */ 436 renumber */
@@ -1364,7 +1364,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
1364 .can_queue = ATA_DEF_QUEUE, \ 1364 .can_queue = ATA_DEF_QUEUE, \
1365 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ 1365 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
1366 .this_id = ATA_SHT_THIS_ID, \ 1366 .this_id = ATA_SHT_THIS_ID, \
1367 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
1368 .emulated = ATA_SHT_EMULATED, \ 1367 .emulated = ATA_SHT_EMULATED, \
1369 .use_clustering = ATA_SHT_USE_CLUSTERING, \ 1368 .use_clustering = ATA_SHT_USE_CLUSTERING, \
1370 .proc_name = drv_name, \ 1369 .proc_name = drv_name, \
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index ee6dbb39a809..31db7a05dd36 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
99 struct klp_func *funcs; 99 struct klp_func *funcs;
100 100
101 /* internal */ 101 /* internal */
102 struct kobject *kobj; 102 struct kobject kobj;
103 struct module *mod; 103 struct module *mod;
104 enum klp_state state; 104 enum klp_state state;
105}; 105};
@@ -123,6 +123,12 @@ struct klp_patch {
123 enum klp_state state; 123 enum klp_state state;
124}; 124};
125 125
126#define klp_for_each_object(patch, obj) \
127 for (obj = patch->objs; obj->funcs; obj++)
128
129#define klp_for_each_func(obj, func) \
130 for (func = obj->funcs; func->old_name; func++)
131
126int klp_register_patch(struct klp_patch *); 132int klp_register_patch(struct klp_patch *);
127int klp_unregister_patch(struct klp_patch *); 133int klp_unregister_patch(struct klp_patch *);
128int klp_enable_patch(struct klp_patch *); 134int klp_enable_patch(struct klp_patch *);
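
Typical iteration with the new helpers; patch is an assumed struct klp_patch * and the loop body is illustrative:

    struct klp_object *obj;
    struct klp_func *func;

    klp_for_each_object(patch, obj)
        klp_for_each_func(obj, func)
            pr_debug("patching %s in %s\n", func->old_name,
                     obj->name ? obj->name : "vmlinux");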
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 066ba4157541..70400dc7660f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -130,8 +130,8 @@ enum bounce_type {
130}; 130};
131 131
132struct lock_class_stats { 132struct lock_class_stats {
133 unsigned long contention_point[4]; 133 unsigned long contention_point[LOCKSTAT_POINTS];
134 unsigned long contending_point[4]; 134 unsigned long contending_point[LOCKSTAT_POINTS];
135 struct lock_time read_waittime; 135 struct lock_time read_waittime;
136 struct lock_time write_waittime; 136 struct lock_time write_waittime;
137 struct lock_time read_holdtime; 137 struct lock_time read_holdtime;
@@ -255,6 +255,7 @@ struct held_lock {
255 unsigned int check:1; /* see lock_acquire() comment */ 255 unsigned int check:1; /* see lock_acquire() comment */
256 unsigned int hardirqs_off:1; 256 unsigned int hardirqs_off:1;
257 unsigned int references:12; /* 32 bits */ 257 unsigned int references:12; /* 32 bits */
258 unsigned int pin_count;
258}; 259};
259 260
260/* 261/*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
354extern void lockdep_clear_current_reclaim_state(void); 355extern void lockdep_clear_current_reclaim_state(void);
355extern void lockdep_trace_alloc(gfp_t mask); 356extern void lockdep_trace_alloc(gfp_t mask);
356 357
358extern void lock_pin_lock(struct lockdep_map *lock);
359extern void lock_unpin_lock(struct lockdep_map *lock);
360
357# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, 361# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
358 362
359#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) 363#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask);
368 372
369#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) 373#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
370 374
375#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
376#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
377
371#else /* !CONFIG_LOCKDEP */ 378#else /* !CONFIG_LOCKDEP */
372 379
373static inline void lockdep_off(void) 380static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
420 427
421#define lockdep_recursing(tsk) (0) 428#define lockdep_recursing(tsk) (0)
422 429
430#define lockdep_pin_lock(l) do { (void)(l); } while (0)
431#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
432
423#endif /* !LOCKDEP */ 433#endif /* !LOCKDEP */
424 434
425#ifdef CONFIG_LOCK_STAT 435#ifdef CONFIG_LOCK_STAT
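
Pinning asserts that a held lock is not dropped inside a region; the scheduler uses it around the runqueue lock. A sketch (rq is an assumed runqueue pointer; both calls compile away without CONFIG_LOCKDEP):

    lockdep_pin_lock(&rq->lock);
    /* ... code here must not release rq->lock ... */
    lockdep_unpin_lock(&rq->lock);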
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 611b69fa8594..1f7bc630d225 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -54,11 +54,16 @@ struct mbus_dram_target_info
54 */ 54 */
55#ifdef CONFIG_PLAT_ORION 55#ifdef CONFIG_PLAT_ORION
56extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); 56extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
57extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void);
57#else 58#else
58static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) 59static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
59{ 60{
60 return NULL; 61 return NULL;
61} 62}
63static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void)
64{
65 return NULL;
66}
62#endif 67#endif
63 68
64int mvebu_mbus_save_cpu_target(u32 *store_addr); 69int mvebu_mbus_save_cpu_target(u32 *store_addr);
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index 66c30a763b10..11f00cdabe3d 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -23,7 +23,8 @@ struct mdio_gpio_platform_data {
23 bool mdio_active_low; 23 bool mdio_active_low;
24 bool mdo_active_low; 24 bool mdo_active_low;
25 25
26 unsigned int phy_mask; 26 u32 phy_mask;
27 u32 phy_ignore_ta_mask;
27 int irqs[PHY_MAX_ADDR]; 28 int irqs[PHY_MAX_ADDR];
28 /* reset callback */ 29 /* reset callback */
29 int (*reset)(struct mii_bus *bus); 30 int (*reset)(struct mii_bus *bus);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 9497ec7c77ea..0215ffd63069 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -21,7 +21,11 @@
21#define INIT_PHYSMEM_REGIONS 4 21#define INIT_PHYSMEM_REGIONS 4
22 22
23/* Definition of memblock flags. */ 23/* Definition of memblock flags. */
24#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */ 24enum {
25 MEMBLOCK_NONE = 0x0, /* No special request */
26 MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
27 MEMBLOCK_MIRROR = 0x2, /* mirrored region */
28};
25 29
26struct memblock_region { 30struct memblock_region {
27 phys_addr_t base; 31 phys_addr_t base;
@@ -61,7 +65,7 @@ extern bool movable_node_enabled;
61 65
62phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, 66phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
63 phys_addr_t start, phys_addr_t end, 67 phys_addr_t start, phys_addr_t end,
64 int nid); 68 int nid, ulong flags);
65phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, 69phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
66 phys_addr_t size, phys_addr_t align); 70 phys_addr_t size, phys_addr_t align);
67phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); 71phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
@@ -75,6 +79,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
75void memblock_trim_memory(phys_addr_t align); 79void memblock_trim_memory(phys_addr_t align);
76int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); 80int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
77int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); 81int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
82int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
83ulong choose_memblock_flags(void);
78 84
79/* Low level functions */ 85/* Low level functions */
80int memblock_add_range(struct memblock_type *type, 86int memblock_add_range(struct memblock_type *type,
@@ -85,11 +91,13 @@ int memblock_remove_range(struct memblock_type *type,
85 phys_addr_t base, 91 phys_addr_t base,
86 phys_addr_t size); 92 phys_addr_t size);
87 93
88void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a, 94void __next_mem_range(u64 *idx, int nid, ulong flags,
95 struct memblock_type *type_a,
89 struct memblock_type *type_b, phys_addr_t *out_start, 96 struct memblock_type *type_b, phys_addr_t *out_start,
90 phys_addr_t *out_end, int *out_nid); 97 phys_addr_t *out_end, int *out_nid);
91 98
92void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a, 99void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
100 struct memblock_type *type_a,
93 struct memblock_type *type_b, phys_addr_t *out_start, 101 struct memblock_type *type_b, phys_addr_t *out_start,
94 phys_addr_t *out_end, int *out_nid); 102 phys_addr_t *out_end, int *out_nid);
95 103
@@ -100,16 +108,17 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
100 * @type_a: ptr to memblock_type to iterate 108 * @type_a: ptr to memblock_type to iterate
101 * @type_b: ptr to memblock_type which excludes from the iteration 109 * @type_b: ptr to memblock_type which excludes from the iteration
102 * @nid: node selector, %NUMA_NO_NODE for all nodes 110 * @nid: node selector, %NUMA_NO_NODE for all nodes
111 * @flags: pick from blocks based on memory attributes
103 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 112 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
104 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 113 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
105 * @p_nid: ptr to int for nid of the range, can be %NULL 114 * @p_nid: ptr to int for nid of the range, can be %NULL
106 */ 115 */
107#define for_each_mem_range(i, type_a, type_b, nid, \ 116#define for_each_mem_range(i, type_a, type_b, nid, flags, \
108 p_start, p_end, p_nid) \ 117 p_start, p_end, p_nid) \
109 for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \ 118 for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
110 p_start, p_end, p_nid); \ 119 p_start, p_end, p_nid); \
111 i != (u64)ULLONG_MAX; \ 120 i != (u64)ULLONG_MAX; \
112 __next_mem_range(&i, nid, type_a, type_b, \ 121 __next_mem_range(&i, nid, flags, type_a, type_b, \
113 p_start, p_end, p_nid)) 122 p_start, p_end, p_nid))
114 123
115/** 124/**
@@ -119,17 +128,18 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
119 * @type_a: ptr to memblock_type to iterate 128 * @type_a: ptr to memblock_type to iterate
120 * @type_b: ptr to memblock_type which excludes from the iteration 129 * @type_b: ptr to memblock_type which excludes from the iteration
121 * @nid: node selector, %NUMA_NO_NODE for all nodes 130 * @nid: node selector, %NUMA_NO_NODE for all nodes
131 * @flags: pick from blocks based on memory attributes
122 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 132 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
123 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 133 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
124 * @p_nid: ptr to int for nid of the range, can be %NULL 134 * @p_nid: ptr to int for nid of the range, can be %NULL
125 */ 135 */
126#define for_each_mem_range_rev(i, type_a, type_b, nid, \ 136#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
127 p_start, p_end, p_nid) \ 137 p_start, p_end, p_nid) \
128 for (i = (u64)ULLONG_MAX, \ 138 for (i = (u64)ULLONG_MAX, \
129 __next_mem_range_rev(&i, nid, type_a, type_b, \ 139 __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
130 p_start, p_end, p_nid); \ 140 p_start, p_end, p_nid); \
131 i != (u64)ULLONG_MAX; \ 141 i != (u64)ULLONG_MAX; \
132 __next_mem_range_rev(&i, nid, type_a, type_b, \ 142 __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
133 p_start, p_end, p_nid)) 143 p_start, p_end, p_nid))
134 144
135#ifdef CONFIG_MOVABLE_NODE 145#ifdef CONFIG_MOVABLE_NODE
@@ -153,6 +163,11 @@ static inline bool movable_node_is_enabled(void)
153} 163}
154#endif 164#endif
155 165
166static inline bool memblock_is_mirror(struct memblock_region *m)
167{
168 return m->flags & MEMBLOCK_MIRROR;
169}
170
156#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 171#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
157int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, 172int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
158 unsigned long *end_pfn); 173 unsigned long *end_pfn);
@@ -181,13 +196,14 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
181 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 196 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
182 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 197 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
183 * @p_nid: ptr to int for nid of the range, can be %NULL 198 * @p_nid: ptr to int for nid of the range, can be %NULL
199 * @flags: pick from blocks based on memory attributes
184 * 200 *
185 * Walks over free (memory && !reserved) areas of memblock. Available as 201 * Walks over free (memory && !reserved) areas of memblock. Available as
186 * soon as memblock is initialized. 202 * soon as memblock is initialized.
187 */ 203 */
188#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ 204#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
189 for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ 205 for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
190 nid, p_start, p_end, p_nid) 206 nid, flags, p_start, p_end, p_nid)
191 207
192/** 208/**
193 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas 209 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -196,13 +212,15 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
196 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 212 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
197 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 213 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
198 * @p_nid: ptr to int for nid of the range, can be %NULL 214 * @p_nid: ptr to int for nid of the range, can be %NULL
215 * @flags: pick from blocks based on memory attributes
199 * 216 *
200 * Walks over free (memory && !reserved) areas of memblock in reverse 217 * Walks over free (memory && !reserved) areas of memblock in reverse
201 * order. Available as soon as memblock is initialized. 218 * order. Available as soon as memblock is initialized.
202 */ 219 */
203#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ 220#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
221 p_nid) \
204 for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ 222 for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
205 nid, p_start, p_end, p_nid) 223 nid, flags, p_start, p_end, p_nid)
206 224
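For illustration, a minimal caller of the reworked iterators might look like the sketch below (variable names are placeholders; MEMBLOCK_MIRROR is the attribute flag used by memblock_is_mirror() above, NUMA_NO_NODE comes from linux/numa.h):

    u64 i;
    phys_addr_t start, end;

    /* visit every free range backed by mirrored memory, on any node */
    for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_MIRROR,
                            &start, &end, NULL)
            pr_info("mirrored free range: %pa..%pa\n", &start, &end);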
207static inline void memblock_set_region_flags(struct memblock_region *r, 225static inline void memblock_set_region_flags(struct memblock_region *r,
208 unsigned long flags) 226 unsigned long flags)
@@ -273,7 +291,8 @@ static inline bool memblock_bottom_up(void) { return false; }
273#define MEMBLOCK_ALLOC_ACCESSIBLE 0 291#define MEMBLOCK_ALLOC_ACCESSIBLE 0
274 292
275phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, 293phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
276 phys_addr_t start, phys_addr_t end); 294 phys_addr_t start, phys_addr_t end,
295 ulong flags);
277phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, 296phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
278 phys_addr_t max_addr); 297 phys_addr_t max_addr);
279phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, 298phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
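With the extra ulong flags argument, an early allocation can likewise be filtered by attribute. A sketch, assuming a MEMBLOCK_NONE value (no filtering) defined alongside MEMBLOCK_MIRROR and the SZ_* constants from linux/sizes.h:

    phys_addr_t base;

    /* 1 MiB, 2 MiB-aligned, anywhere below 4 GiB, no attribute filter */
    base = memblock_alloc_range(SZ_1M, SZ_2M, 0,
                                (phys_addr_t)0x100000000ULL /* 4 GiB */,
                                MEMBLOCK_NONE);
    if (!base)
            pr_warn("early allocation failed\n");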
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c8918114804..73b02b0a8f60 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -41,6 +41,7 @@ enum mem_cgroup_stat_index {
41 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 41 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
42 MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */ 42 MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
43 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ 43 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
44 MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
44 MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */ 45 MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
45 MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ 46 MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
46 MEM_CGROUP_STAT_NSTATS, 47 MEM_CGROUP_STAT_NSTATS,
@@ -67,6 +68,8 @@ enum mem_cgroup_events_index {
67}; 68};
68 69
69#ifdef CONFIG_MEMCG 70#ifdef CONFIG_MEMCG
71extern struct cgroup_subsys_state *mem_cgroup_root_css;
72
70void mem_cgroup_events(struct mem_cgroup *memcg, 73void mem_cgroup_events(struct mem_cgroup *memcg,
71 enum mem_cgroup_events_index idx, 74 enum mem_cgroup_events_index idx,
72 unsigned int nr); 75 unsigned int nr);
@@ -112,6 +115,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
112} 115}
113 116
114extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); 117extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
118extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
115 119
116struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, 120struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
117 struct mem_cgroup *, 121 struct mem_cgroup *,
@@ -195,6 +199,8 @@ void mem_cgroup_split_huge_fixup(struct page *head);
195#else /* CONFIG_MEMCG */ 199#else /* CONFIG_MEMCG */
196struct mem_cgroup; 200struct mem_cgroup;
197 201
202#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
203
198static inline void mem_cgroup_events(struct mem_cgroup *memcg, 204static inline void mem_cgroup_events(struct mem_cgroup *memcg,
199 enum mem_cgroup_events_index idx, 205 enum mem_cgroup_events_index idx,
200 unsigned int nr) 206 unsigned int nr)
@@ -382,6 +388,29 @@ enum {
382 OVER_LIMIT, 388 OVER_LIMIT,
383}; 389};
384 390
391#ifdef CONFIG_CGROUP_WRITEBACK
392
393struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
394struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
395void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
396 unsigned long *pdirty, unsigned long *pwriteback);
397
398#else /* CONFIG_CGROUP_WRITEBACK */
399
400static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
401{
402 return NULL;
403}
404
405static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
406 unsigned long *pavail,
407 unsigned long *pdirty,
408 unsigned long *pwriteback)
409{
410}
411
412#endif /* CONFIG_CGROUP_WRITEBACK */
413
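A sketch of how a writeback path might consume the new hook (wb is a struct bdi_writeback *; the variable names are illustrative):

    unsigned long avail, dirty, writeback;

    /* the memcg's view of this writeback domain's headroom and dirty pages */
    mem_cgroup_wb_stats(wb, &avail, &dirty, &writeback);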
385struct sock; 414struct sock;
386#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) 415#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
387void sock_update_memcg(struct sock *sk); 416void sock_update_memcg(struct sock *sk);
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 16a498f48169..2f434f4f79a1 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -117,6 +117,7 @@ struct arizona {
117 int num_core_supplies; 117 int num_core_supplies;
118 struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES]; 118 struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
119 struct regulator *dcvdd; 119 struct regulator *dcvdd;
120 bool has_fully_powered_off;
120 121
121 struct arizona_pdata pdata; 122 struct arizona_pdata pdata;
122 123
@@ -153,7 +154,15 @@ int arizona_request_irq(struct arizona *arizona, int irq, char *name,
153void arizona_free_irq(struct arizona *arizona, int irq, void *data); 154void arizona_free_irq(struct arizona *arizona, int irq, void *data);
154int arizona_set_irq_wake(struct arizona *arizona, int irq, int on); 155int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
155 156
157#ifdef CONFIG_MFD_WM5102
156int wm5102_patch(struct arizona *arizona); 158int wm5102_patch(struct arizona *arizona);
159#else
160static inline int wm5102_patch(struct arizona *arizona)
161{
162 return 0;
163}
164#endif
165
157int wm5110_patch(struct arizona *arizona); 166int wm5110_patch(struct arizona *arizona);
158int wm8997_patch(struct arizona *arizona); 167int wm8997_patch(struct arizona *arizona);
159 168
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 1789cb0f4f17..f6722677e6d0 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -156,7 +156,10 @@ struct arizona_pdata {
156 /** MICBIAS configurations */ 156 /** MICBIAS configurations */
157 struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS]; 157 struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
158 158
159 /** Mode of input structures */ 159 /**
160 * Mode of input structures
161 * One of the ARIZONA_INMODE_xxx values
162 */
160 int inmode[ARIZONA_MAX_INPUT]; 163 int inmode[ARIZONA_MAX_INPUT];
161 164
162 /** Mode for outputs */ 165 /** Mode for outputs */
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index aacc10d7789c..3499d36e6067 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -2515,9 +2515,12 @@
2515#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */ 2515#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
2516#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */ 2516#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
2517#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */ 2517#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
2518#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */ 2518#define ARIZONA_IN1_MODE_MASK 0x0400 /* IN1_MODE - [10] */
2519#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */ 2519#define ARIZONA_IN1_MODE_SHIFT 10 /* IN1_MODE - [10] */
2520#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */ 2520#define ARIZONA_IN1_MODE_WIDTH 1 /* IN1_MODE - [10] */
2521#define ARIZONA_IN1_SINGLE_ENDED_MASK 0x0200 /* IN1_MODE - [9] */
2522#define ARIZONA_IN1_SINGLE_ENDED_SHIFT 9 /* IN1_MODE - [9] */
2523#define ARIZONA_IN1_SINGLE_ENDED_WIDTH 1 /* IN1_MODE - [9] */
2521#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */ 2524#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
2522#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */ 2525#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
2523#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */ 2526#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
@@ -2588,9 +2591,12 @@
2588#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */ 2591#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
2589#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */ 2592#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
2590#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */ 2593#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
2591#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */ 2594#define ARIZONA_IN2_MODE_MASK 0x0400 /* IN2_MODE - [10] */
2592#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */ 2595#define ARIZONA_IN2_MODE_SHIFT 10 /* IN2_MODE - [10] */
2593#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */ 2596#define ARIZONA_IN2_MODE_WIDTH 1 /* IN2_MODE - [10] */
2597#define ARIZONA_IN2_SINGLE_ENDED_MASK 0x0200 /* IN2_MODE - [9] */
2598#define ARIZONA_IN2_SINGLE_ENDED_SHIFT 9 /* IN2_MODE - [9] */
2599#define ARIZONA_IN2_SINGLE_ENDED_WIDTH 1 /* IN2_MODE - [9] */
2594#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */ 2600#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
2595#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */ 2601#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
2596#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */ 2602#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
@@ -2661,9 +2667,12 @@
2661#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */ 2667#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
2662#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */ 2668#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
2663#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */ 2669#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
2664#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */ 2670#define ARIZONA_IN3_MODE_MASK 0x0400 /* IN3_MODE - [10] */
2665#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */ 2671#define ARIZONA_IN3_MODE_SHIFT 10 /* IN3_MODE - [10] */
2666#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */ 2672#define ARIZONA_IN3_MODE_WIDTH 1 /* IN3_MODE - [10] */
2673#define ARIZONA_IN3_SINGLE_ENDED_MASK 0x0200 /* IN3_MODE - [9] */
2674#define ARIZONA_IN3_SINGLE_ENDED_SHIFT 9 /* IN3_MODE - [9] */
2675#define ARIZONA_IN3_SINGLE_ENDED_WIDTH 1 /* IN3_MODE - [9] */
2667#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */ 2676#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
2668#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */ 2677#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
2669#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */ 2678#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index dfabd6db7ddf..02f97dc568ac 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,7 @@
14enum { 14enum {
15 AXP202_ID = 0, 15 AXP202_ID = 0,
16 AXP209_ID, 16 AXP209_ID,
17 AXP221_ID,
17 AXP288_ID, 18 AXP288_ID,
18 NR_AXP20X_VARIANTS, 19 NR_AXP20X_VARIANTS,
19}; 20};
@@ -45,6 +46,28 @@ enum {
45#define AXP20X_V_LTF_DISCHRG 0x3c 46#define AXP20X_V_LTF_DISCHRG 0x3c
46#define AXP20X_V_HTF_DISCHRG 0x3d 47#define AXP20X_V_HTF_DISCHRG 0x3d
47 48
49#define AXP22X_PWR_OUT_CTRL1 0x10
50#define AXP22X_PWR_OUT_CTRL2 0x12
51#define AXP22X_PWR_OUT_CTRL3 0x13
52#define AXP22X_DLDO1_V_OUT 0x15
53#define AXP22X_DLDO2_V_OUT 0x16
54#define AXP22X_DLDO3_V_OUT 0x17
55#define AXP22X_DLDO4_V_OUT 0x18
56#define AXP22X_ELDO1_V_OUT 0x19
57#define AXP22X_ELDO2_V_OUT 0x1a
58#define AXP22X_ELDO3_V_OUT 0x1b
59#define AXP22X_DC5LDO_V_OUT 0x1c
60#define AXP22X_DCDC1_V_OUT 0x21
61#define AXP22X_DCDC2_V_OUT 0x22
62#define AXP22X_DCDC3_V_OUT 0x23
63#define AXP22X_DCDC4_V_OUT 0x24
64#define AXP22X_DCDC5_V_OUT 0x25
65#define AXP22X_DCDC23_V_RAMP_CTRL 0x27
66#define AXP22X_ALDO1_V_OUT 0x28
67#define AXP22X_ALDO2_V_OUT 0x29
68#define AXP22X_ALDO3_V_OUT 0x2a
69#define AXP22X_CHRG_CTRL3 0x35
70
48/* Interrupt */ 71/* Interrupt */
49#define AXP20X_IRQ1_EN 0x40 72#define AXP20X_IRQ1_EN 0x40
50#define AXP20X_IRQ2_EN 0x41 73#define AXP20X_IRQ2_EN 0x41
@@ -100,6 +123,9 @@ enum {
100#define AXP20X_VBUS_MON 0x8b 123#define AXP20X_VBUS_MON 0x8b
101#define AXP20X_OVER_TMP 0x8f 124#define AXP20X_OVER_TMP 0x8f
102 125
126#define AXP22X_PWREN_CTRL1 0x8c
127#define AXP22X_PWREN_CTRL2 0x8d
128
103/* GPIO */ 129/* GPIO */
104#define AXP20X_GPIO0_CTRL 0x90 130#define AXP20X_GPIO0_CTRL 0x90
105#define AXP20X_LDO5_V_OUT 0x91 131#define AXP20X_LDO5_V_OUT 0x91
@@ -108,6 +134,11 @@ enum {
108#define AXP20X_GPIO20_SS 0x94 134#define AXP20X_GPIO20_SS 0x94
109#define AXP20X_GPIO3_CTRL 0x95 135#define AXP20X_GPIO3_CTRL 0x95
110 136
137#define AXP22X_LDO_IO0_V_OUT 0x91
138#define AXP22X_LDO_IO1_V_OUT 0x93
139#define AXP22X_GPIO_STATE 0x94
140#define AXP22X_GPIO_PULL_DOWN 0x95
141
111/* Battery */ 142/* Battery */
112#define AXP20X_CHRG_CC_31_24 0xb0 143#define AXP20X_CHRG_CC_31_24 0xb0
113#define AXP20X_CHRG_CC_23_16 0xb1 144#define AXP20X_CHRG_CC_23_16 0xb1
@@ -120,6 +151,9 @@ enum {
120#define AXP20X_CC_CTRL 0xb8 151#define AXP20X_CC_CTRL 0xb8
121#define AXP20X_FG_RES 0xb9 152#define AXP20X_FG_RES 0xb9
122 153
154/* AXP22X specific registers */
155#define AXP22X_BATLOW_THRES1 0xe6
156
123/* AXP288 specific registers */ 157/* AXP288 specific registers */
124#define AXP288_PMIC_ADC_H 0x56 158#define AXP288_PMIC_ADC_H 0x56
125#define AXP288_PMIC_ADC_L 0x57 159#define AXP288_PMIC_ADC_L 0x57
@@ -158,6 +192,30 @@ enum {
158 AXP20X_REG_ID_MAX, 192 AXP20X_REG_ID_MAX,
159}; 193};
160 194
195enum {
196 AXP22X_DCDC1 = 0,
197 AXP22X_DCDC2,
198 AXP22X_DCDC3,
199 AXP22X_DCDC4,
200 AXP22X_DCDC5,
201 AXP22X_DC1SW,
202 AXP22X_DC5LDO,
203 AXP22X_ALDO1,
204 AXP22X_ALDO2,
205 AXP22X_ALDO3,
206 AXP22X_ELDO1,
207 AXP22X_ELDO2,
208 AXP22X_ELDO3,
209 AXP22X_DLDO1,
210 AXP22X_DLDO2,
211 AXP22X_DLDO3,
212 AXP22X_DLDO4,
213 AXP22X_RTC_LDO,
214 AXP22X_LDO_IO0,
215 AXP22X_LDO_IO1,
216 AXP22X_REG_ID_MAX,
217};
218
161/* IRQs */ 219/* IRQs */
162enum { 220enum {
163 AXP20X_IRQ_ACIN_OVER_V = 1, 221 AXP20X_IRQ_ACIN_OVER_V = 1,
@@ -199,6 +257,34 @@ enum {
199 AXP20X_IRQ_GPIO0_INPUT, 257 AXP20X_IRQ_GPIO0_INPUT,
200}; 258};
201 259
260enum axp22x_irqs {
261 AXP22X_IRQ_ACIN_OVER_V = 1,
262 AXP22X_IRQ_ACIN_PLUGIN,
263 AXP22X_IRQ_ACIN_REMOVAL,
264 AXP22X_IRQ_VBUS_OVER_V,
265 AXP22X_IRQ_VBUS_PLUGIN,
266 AXP22X_IRQ_VBUS_REMOVAL,
267 AXP22X_IRQ_VBUS_V_LOW,
268 AXP22X_IRQ_BATT_PLUGIN,
269 AXP22X_IRQ_BATT_REMOVAL,
270 AXP22X_IRQ_BATT_ENT_ACT_MODE,
271 AXP22X_IRQ_BATT_EXIT_ACT_MODE,
272 AXP22X_IRQ_CHARG,
273 AXP22X_IRQ_CHARG_DONE,
274 AXP22X_IRQ_BATT_TEMP_HIGH,
275 AXP22X_IRQ_BATT_TEMP_LOW,
276 AXP22X_IRQ_DIE_TEMP_HIGH,
277 AXP22X_IRQ_PEK_SHORT,
278 AXP22X_IRQ_PEK_LONG,
279 AXP22X_IRQ_LOW_PWR_LVL1,
280 AXP22X_IRQ_LOW_PWR_LVL2,
281 AXP22X_IRQ_TIMER,
282 AXP22X_IRQ_PEK_RIS_EDGE,
283 AXP22X_IRQ_PEK_FAL_EDGE,
284 AXP22X_IRQ_GPIO1_INPUT,
285 AXP22X_IRQ_GPIO0_INPUT,
286};
287
202enum axp288_irqs { 288enum axp288_irqs {
203 AXP288_IRQ_VBUS_FALL = 2, 289 AXP288_IRQ_VBUS_FALL = 2,
204 AXP288_IRQ_VBUS_RISE, 290 AXP288_IRQ_VBUS_RISE,
@@ -275,4 +361,11 @@ struct axp20x_fg_pdata {
275 int thermistor_curve[MAX_THERM_CURVE_SIZE][2]; 361 int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
276}; 362};
277 363
364struct axp20x_chrg_pdata {
365 int max_cc;
366 int max_cv;
367 int def_cc;
368 int def_cv;
369};
370
278#endif /* __LINUX_MFD_AXP20X_H */ 371#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 324a34683971..da72671a42fa 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -17,10 +17,29 @@
17#define __LINUX_MFD_CROS_EC_H 17#define __LINUX_MFD_CROS_EC_H
18 18
19#include <linux/cdev.h> 19#include <linux/cdev.h>
20#include <linux/device.h>
20#include <linux/notifier.h> 21#include <linux/notifier.h>
21#include <linux/mfd/cros_ec_commands.h> 22#include <linux/mfd/cros_ec_commands.h>
22#include <linux/mutex.h> 23#include <linux/mutex.h>
23 24
25#define CROS_EC_DEV_NAME "cros_ec"
26#define CROS_EC_DEV_PD_NAME "cros_pd"
27
28/*
29 * The EC is unresponsive for a time after a reboot command. Add a
30 * simple delay to make sure that the bus stays locked.
31 */
32#define EC_REBOOT_DELAY_MS 50
33
34/*
35 * Max bus-specific overhead incurred by request/responses.
36 * I2C requires 1 additional byte for requests.
37 * I2C requires 2 additional bytes for responses.
38 */
39#define EC_PROTO_VERSION_UNKNOWN 0
40#define EC_MAX_REQUEST_OVERHEAD 1
41#define EC_MAX_RESPONSE_OVERHEAD 2
42
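These constants let a transport driver size its buffers once the protocol limits are known. A sketch, assuming the worst-case I2C overhead named in the comment above (exact sizing varies per transport):

    /* fields are from struct cros_ec_device further down */
    ec_dev->dout_size = ec_dev->max_request + EC_MAX_REQUEST_OVERHEAD;
    ec_dev->din_size = ec_dev->max_response + EC_MAX_RESPONSE_OVERHEAD;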
24/* 43/*
25 * Command interface between EC and AP, for LPC, I2C and SPI interfaces. 44 * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
26 */ 45 */
@@ -42,8 +61,7 @@ enum {
42 * @outsize: Outgoing length in bytes 61 * @outsize: Outgoing length in bytes
43 * @insize: Max number of bytes to accept from EC 62 * @insize: Max number of bytes to accept from EC
44 * @result: EC's response to the command (separate from communication failure) 63 * @result: EC's response to the command (separate from communication failure)
45 * @outdata: Outgoing data to EC 64 * @data: Where to put the incoming data from EC and outgoing data to EC
46 * @indata: Where to put the incoming data from EC
47 */ 65 */
48struct cros_ec_command { 66struct cros_ec_command {
49 uint32_t version; 67 uint32_t version;
@@ -51,18 +69,14 @@ struct cros_ec_command {
51 uint32_t outsize; 69 uint32_t outsize;
52 uint32_t insize; 70 uint32_t insize;
53 uint32_t result; 71 uint32_t result;
54 uint8_t outdata[EC_PROTO2_MAX_PARAM_SIZE]; 72 uint8_t data[0];
55 uint8_t indata[EC_PROTO2_MAX_PARAM_SIZE];
56}; 73};
57 74
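With the separate outdata/indata buffers collapsed into a single trailing data[] array, callers now allocate the header and payload together. A sketch (outsize, insize and the payload copy are illustrative; EC_CMD_HELLO is from cros_ec_commands.h):

    struct cros_ec_command *msg;

    /* command header and payload share one allocation */
    msg = kmalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
    if (!msg)
            return -ENOMEM;
    msg->version = 0;
    msg->command = EC_CMD_HELLO;
    msg->outsize = outsize;
    msg->insize = insize;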
58/** 75/**
59 * struct cros_ec_device - Information about a ChromeOS EC device 76 * struct cros_ec_device - Information about a ChromeOS EC device
60 * 77 *
61 * @ec_name: name of EC device (e.g. 'chromeos-ec')
62 * @phys_name: name of physical comms layer (e.g. 'i2c-4') 78 * @phys_name: name of physical comms layer (e.g. 'i2c-4')
63 * @dev: Device pointer for physical comms device 79 * @dev: Device pointer for physical comms device
64 * @vdev: Device pointer for virtual comms device
65 * @cdev: Character device structure for virtual comms device
66 * @was_wake_device: true if this device was set to wake the system from 80 * @was_wake_device: true if this device was set to wake the system from
67 * sleep at the last suspend 81 * sleep at the last suspend
68 * @cmd_readmem: direct read of the EC memory-mapped region, if supported 82 * @cmd_readmem: direct read of the EC memory-mapped region, if supported
@@ -74,6 +88,7 @@ struct cros_ec_command {
74 * 88 *
75 * @priv: Private data 89 * @priv: Private data
76 * @irq: Interrupt to use 90 * @irq: Interrupt to use
91 * @id: Device id
77 * @din: input buffer (for data from EC) 92 * @din: input buffer (for data from EC)
78 * @dout: output buffer (for data to EC) 93 * @dout: output buffer (for data to EC)
79 * \note 94 * \note
@@ -85,41 +100,72 @@ struct cros_ec_command {
85 * to using dword. 100 * to using dword.
86 * @din_size: size of din buffer to allocate (zero to use static din) 101 * @din_size: size of din buffer to allocate (zero to use static din)
87 * @dout_size: size of dout buffer to allocate (zero to use static dout) 102 * @dout_size: size of dout buffer to allocate (zero to use static dout)
88 * @parent: pointer to parent device (e.g. i2c or spi device)
89 * @wake_enabled: true if this device can wake the system from sleep 103 * @wake_enabled: true if this device can wake the system from sleep
90 * @cmd_xfer: send command to EC and get response 104 * @cmd_xfer: send command to EC and get response
91 * Returns the number of bytes received if the communication succeeded, but 105 * Returns the number of bytes received if the communication succeeded, but
92 * that doesn't mean the EC was happy with the command. The caller 106 * that doesn't mean the EC was happy with the command. The caller
93 * should check msg.result for the EC's result code. 107 * should check msg.result for the EC's result code.
108 * @pkt_xfer: send packet to EC and get response
94 * @lock: one transaction at a time 109 * @lock: one transaction at a time
95 */ 110 */
96struct cros_ec_device { 111struct cros_ec_device {
97 112
98 /* These are used by other drivers that want to talk to the EC */ 113 /* These are used by other drivers that want to talk to the EC */
99 const char *ec_name;
100 const char *phys_name; 114 const char *phys_name;
101 struct device *dev; 115 struct device *dev;
102 struct device *vdev;
103 struct cdev cdev;
104 bool was_wake_device; 116 bool was_wake_device;
105 struct class *cros_class; 117 struct class *cros_class;
106 int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, 118 int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
107 unsigned int bytes, void *dest); 119 unsigned int bytes, void *dest);
108 120
109 /* These are used to implement the platform-specific interface */ 121 /* These are used to implement the platform-specific interface */
122 u16 max_request;
123 u16 max_response;
124 u16 max_passthru;
125 u16 proto_version;
110 void *priv; 126 void *priv;
111 int irq; 127 int irq;
112 uint8_t *din; 128 u8 *din;
113 uint8_t *dout; 129 u8 *dout;
114 int din_size; 130 int din_size;
115 int dout_size; 131 int dout_size;
116 struct device *parent;
117 bool wake_enabled; 132 bool wake_enabled;
118 int (*cmd_xfer)(struct cros_ec_device *ec, 133 int (*cmd_xfer)(struct cros_ec_device *ec,
119 struct cros_ec_command *msg); 134 struct cros_ec_command *msg);
135 int (*pkt_xfer)(struct cros_ec_device *ec,
136 struct cros_ec_command *msg);
120 struct mutex lock; 137 struct mutex lock;
121}; 138};
122 139
140/* struct cros_ec_platform - ChromeOS EC platform information
141 *
142 * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
143 * used in /dev/ and sysfs.
144 * @cmd_offset: offset to apply for each command. Set when
145 * registering a device behind another one.
146 */
147struct cros_ec_platform {
148 const char *ec_name;
149 u16 cmd_offset;
150};
151
152/*
153 * struct cros_ec_dev - ChromeOS EC device entry point
154 *
155 * @class_dev: Device structure used in sysfs
156 * @cdev: Character device structure in /dev
157 * @ec_dev: cros_ec_device structure to talk to the physical device
158 * @dev: pointer to the platform device
159 * @cmd_offset: offset to apply for each command.
160 */
161struct cros_ec_dev {
162 struct device class_dev;
163 struct cdev cdev;
164 struct cros_ec_device *ec_dev;
165 struct device *dev;
166 u16 cmd_offset;
167};
168
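A sketch of how a board might instantiate the two entry points (EC_CMD_PASSTHRU_OFFSET is added to cros_ec_commands.h later in this diff; using sub-device index 1 for the PD MCU is a board-specific assumption):

    static const struct cros_ec_platform ec_p = {
            .ec_name = CROS_EC_DEV_NAME,
            .cmd_offset = EC_CMD_PASSTHRU_OFFSET(0),  /* == 0 */
    };

    static const struct cros_ec_platform pd_p = {
            .ec_name = CROS_EC_DEV_PD_NAME,
            .cmd_offset = EC_CMD_PASSTHRU_OFFSET(1),  /* PD sits behind the EC */
    };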
123/** 169/**
124 * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device 170 * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
125 * 171 *
@@ -198,4 +244,16 @@ int cros_ec_remove(struct cros_ec_device *ec_dev);
198 */ 244 */
199int cros_ec_register(struct cros_ec_device *ec_dev); 245int cros_ec_register(struct cros_ec_device *ec_dev);
200 246
247/**
248 * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
249 *
250 * @ec_dev: Device to query
251 * @return 0 if ok, -ve on error
252 */
253int cros_ec_query_all(struct cros_ec_device *ec_dev);
254
255/* sysfs stuff */
256extern struct attribute_group cros_ec_attr_group;
257extern struct attribute_group cros_ec_lightbar_attr_group;
258
201#endif /* __LINUX_MFD_CROS_EC_H */ 259#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index a49cd41feea7..13b630c10d4c 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -515,7 +515,7 @@ struct ec_host_response {
515/* 515/*
516 * Notes on commands: 516 * Notes on commands:
517 * 517 *
518 * Each command is an 8-byte command value. Commands which take params or 518 * Each command is a 16-bit command value. Commands which take params or
519 * return response data specify structs for that data. If no struct is 519 * return response data specify structs for that data. If no struct is
520 * specified, the command does not input or output data, respectively. 520 * specified, the command does not input or output data, respectively.
521 * Parameter/response length is implicit in the structs. Some underlying 521 * Parameter/response length is implicit in the structs. Some underlying
@@ -966,7 +966,7 @@ struct rgb_s {
966/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a 966/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
967 * host command, but the alignment is the same regardless. Keep it that way. 967 * host command, but the alignment is the same regardless. Keep it that way.
968 */ 968 */
969struct lightbar_params { 969struct lightbar_params_v0 {
970 /* Timing */ 970 /* Timing */
971 int32_t google_ramp_up; 971 int32_t google_ramp_up;
972 int32_t google_ramp_down; 972 int32_t google_ramp_down;
@@ -1000,32 +1000,81 @@ struct lightbar_params {
1000 struct rgb_s color[8]; /* 0-3 are Google colors */ 1000 struct rgb_s color[8]; /* 0-3 are Google colors */
1001} __packed; 1001} __packed;
1002 1002
1003struct lightbar_params_v1 {
1004 /* Timing */
1005 int32_t google_ramp_up;
1006 int32_t google_ramp_down;
1007 int32_t s3s0_ramp_up;
1008 int32_t s0_tick_delay[2]; /* AC=0/1 */
1009 int32_t s0a_tick_delay[2]; /* AC=0/1 */
1010 int32_t s0s3_ramp_down;
1011 int32_t s3_sleep_for;
1012 int32_t s3_ramp_up;
1013 int32_t s3_ramp_down;
1014 int32_t tap_tick_delay;
1015 int32_t tap_display_time;
1016
1017 /* Tap-for-battery params */
1018 uint8_t tap_pct_red;
1019 uint8_t tap_pct_green;
1020 uint8_t tap_seg_min_on;
1021 uint8_t tap_seg_max_on;
1022 uint8_t tap_seg_osc;
1023 uint8_t tap_idx[3];
1024
1025 /* Oscillation */
1026 uint8_t osc_min[2]; /* AC=0/1 */
1027 uint8_t osc_max[2]; /* AC=0/1 */
1028 uint8_t w_ofs[2]; /* AC=0/1 */
1029
1030 /* Brightness limits based on the backlight and AC. */
1031 uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
1032 uint8_t bright_bl_on_min[2]; /* AC=0/1 */
1033 uint8_t bright_bl_on_max[2]; /* AC=0/1 */
1034
1035 /* Battery level thresholds */
1036 uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
1037
1038 /* Map [AC][battery_level] to color index */
1039 uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
1040 uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
1041
1042 /* Color palette */
1043 struct rgb_s color[8]; /* 0-3 are Google colors */
1044} __packed;
1045
1003struct ec_params_lightbar { 1046struct ec_params_lightbar {
1004 uint8_t cmd; /* Command (see enum lightbar_command) */ 1047 uint8_t cmd; /* Command (see enum lightbar_command) */
1005 union { 1048 union {
1006 struct { 1049 struct {
1007 /* no args */ 1050 /* no args */
1008 } dump, off, on, init, get_seq, get_params, version; 1051 } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
1052 version, get_brightness, get_demo;
1009 1053
1010 struct num { 1054 struct {
1011 uint8_t num; 1055 uint8_t num;
1012 } brightness, seq, demo; 1056 } set_brightness, seq, demo;
1013 1057
1014 struct reg { 1058 struct {
1015 uint8_t ctrl, reg, value; 1059 uint8_t ctrl, reg, value;
1016 } reg; 1060 } reg;
1017 1061
1018 struct rgb { 1062 struct {
1019 uint8_t led, red, green, blue; 1063 uint8_t led, red, green, blue;
1020 } rgb; 1064 } set_rgb;
1065
1066 struct {
1067 uint8_t led;
1068 } get_rgb;
1021 1069
1022 struct lightbar_params set_params; 1070 struct lightbar_params_v0 set_params_v0;
1071 struct lightbar_params_v1 set_params_v1;
1023 }; 1072 };
1024} __packed; 1073} __packed;
1025 1074
1026struct ec_response_lightbar { 1075struct ec_response_lightbar {
1027 union { 1076 union {
1028 struct dump { 1077 struct {
1029 struct { 1078 struct {
1030 uint8_t reg; 1079 uint8_t reg;
1031 uint8_t ic0; 1080 uint8_t ic0;
@@ -1033,20 +1082,26 @@ struct ec_response_lightbar {
1033 } vals[23]; 1082 } vals[23];
1034 } dump; 1083 } dump;
1035 1084
1036 struct get_seq { 1085 struct {
1037 uint8_t num; 1086 uint8_t num;
1038 } get_seq; 1087 } get_seq, get_brightness, get_demo;
1039 1088
1040 struct lightbar_params get_params; 1089 struct lightbar_params_v0 get_params_v0;
1090 struct lightbar_params_v1 get_params_v1;
1041 1091
1042 struct version { 1092 struct {
1043 uint32_t num; 1093 uint32_t num;
1044 uint32_t flags; 1094 uint32_t flags;
1045 } version; 1095 } version;
1046 1096
1047 struct { 1097 struct {
1098 uint8_t red, green, blue;
1099 } get_rgb;
1100
1101 struct {
1048 /* no return params */ 1102 /* no return params */
1049 } off, on, init, brightness, seq, reg, rgb, demo, set_params; 1103 } off, on, init, set_brightness, seq, reg, set_rgb,
1104 demo, set_params_v0, set_params_v1;
1050 }; 1105 };
1051} __packed; 1106} __packed;
1052 1107
@@ -1056,15 +1111,20 @@ enum lightbar_command {
1056 LIGHTBAR_CMD_OFF = 1, 1111 LIGHTBAR_CMD_OFF = 1,
1057 LIGHTBAR_CMD_ON = 2, 1112 LIGHTBAR_CMD_ON = 2,
1058 LIGHTBAR_CMD_INIT = 3, 1113 LIGHTBAR_CMD_INIT = 3,
1059 LIGHTBAR_CMD_BRIGHTNESS = 4, 1114 LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
1060 LIGHTBAR_CMD_SEQ = 5, 1115 LIGHTBAR_CMD_SEQ = 5,
1061 LIGHTBAR_CMD_REG = 6, 1116 LIGHTBAR_CMD_REG = 6,
1062 LIGHTBAR_CMD_RGB = 7, 1117 LIGHTBAR_CMD_SET_RGB = 7,
1063 LIGHTBAR_CMD_GET_SEQ = 8, 1118 LIGHTBAR_CMD_GET_SEQ = 8,
1064 LIGHTBAR_CMD_DEMO = 9, 1119 LIGHTBAR_CMD_DEMO = 9,
1065 LIGHTBAR_CMD_GET_PARAMS = 10, 1120 LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
1066 LIGHTBAR_CMD_SET_PARAMS = 11, 1121 LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
1067 LIGHTBAR_CMD_VERSION = 12, 1122 LIGHTBAR_CMD_VERSION = 12,
1123 LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
1124 LIGHTBAR_CMD_GET_RGB = 14,
1125 LIGHTBAR_CMD_GET_DEMO = 15,
1126 LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
1127 LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
1068 LIGHTBAR_NUM_CMDS 1128 LIGHTBAR_NUM_CMDS
1069}; 1129};
1070 1130
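A sketch of driving one of the renamed sub-commands; the surrounding host-command plumbing is omitted, and the brightness value is arbitrary:

    struct ec_params_lightbar p = {
            .cmd = LIGHTBAR_CMD_SET_BRIGHTNESS,
            .set_brightness = { .num = 0x80 },  /* roughly half brightness */
    };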
@@ -1421,8 +1481,40 @@ struct ec_response_rtc {
1421/*****************************************************************************/ 1481/*****************************************************************************/
1422/* Port80 log access */ 1482/* Port80 log access */
1423 1483
1484/* Maximum entries that can be read/written in a single command */
1485#define EC_PORT80_SIZE_MAX 32
1486
1424/* Get last port80 code from previous boot */ 1487/* Get last port80 code from previous boot */
1425#define EC_CMD_PORT80_LAST_BOOT 0x48 1488#define EC_CMD_PORT80_LAST_BOOT 0x48
1489#define EC_CMD_PORT80_READ 0x48
1490
1491enum ec_port80_subcmd {
1492 EC_PORT80_GET_INFO = 0,
1493 EC_PORT80_READ_BUFFER,
1494};
1495
1496struct ec_params_port80_read {
1497 uint16_t subcmd;
1498 union {
1499 struct {
1500 uint32_t offset;
1501 uint32_t num_entries;
1502 } read_buffer;
1503 };
1504} __packed;
1505
1506struct ec_response_port80_read {
1507 union {
1508 struct {
1509 uint32_t writes;
1510 uint32_t history_size;
1511 uint32_t last_boot;
1512 } get_info;
1513 struct {
1514 uint16_t codes[EC_PORT80_SIZE_MAX];
1515 } data;
1516 };
1517} __packed;
1426 1518
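A sketch of the two-step use of the new read command (first size the history, then page through it):

    struct ec_params_port80_read p = { .subcmd = EC_PORT80_GET_INFO };

    /* after get_info reports history_size, fetch a page of codes */
    p.subcmd = EC_PORT80_READ_BUFFER;
    p.read_buffer.offset = 0;
    p.read_buffer.num_entries = EC_PORT80_SIZE_MAX;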
1427struct ec_response_port80_last_boot { 1519struct ec_response_port80_last_boot {
1428 uint16_t code; 1520 uint16_t code;
@@ -1782,6 +1874,7 @@ struct ec_params_gpio_set {
1782/* Get GPIO value */ 1874/* Get GPIO value */
1783#define EC_CMD_GPIO_GET 0x93 1875#define EC_CMD_GPIO_GET 0x93
1784 1876
1877/* Version 0 of input params and response */
1785struct ec_params_gpio_get { 1878struct ec_params_gpio_get {
1786 char name[32]; 1879 char name[32];
1787} __packed; 1880} __packed;
@@ -1789,6 +1882,38 @@ struct ec_response_gpio_get {
1789 uint8_t val; 1882 uint8_t val;
1790} __packed; 1883} __packed;
1791 1884
1885/* Version 1 of input params and response */
1886struct ec_params_gpio_get_v1 {
1887 uint8_t subcmd;
1888 union {
1889 struct {
1890 char name[32];
1891 } get_value_by_name;
1892 struct {
1893 uint8_t index;
1894 } get_info;
1895 };
1896} __packed;
1897
1898struct ec_response_gpio_get_v1 {
1899 union {
1900 struct {
1901 uint8_t val;
1902 } get_value_by_name, get_count;
1903 struct {
1904 uint8_t val;
1905 char name[32];
1906 uint32_t flags;
1907 } get_info;
1908 };
1909} __packed;
1910
1911enum gpio_get_subcmd {
1912 EC_GPIO_GET_BY_NAME = 0,
1913 EC_GPIO_GET_COUNT = 1,
1914 EC_GPIO_GET_INFO = 2,
1915};
1916
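A sketch of the v1 form, which can enumerate GPIOs by index rather than only look them up by name:

    /* fetch GPIO 0's name, current value and flags in one call */
    struct ec_params_gpio_get_v1 p = {
            .subcmd = EC_GPIO_GET_INFO,
            .get_info = { .index = 0 },
    };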
1792/*****************************************************************************/ 1917/*****************************************************************************/
1793/* I2C commands. Only available when flash write protect is unlocked. */ 1918/* I2C commands. Only available when flash write protect is unlocked. */
1794 1919
@@ -1857,13 +1982,21 @@ struct ec_params_charge_control {
1857/*****************************************************************************/ 1982/*****************************************************************************/
1858 1983
1859/* 1984/*
1860 * Cut off battery power output if the battery supports. 1985 * Cut off battery power immediately or after the host has shut down.
1861 * 1986 *
1862 * For unsupported battery, just don't implement this command and lets EC 1987 * Return EC_RES_INVALID_COMMAND if unsupported by a board/battery.
1863 * return EC_RES_INVALID_COMMAND. 1988 * EC_RES_SUCCESS if the command was successful.
1989 * EC_RES_ERROR if the cut off command failed.
1864 */ 1990 */
1991
1865#define EC_CMD_BATTERY_CUT_OFF 0x99 1992#define EC_CMD_BATTERY_CUT_OFF 0x99
1866 1993
1994#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
1995
1996struct ec_params_battery_cutoff {
1997 uint8_t flags;
1998} __packed;
1999
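A sketch of the deferred form the new flag enables (sent as the parameter block of EC_CMD_BATTERY_CUT_OFF; whether a given EC honours the flag is board-dependent):

    /* arm a cut-off that fires only once the host has shut down */
    struct ec_params_battery_cutoff p = {
            .flags = EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN,
    };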
1867/*****************************************************************************/ 2000/*****************************************************************************/
1868/* USB port mux control. */ 2001/* USB port mux control. */
1869 2002
@@ -2142,6 +2275,32 @@ struct ec_params_sb_wr_block {
2142} __packed; 2275} __packed;
2143 2276
2144/*****************************************************************************/ 2277/*****************************************************************************/
2278/* Battery vendor parameters
2279 *
2280 * Get or set vendor-specific parameters in the battery. Implementations may
2281 * differ between boards or batteries. On a set operation, the response
2282 * contains the actual value set, which may be rounded or clipped from the
2283 * requested value.
2284 */
2285
2286#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
2287
2288enum ec_battery_vendor_param_mode {
2289 BATTERY_VENDOR_PARAM_MODE_GET = 0,
2290 BATTERY_VENDOR_PARAM_MODE_SET,
2291};
2292
2293struct ec_params_battery_vendor_param {
2294 uint32_t param;
2295 uint32_t value;
2296 uint8_t mode;
2297} __packed;
2298
2299struct ec_response_battery_vendor_param {
2300 uint32_t value;
2301} __packed;
2302
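A sketch of a get; for a set, .mode would be BATTERY_VENDOR_PARAM_MODE_SET and .value would carry the requested value, with the (possibly rounded) result coming back in the response:

    struct ec_params_battery_vendor_param p = {
            .param = 0,  /* vendor-defined parameter id */
            .mode = BATTERY_VENDOR_PARAM_MODE_GET,
    };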
2303/*****************************************************************************/
2145/* System commands */ 2304/* System commands */
2146 2305
2147/* 2306/*
@@ -2338,6 +2497,80 @@ struct ec_params_reboot_ec {
2338 2497
2339/*****************************************************************************/ 2498/*****************************************************************************/
2340/* 2499/*
2500 * PD commands
2501 *
2502 * These commands are for PD MCU communication.
2503 */
2504
2505/* EC to PD MCU exchange status command */
2506#define EC_CMD_PD_EXCHANGE_STATUS 0x100
2507
2508/* Status of EC being sent to PD */
2509struct ec_params_pd_status {
2510 int8_t batt_soc; /* battery state of charge */
2511} __packed;
2512
2513/* Status of PD being sent back to EC */
2514struct ec_response_pd_status {
2515 int8_t status; /* PD MCU status */
2516 uint32_t curr_lim_ma; /* input current limit */
2517} __packed;
2518
2519/* Set USB type-C port role and muxes */
2520#define EC_CMD_USB_PD_CONTROL 0x101
2521
2522enum usb_pd_control_role {
2523 USB_PD_CTRL_ROLE_NO_CHANGE = 0,
2524 USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
2525 USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
2526 USB_PD_CTRL_ROLE_FORCE_SINK = 3,
2527 USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
2528};
2529
2530enum usb_pd_control_mux {
2531 USB_PD_CTRL_MUX_NO_CHANGE = 0,
2532 USB_PD_CTRL_MUX_NONE = 1,
2533 USB_PD_CTRL_MUX_USB = 2,
2534 USB_PD_CTRL_MUX_DP = 3,
2535 USB_PD_CTRL_MUX_DOCK = 4,
2536 USB_PD_CTRL_MUX_AUTO = 5,
2537};
2538
2539struct ec_params_usb_pd_control {
2540 uint8_t port;
2541 uint8_t role;
2542 uint8_t mux;
2543} __packed;
2544
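A sketch of a control request built from these enums (port numbering is platform-specific):

    /* force port 0 to sink power; leave the mux where it is */
    struct ec_params_usb_pd_control p = {
            .port = 0,
            .role = USB_PD_CTRL_ROLE_FORCE_SINK,
            .mux = USB_PD_CTRL_MUX_NO_CHANGE,
    };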
2545/*****************************************************************************/
2546/*
2547 * Passthru commands
2548 *
2549 * Some platforms have sub-processors chained to each other. For example:
2550 *
2551 * AP <--> EC <--> PD MCU
2552 *
2553 * The top 2 bits of the command number are used to indicate which device the
2554 * command is intended for. Device 0 is always the device receiving the
2555 * command; other device mapping is board-specific.
2556 *
2557 * When a device receives a command to be passed to a sub-processor, it passes
2558 * it on with the device number set back to 0. This allows the sub-processor
2559 * to remain blissfully unaware of whether the command originated on the next
2560 * device up the chain, or was passed through from the AP.
2561 *
2562 * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
2563 * AP sends command 0x4002 to the EC
2564 * EC sends command 0x0002 to the PD MCU
2565 * EC forwards PD MCU response back to the AP
2566 */
2567
2568/* Offset and max command number for sub-device n */
2569#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
2570#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
2571
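The 0x0002 -> 0x4002 example above, expressed with the macros:

    uint16_t cmd = EC_CMD_PASSTHRU_OFFSET(1) + 0x0002;  /* 0x4002 */
    bool ok = cmd >= EC_CMD_PASSTHRU_OFFSET(1) &&
              cmd <= EC_CMD_PASSTHRU_MAX(1);            /* true */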
2572/*****************************************************************************/
2573/*
2341 * Deprecated constants. These constants have been renamed for clarity. The 2574 * Deprecated constants. These constants have been renamed for clarity. The
2342 * meaning and size has not changed. Programs that use the old names should 2575 * meaning and size has not changed. Programs that use the old names should
2343 * switch to the new names soon, as the old names may not be carried forward 2576 * switch to the new names soon, as the old names may not be carried forward
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index 956afa445998..5dc743fd63a6 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -89,6 +89,6 @@ static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg,
89int da9055_device_init(struct da9055 *da9055); 89int da9055_device_init(struct da9055 *da9055);
90void da9055_device_exit(struct da9055 *da9055); 90void da9055_device_exit(struct da9055 *da9055);
91 91
92extern struct regmap_config da9055_regmap_config; 92extern const struct regmap_config da9055_regmap_config;
93 93
94#endif /* __DA9055_CORE_H */ 94#endif /* __DA9055_CORE_H */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 95c8742215a7..612383bd80ae 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -103,6 +103,7 @@ struct da9063;
103struct da9063_pdata { 103struct da9063_pdata {
104 int (*init)(struct da9063 *da9063); 104 int (*init)(struct da9063 *da9063);
105 int irq_base; 105 int irq_base;
106 bool key_power;
106 unsigned flags; 107 unsigned flags;
107 struct da9063_regulators_pdata *regulators_pdata; 108 struct da9063_regulators_pdata *regulators_pdata;
108 struct led_platform_data *leds_pdata; 109 struct led_platform_data *leds_pdata;
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index bb995ab9a575..d4b72d519115 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -125,9 +125,4 @@ enum max77686_opmode {
125 MAX77686_OPMODE_STANDBY, 125 MAX77686_OPMODE_STANDBY,
126}; 126};
127 127
128struct max77686_opmode_data {
129 int id;
130 int mode;
131};
132
133#endif /* __LINUX_MFD_MAX77686_H */ 128#endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index c9d869027300..cb83883918a7 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -118,47 +118,6 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
118#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) 118#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
119 119
120/** 120/**
121 * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
122 * data
123 * @sample_time: ADC converstion time in number of clock.
124 * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
125 * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
126 * recommended is 4.
127 * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
128 * @ref_sel: ADC reference source
129 * (0 -> internal reference, 1 -> external reference)
130 * @adc_freq: ADC Clock speed
131 * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz)
132 * @ave_ctrl: Sample average control
133 * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
134 * @touch_det_delay: Touch detect interrupt delay
135 * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
136 * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
137 * recommended is 3
138 * @settling: Panel driver settling time
139 * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
140 * 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms)
141 * recommended is 2
142 * @fraction_z: Length of the fractional part in z
143 * (fraction_z ([0..7]) = Count of the fractional part)
144 * recommended is 7
145 * @i_drive: current limit value of the touchscreen drivers
146 * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
147 *
148 * */
149struct stmpe_ts_platform_data {
150 u8 sample_time;
151 u8 mod_12b;
152 u8 ref_sel;
153 u8 adc_freq;
154 u8 ave_ctrl;
155 u8 touch_det_delay;
156 u8 settling;
157 u8 fraction_z;
158 u8 i_drive;
159};
160
161/**
162 * struct stmpe_platform_data - STMPE platform data 121 * struct stmpe_platform_data - STMPE platform data
163 * @id: device id to distinguish between multiple STMPEs on the same board 122 * @id: device id to distinguish between multiple STMPEs on the same board
164 * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*) 123 * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
@@ -168,7 +127,6 @@ struct stmpe_ts_platform_data {
168 * @irq_over_gpio: true if gpio is used to get irq 127 * @irq_over_gpio: true if gpio is used to get irq
169 * @irq_gpio: gpio number over which irq will be requested (significant only if 128 * @irq_gpio: gpio number over which irq will be requested (significant only if
170 * irq_over_gpio is true) 129 * irq_over_gpio is true)
171 * @ts: touchscreen-specific platform data
172 */ 130 */
173struct stmpe_platform_data { 131struct stmpe_platform_data {
174 int id; 132 int id;
@@ -178,8 +136,6 @@ struct stmpe_platform_data {
178 bool irq_over_gpio; 136 bool irq_over_gpio;
179 int irq_gpio; 137 int irq_gpio;
180 int autosleep_timeout; 138 int autosleep_timeout;
181
182 struct stmpe_ts_platform_data *ts;
183}; 139};
184 140
185#endif 141#endif
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index f62e7cf227c6..58391f2e0414 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -35,6 +35,8 @@
35 35
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/if_link.h> 37#include <linux/if_link.h>
38#include <linux/mlx4/device.h>
39#include <linux/netdevice.h>
38 40
39enum { 41enum {
40 /* initialization and general commands */ 42 /* initialization and general commands */
@@ -300,6 +302,10 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para
300struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); 302struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
301void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); 303void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
302 304
305int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
306 struct mlx4_counter *counter_stats, int reset);
307int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
308 struct ifla_vf_stats *vf_stats);
303u32 mlx4_comm_get_version(void); 309u32 mlx4_comm_get_version(void);
304int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); 310int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
305int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); 311int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 83e80ab94500..fd13c1ce3b4a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -46,8 +46,9 @@
46 46
47#define MAX_MSIX_P_PORT 17 47#define MAX_MSIX_P_PORT 17
48#define MAX_MSIX 64 48#define MAX_MSIX 64
49#define MSIX_LEGACY_SZ 4
50#define MIN_MSIX_P_PORT 5 49#define MIN_MSIX_P_PORT 5
50#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
51 (dev_cap).num_ports * MIN_MSIX_P_PORT)
51 52
52#define MLX4_MAX_100M_UNITS_VAL 255 /* 53#define MLX4_MAX_100M_UNITS_VAL 255 /*
53 * work around: can't set values 54 * work around: can't set values
@@ -528,7 +529,6 @@ struct mlx4_caps {
528 int num_eqs; 529 int num_eqs;
529 int reserved_eqs; 530 int reserved_eqs;
530 int num_comp_vectors; 531 int num_comp_vectors;
531 int comp_pool;
532 int num_mpts; 532 int num_mpts;
533 int max_fmr_maps; 533 int max_fmr_maps;
534 int num_mtts; 534 int num_mtts;
@@ -771,6 +771,14 @@ union mlx4_ext_av {
771 struct mlx4_eth_av eth; 771 struct mlx4_eth_av eth;
772}; 772};
773 773
774/* Counters should saturate once they reach their maximum value */
775#define ASSIGN_32BIT_COUNTER(counter, value) do { \
776 if ((value) > U32_MAX) \
777 counter = cpu_to_be32(U32_MAX); \
778 else \
779 counter = cpu_to_be32(value); \
780} while (0)
781
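A usage sketch; the 'stats' structure and its __be32 rx_frames field are hypothetical stand-ins for whatever big-endian counter a caller maintains:

    u64 total = sw_rx_frames + hw_rx_frames;  /* illustrative 64-bit sum */

    /* clamps to U32_MAX before the big-endian store */
    ASSIGN_32BIT_COUNTER(stats->rx_frames, total);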
774struct mlx4_counter { 782struct mlx4_counter {
775 u8 reserved1[3]; 783 u8 reserved1[3];
776 u8 counter_mode; 784 u8 counter_mode;
@@ -829,6 +837,12 @@ struct mlx4_dev {
829 struct mlx4_vf_dev *dev_vfs; 837 struct mlx4_vf_dev *dev_vfs;
830}; 838};
831 839
840struct mlx4_clock_params {
841 u64 offset;
842 u8 bar;
843 u8 size;
844};
845
832struct mlx4_eqe { 846struct mlx4_eqe {
833 u8 reserved1; 847 u8 reserved1;
834 u8 type; 848 u8 type;
@@ -957,6 +971,7 @@ struct mlx4_mad_ifc {
957 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 971 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
958 972
959#define MLX4_INVALID_SLAVE_ID 0xFF 973#define MLX4_INVALID_SLAVE_ID 0xFF
974#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
960 975
961void handle_port_mgmt_change_event(struct work_struct *work); 976void handle_port_mgmt_change_event(struct work_struct *work);
962 977
@@ -1332,10 +1347,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
1332int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); 1347int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1333int mlx4_SYNC_TPT(struct mlx4_dev *dev); 1348int mlx4_SYNC_TPT(struct mlx4_dev *dev);
1334int mlx4_test_interrupts(struct mlx4_dev *dev); 1349int mlx4_test_interrupts(struct mlx4_dev *dev);
1335int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, 1350u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
1336 int *vector); 1351bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
1352struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
1353int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
1337void mlx4_release_eq(struct mlx4_dev *dev, int vec); 1354void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1338 1355
1356int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
1339int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); 1357int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
1340 1358
1341int mlx4_get_phys_port_id(struct mlx4_dev *dev); 1359int mlx4_get_phys_port_id(struct mlx4_dev *dev);
@@ -1344,6 +1362,7 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
1344 1362
1345int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); 1363int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
1346void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); 1364void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
1365int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
1347 1366
1348void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, 1367void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
1349 int port); 1368 int port);
@@ -1485,4 +1504,7 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1485 enum mlx4_access_reg_method method, 1504 enum mlx4_access_reg_method method,
1486 struct mlx4_ptys_reg *ptys_reg); 1505 struct mlx4_ptys_reg *ptys_reg);
1487 1506
1507int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1508 struct mlx4_clock_params *params);
1509
1488#endif /* MLX4_DEVICE_H */ 1510#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2695ced222df..abc4767695e4 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
169 struct mlx5_query_cq_mbox_out *out); 169 struct mlx5_query_cq_mbox_out *out);
170int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 170int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
171 struct mlx5_modify_cq_mbox_in *in, int in_sz); 171 struct mlx5_modify_cq_mbox_in *in, int in_sz);
172int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
173 struct mlx5_core_cq *cq, u16 cq_period,
174 u16 cq_max_count);
172int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); 175int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
173void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); 176void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
174 177
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index abf65c790421..b943cd9e2097 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -35,6 +35,7 @@
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <rdma/ib_verbs.h> 37#include <rdma/ib_verbs.h>
38#include <linux/mlx5/mlx5_ifc.h>
38 39
39#if defined(__LITTLE_ENDIAN) 40#if defined(__LITTLE_ENDIAN)
40#define MLX5_SET_HOST_ENDIANNESS 0 41#define MLX5_SET_HOST_ENDIANNESS 0
@@ -58,6 +59,8 @@
58#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) 59#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
59#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) 60#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
60#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) 61#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
62#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
63#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
61#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) 64#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
62#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) 65#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
63 66
@@ -70,6 +73,14 @@
70 << __mlx5_dw_bit_off(typ, fld))); \ 73 << __mlx5_dw_bit_off(typ, fld))); \
71} while (0) 74} while (0)
72 75
76#define MLX5_SET_TO_ONES(typ, p, fld) do { \
77 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
78 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
79 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
80 (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
81 << __mlx5_dw_bit_off(typ, fld))); \
82} while (0)
83
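A sketch of the typical use, filling a match-criteria mask; fte_match_set_lyr_2_4 and its ethertype field come from mlx5_ifc.h, and match_criteria is a caller-provided buffer:

    /* wildcard the outer ethertype in a flow-table match mask */
    MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, match_criteria, ethertype);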
73#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ 84#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
74__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ 85__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
75__mlx5_mask(typ, fld)) 86__mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@ __mlx5_mask(typ, fld))
88 99
89#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) 100#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
90 101
102#define MLX5_GET64_PR(typ, p, fld) ({ \
103 u64 ___t = MLX5_GET64(typ, p, fld); \
104 pr_debug(#fld " = 0x%llx\n", ___t); \
105 ___t; \
106})
107
91enum { 108enum {
92 MLX5_MAX_COMMANDS = 32, 109 MLX5_MAX_COMMANDS = 32,
93 MLX5_CMD_DATA_BLOCK_SIZE = 512, 110 MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -115,6 +132,10 @@ enum {
115}; 132};
116 133
117enum { 134enum {
135 MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
136};
137
138enum {
118 MLX5_MIN_PKEY_TABLE_SIZE = 128, 139 MLX5_MIN_PKEY_TABLE_SIZE = 128,
119 MLX5_MAX_LOG_PKEY_TABLE = 5, 140 MLX5_MAX_LOG_PKEY_TABLE = 5,
120}; 141};
@@ -264,6 +285,7 @@ enum {
264 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, 285 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
265 MLX5_OPCODE_SEND = 0x0a, 286 MLX5_OPCODE_SEND = 0x0a,
266 MLX5_OPCODE_SEND_IMM = 0x0b, 287 MLX5_OPCODE_SEND_IMM = 0x0b,
288 MLX5_OPCODE_LSO = 0x0e,
267 MLX5_OPCODE_RDMA_READ = 0x10, 289 MLX5_OPCODE_RDMA_READ = 0x10,
268 MLX5_OPCODE_ATOMIC_CS = 0x11, 290 MLX5_OPCODE_ATOMIC_CS = 0x11,
269 MLX5_OPCODE_ATOMIC_FA = 0x12, 291 MLX5_OPCODE_ATOMIC_FA = 0x12,
@@ -312,13 +334,6 @@ enum {
312 MLX5_CAP_OFF_CMDIF_CSUM = 46, 334 MLX5_CAP_OFF_CMDIF_CSUM = 46,
313}; 335};
314 336
315enum {
316 HCA_CAP_OPMOD_GET_MAX = 0,
317 HCA_CAP_OPMOD_GET_CUR = 1,
318 HCA_CAP_OPMOD_GET_ODP_MAX = 4,
319 HCA_CAP_OPMOD_GET_ODP_CUR = 5
320};
321
322struct mlx5_inbox_hdr { 337struct mlx5_inbox_hdr {
323 __be16 opcode; 338 __be16 opcode;
324 u8 rsvd[4]; 339 u8 rsvd[4];
@@ -541,6 +556,10 @@ struct mlx5_cmd_prot_block {
541 u8 sig; 556 u8 sig;
542}; 557};
543 558
559enum {
560 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
561};
562
544struct mlx5_err_cqe { 563struct mlx5_err_cqe {
545 u8 rsvd0[32]; 564 u8 rsvd0[32];
546 __be32 srqn; 565 __be32 srqn;
@@ -554,13 +573,22 @@ struct mlx5_err_cqe {
554}; 573};
555 574
556struct mlx5_cqe64 { 575struct mlx5_cqe64 {
557 u8 rsvd0[17]; 576 u8 rsvd0[4];
577 u8 lro_tcppsh_abort_dupack;
578 u8 lro_min_ttl;
579 __be16 lro_tcp_win;
580 __be32 lro_ack_seq_num;
581 __be32 rss_hash_result;
582 u8 rss_hash_type;
558 u8 ml_path; 583 u8 ml_path;
559 u8 rsvd20[4]; 584 u8 rsvd20[2];
585 __be16 check_sum;
560 __be16 slid; 586 __be16 slid;
561 __be32 flags_rqpn; 587 __be32 flags_rqpn;
562 u8 rsvd28[4]; 588 u8 hds_ip_ext;
563 __be32 srqn; 589 u8 l4_hdr_type_etc;
590 __be16 vlan_info;
591 __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
564 __be32 imm_inval_pkey; 592 __be32 imm_inval_pkey;
565 u8 rsvd40[4]; 593 u8 rsvd40[4];
566 __be32 byte_cnt; 594 __be32 byte_cnt;
@@ -571,6 +599,40 @@ struct mlx5_cqe64 {
571 u8 op_own; 599 u8 op_own;
572}; 600};
573 601
602static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
603{
604 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
605}
606
607static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
608{
609 return (cqe->l4_hdr_type_etc >> 4) & 0x7;
610}
611
612static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
613{
614 return !!(cqe->l4_hdr_type_etc & 0x1);
615}
616
617enum {
618 CQE_L4_HDR_TYPE_NONE = 0x0,
619 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
620 CQE_L4_HDR_TYPE_UDP = 0x2,
621 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
622 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
623};
624
625enum {
626 CQE_RSS_HTYPE_IP = 0x3 << 6,
627 CQE_RSS_HTYPE_L4 = 0x3 << 2,
628};
629
630enum {
631 CQE_L2_OK = 1 << 0,
632 CQE_L3_OK = 1 << 1,
633 CQE_L4_OK = 1 << 2,
634};
635
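Taken together, the expanded CQE layout and the helpers above let a receive path classify completions without open-coding shifts. A hedged sketch, where the pr_debug() reporting stands in for real skb handling:

/* Sketch: classify a receive completion using only identifiers
 * defined in this header.
 */
static void sketch_handle_rx_cqe(struct mlx5_cqe64 *cqe)
{
        u8 l4 = get_cqe_l4_hdr_type(cqe);

        if (cqe_has_vlan(cqe))
                pr_debug("vlan tag 0x%x\n", be16_to_cpu(cqe->vlan_info));

        if (l4 == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA ||
            l4 == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA)
                pr_debug("TCP ACK, psh=%d\n", get_cqe_lro_tcppsh(cqe));
}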
574struct mlx5_sig_err_cqe { 636struct mlx5_sig_err_cqe {
575 u8 rsvd0[16]; 637 u8 rsvd0[16];
576 __be32 expected_trans_sig; 638 __be32 expected_trans_sig;
@@ -996,4 +1058,135 @@ struct mlx5_destroy_psv_out {
996 u8 rsvd[8]; 1058 u8 rsvd[8];
997}; 1059};
998 1060
1061#define MLX5_CMD_OP_MAX 0x920
1062
1063enum {
1064 VPORT_STATE_DOWN = 0x0,
1065 VPORT_STATE_UP = 0x1,
1066};
1067
1068enum {
1069 MLX5_L3_PROT_TYPE_IPV4 = 0,
1070 MLX5_L3_PROT_TYPE_IPV6 = 1,
1071};
1072
1073enum {
1074 MLX5_L4_PROT_TYPE_TCP = 0,
1075 MLX5_L4_PROT_TYPE_UDP = 1,
1076};
1077
1078enum {
1079 MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
1080 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
1081 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
1082 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
1083 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
1084};
1085
1086enum {
1087 MLX5_MATCH_OUTER_HEADERS = 1 << 0,
1088 MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
1089 MLX5_MATCH_INNER_HEADERS = 1 << 2,
1090
1091};
1092
1093enum {
1094 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
1095 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
1096};
1097
1098enum {
1099 MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
1100 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
1101 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
1102};
1103
1104enum {
1105 MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
1106 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
1107};
1108
1109/* MLX5 DEV CAPs */
1110
1111/* TODO: EAT.ME */
1112enum mlx5_cap_mode {
1113 HCA_CAP_OPMOD_GET_MAX = 0,
1114 HCA_CAP_OPMOD_GET_CUR = 1,
1115};
1116
1117enum mlx5_cap_type {
1118 MLX5_CAP_GENERAL = 0,
1119 MLX5_CAP_ETHERNET_OFFLOADS,
1120 MLX5_CAP_ODP,
1121 MLX5_CAP_ATOMIC,
1122 MLX5_CAP_ROCE,
1123 MLX5_CAP_IPOIB_OFFLOADS,
1124 MLX5_CAP_EOIB_OFFLOADS,
1125 MLX5_CAP_FLOW_TABLE,
1126 /* NUM OF CAP Types */
1127 MLX5_CAP_NUM
1128};
1129
1130/* GET Dev Caps macros */
1131#define MLX5_CAP_GEN(mdev, cap) \
1132 MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1133
1134#define MLX5_CAP_GEN_MAX(mdev, cap) \
1135 MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1136
1137#define MLX5_CAP_ETH(mdev, cap) \
1138 MLX5_GET(per_protocol_networking_offload_caps,\
1139 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1140
1141#define MLX5_CAP_ETH_MAX(mdev, cap) \
1142 MLX5_GET(per_protocol_networking_offload_caps,\
1143 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1144
1145#define MLX5_CAP_ROCE(mdev, cap) \
1146 MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1147
1148#define MLX5_CAP_ROCE_MAX(mdev, cap) \
1149 MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1150
1151#define MLX5_CAP_ATOMIC(mdev, cap) \
1152 MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1153
1154#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1155 MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1156
1157#define MLX5_CAP_FLOWTABLE(mdev, cap) \
1158 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1159
1160#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1161 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1162
1163#define MLX5_CAP_ODP(mdev, cap)\
1164 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1165
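The macros above index the per-type capability pages stored in mlx5_core_dev (hca_caps_cur for the enabled values, hca_caps_max for the device limits; see the driver.h hunk below). A sketch of the intended pattern, using field names (eth_net_offloads, lro_cap) taken from the layouts added in this diff:

/* Sketch: gate a feature on both what the device supports at most
 * and what is currently enabled.
 */
static bool sketch_lro_usable(struct mlx5_core_dev *mdev)
{
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads))
                return false;
        return MLX5_CAP_ETH_MAX(mdev, lro_cap) &&  /* could be enabled */
               MLX5_CAP_ETH(mdev, lro_cap);        /* is enabled */
}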
1166enum {
1167 MLX5_CMD_STAT_OK = 0x0,
1168 MLX5_CMD_STAT_INT_ERR = 0x1,
1169 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
1170 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
1171 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
1172 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
1173 MLX5_CMD_STAT_RES_BUSY = 0x6,
1174 MLX5_CMD_STAT_LIM_ERR = 0x8,
1175 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
1176 MLX5_CMD_STAT_IX_ERR = 0xa,
1177 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
1178 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
1179 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
1180 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
1181 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
1182 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1183};
1184
1185static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1186{
1187 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1188 return 0;
1189 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1190}
1191
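mlx5_to_sw_pkey_sz() decodes the HCA's log encoding of the P_Key table size; the encodings line up with the MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_* values added to mlx5_ifc.h below (0x0 maps to 128 entries, 0x5 to 4K). For example:

/* Sketch: 128 << 5 == 4096 entries; out-of-range encodings (> 5)
 * return 0 so callers can reject them.
 */
u16 entries = mlx5_to_sw_pkey_sz(MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES);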
999#endif /* MLX5_DEVICE_H */ 1192#endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9a90e7523dc2..5722d88c2429 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,7 +44,6 @@
44 44
45#include <linux/mlx5/device.h> 45#include <linux/mlx5/device.h>
46#include <linux/mlx5/doorbell.h> 46#include <linux/mlx5/doorbell.h>
47#include <linux/mlx5/mlx5_ifc.h>
48 47
49enum { 48enum {
50 MLX5_BOARD_ID_LEN = 64, 49 MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
85}; 84};
86 85
87enum { 86enum {
88 MLX5_MAX_EQ_NAME = 32 87 MLX5_MAX_IRQ_NAME = 32
89}; 88};
90 89
91enum { 90enum {
@@ -108,6 +107,7 @@ enum {
108 MLX5_REG_PUDE = 0x5009, 107 MLX5_REG_PUDE = 0x5009,
109 MLX5_REG_PMPE = 0x5010, 108 MLX5_REG_PMPE = 0x5010,
110 MLX5_REG_PELC = 0x500e, 109 MLX5_REG_PELC = 0x500e,
110 MLX5_REG_PVLC = 0x500f,
111 MLX5_REG_PMLP = 0, /* TBD */ 111 MLX5_REG_PMLP = 0, /* TBD */
112 MLX5_REG_NODE_DESC = 0x6001, 112 MLX5_REG_NODE_DESC = 0x6001,
113 MLX5_REG_HOST_ENDIANNESS = 0x7004, 113 MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +150,11 @@ enum mlx5_dev_event {
150 MLX5_DEV_EVENT_CLIENT_REREG, 150 MLX5_DEV_EVENT_CLIENT_REREG,
151}; 151};
152 152
153enum mlx5_port_status {
154 MLX5_PORT_UP = 1 << 1,
155 MLX5_PORT_DOWN = 1 << 2,
156};
157
153struct mlx5_uuar_info { 158struct mlx5_uuar_info {
154 struct mlx5_uar *uars; 159 struct mlx5_uar *uars;
155 int num_uars; 160 int num_uars;
@@ -269,56 +274,7 @@ struct mlx5_cmd {
269struct mlx5_port_caps { 274struct mlx5_port_caps {
270 int gid_table_len; 275 int gid_table_len;
271 int pkey_table_len; 276 int pkey_table_len;
272}; 277 u8 ext_port_cap;
273
274struct mlx5_general_caps {
275 u8 log_max_eq;
276 u8 log_max_cq;
277 u8 log_max_qp;
278 u8 log_max_mkey;
279 u8 log_max_pd;
280 u8 log_max_srq;
281 u8 log_max_strq;
282 u8 log_max_mrw_sz;
283 u8 log_max_bsf_list_size;
284 u8 log_max_klm_list_size;
285 u32 max_cqes;
286 int max_wqes;
287 u32 max_eqes;
288 u32 max_indirection;
289 int max_sq_desc_sz;
290 int max_rq_desc_sz;
291 int max_dc_sq_desc_sz;
292 u64 flags;
293 u16 stat_rate_support;
294 int log_max_msg;
295 int num_ports;
296 u8 log_max_ra_res_qp;
297 u8 log_max_ra_req_qp;
298 int max_srq_wqes;
299 int bf_reg_size;
300 int bf_regs_per_page;
301 struct mlx5_port_caps port[MLX5_MAX_PORTS];
302 u8 ext_port_cap[MLX5_MAX_PORTS];
303 int max_vf;
304 u32 reserved_lkey;
305 u8 local_ca_ack_delay;
306 u8 log_max_mcg;
307 u32 max_qp_mcg;
308 int min_page_sz;
309 int pd_cap;
310 u32 max_qp_counters;
311 u32 pkey_table_size;
312 u8 log_max_ra_req_dc;
313 u8 log_max_ra_res_dc;
314 u32 uar_sz;
315 u8 min_log_pg_sz;
316 u8 log_max_xrcd;
317 u16 log_uar_page_sz;
318};
319
320struct mlx5_caps {
321 struct mlx5_general_caps gen;
322}; 278};
323 279
324struct mlx5_cmd_mailbox { 280struct mlx5_cmd_mailbox {
@@ -334,8 +290,6 @@ struct mlx5_buf_list {
334 290
335struct mlx5_buf { 291struct mlx5_buf {
336 struct mlx5_buf_list direct; 292 struct mlx5_buf_list direct;
337 struct mlx5_buf_list *page_list;
338 int nbufs;
339 int npages; 293 int npages;
340 int size; 294 int size;
341 u8 page_shift; 295 u8 page_shift;
@@ -351,7 +305,6 @@ struct mlx5_eq {
351 u8 eqn; 305 u8 eqn;
352 int nent; 306 int nent;
353 u64 mask; 307 u64 mask;
354 char name[MLX5_MAX_EQ_NAME];
355 struct list_head list; 308 struct list_head list;
356 int index; 309 int index;
357 struct mlx5_rsc_debug *dbg; 310 struct mlx5_rsc_debug *dbg;
@@ -387,6 +340,8 @@ struct mlx5_core_mr {
387 340
388enum mlx5_res_type { 341enum mlx5_res_type {
389 MLX5_RES_QP, 342 MLX5_RES_QP,
343 MLX5_RES_SRQ,
344 MLX5_RES_XSRQ,
390}; 345};
391 346
392struct mlx5_core_rsc_common { 347struct mlx5_core_rsc_common {
@@ -396,6 +351,7 @@ struct mlx5_core_rsc_common {
396}; 351};
397 352
398struct mlx5_core_srq { 353struct mlx5_core_srq {
354 struct mlx5_core_rsc_common common; /* must be first */
399 u32 srqn; 355 u32 srqn;
400 int max; 356 int max;
401 int max_gs; 357 int max_gs;
@@ -414,7 +370,6 @@ struct mlx5_eq_table {
414 struct mlx5_eq pages_eq; 370 struct mlx5_eq pages_eq;
415 struct mlx5_eq async_eq; 371 struct mlx5_eq async_eq;
416 struct mlx5_eq cmd_eq; 372 struct mlx5_eq cmd_eq;
417 struct msix_entry *msix_arr;
418 int num_comp_vectors; 373 int num_comp_vectors;
419 /* protect EQs list 374 /* protect EQs list
420 */ 375 */
@@ -467,9 +422,16 @@ struct mlx5_mr_table {
467 struct radix_tree_root tree; 422 struct radix_tree_root tree;
468}; 423};
469 424
425struct mlx5_irq_info {
426 cpumask_var_t mask;
427 char name[MLX5_MAX_IRQ_NAME];
428};
429
470struct mlx5_priv { 430struct mlx5_priv {
471 char name[MLX5_MAX_NAME_LEN]; 431 char name[MLX5_MAX_NAME_LEN];
472 struct mlx5_eq_table eq_table; 432 struct mlx5_eq_table eq_table;
433 struct msix_entry *msix_arr;
434 struct mlx5_irq_info *irq_info;
473 struct mlx5_uuar_info uuari; 435 struct mlx5_uuar_info uuari;
474 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); 436 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
475 437
@@ -520,7 +482,9 @@ struct mlx5_core_dev {
520 u8 rev_id; 482 u8 rev_id;
521 char board_id[MLX5_BOARD_ID_LEN]; 483 char board_id[MLX5_BOARD_ID_LEN];
522 struct mlx5_cmd cmd; 484 struct mlx5_cmd cmd;
523 struct mlx5_caps caps; 485 struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
486 u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
487 u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
524 phys_addr_t iseg_base; 488 phys_addr_t iseg_base;
525 struct mlx5_init_seg __iomem *iseg; 489 struct mlx5_init_seg __iomem *iseg;
526 void (*event) (struct mlx5_core_dev *dev, 490 void (*event) (struct mlx5_core_dev *dev,
@@ -529,6 +493,7 @@ struct mlx5_core_dev {
529 struct mlx5_priv priv; 493 struct mlx5_priv priv;
530 struct mlx5_profile *profile; 494 struct mlx5_profile *profile;
531 atomic_t num_qps; 495 atomic_t num_qps;
496 u32 issi;
532}; 497};
533 498
534struct mlx5_db { 499struct mlx5_db {
@@ -549,6 +514,11 @@ enum {
549 MLX5_COMP_EQ_SIZE = 1024, 514 MLX5_COMP_EQ_SIZE = 1024,
550}; 515};
551 516
517enum {
518 MLX5_PTYS_IB = 1 << 0,
519 MLX5_PTYS_EN = 1 << 2,
520};
521
552struct mlx5_db_pgdir { 522struct mlx5_db_pgdir {
553 struct list_head list; 523 struct list_head list;
554 DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); 524 DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -584,13 +554,44 @@ struct mlx5_pas {
584 u8 log_sz; 554 u8 log_sz;
585}; 555};
586 556
557enum port_state_policy {
558 MLX5_AAA_000
559};
560
561enum phy_port_state {
562 MLX5_AAA_111
563};
564
565struct mlx5_hca_vport_context {
566 u32 field_select;
567 bool sm_virt_aware;
568 bool has_smi;
569 bool has_raw;
570 enum port_state_policy policy;
571 enum phy_port_state phys_state;
572 enum ib_port_state vport_state;
573 u8 port_physical_state;
574 u64 sys_image_guid;
575 u64 port_guid;
576 u64 node_guid;
577 u32 cap_mask1;
578 u32 cap_mask1_perm;
579 u32 cap_mask2;
580 u32 cap_mask2_perm;
581 u16 lid;
582 u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
583 u8 lmc;
584 u8 subnet_timeout;
585 u16 sm_lid;
586 u8 sm_sl;
587 u16 qkey_violation_counter;
588 u16 pkey_violation_counter;
589 bool grh_required;
590};
591
587static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) 592static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
588{ 593{
589 if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
590 return buf->direct.buf + offset; 594 return buf->direct.buf + offset;
591 else
592 return buf->page_list[offset >> PAGE_SHIFT].buf +
593 (offset & (PAGE_SIZE - 1));
594} 595}
595 596
596extern struct workqueue_struct *mlx5_core_wq; 597extern struct workqueue_struct *mlx5_core_wq;
@@ -654,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
654void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 655void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
655int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 656int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
656int mlx5_cmd_status_to_err_v2(void *ptr); 657int mlx5_cmd_status_to_err_v2(void *ptr);
657int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, 658int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
658 u16 opmod); 659 enum mlx5_cap_mode cap_mode);
659int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 660int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
660 int out_size); 661 int out_size);
661int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 662int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -665,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
665int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); 666int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
666int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 667int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
667int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 668int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
669int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
670void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
668void mlx5_health_cleanup(void); 671void mlx5_health_cleanup(void);
669void __init mlx5_health_init(void); 672void __init mlx5_health_init(void);
670void mlx5_start_health_poll(struct mlx5_core_dev *dev); 673void mlx5_start_health_poll(struct mlx5_core_dev *dev);
671void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 674void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
672int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, 675int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
673 struct mlx5_buf *buf);
674void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); 676void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
675struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, 677struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
676 gfp_t flags, int npages); 678 gfp_t flags, int npages);
677void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, 679void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
678 struct mlx5_cmd_mailbox *head); 680 struct mlx5_cmd_mailbox *head);
679int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 681int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
680 struct mlx5_create_srq_mbox_in *in, int inlen); 682 struct mlx5_create_srq_mbox_in *in, int inlen,
683 int is_xrc);
681int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); 684int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
682int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, 685int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
683 struct mlx5_query_srq_mbox_out *out); 686 struct mlx5_query_srq_mbox_out *out);
@@ -696,7 +699,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
696 u32 *mkey); 699 u32 *mkey);
697int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); 700int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
698int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); 701int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
699int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, 702int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
700 u16 opmod, u8 port); 703 u16 opmod, u8 port);
701void mlx5_pagealloc_init(struct mlx5_core_dev *dev); 704void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
702void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); 705void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
@@ -734,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
734int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, 737int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
735 int size_in, void *data_out, int size_out, 738 int size_in, void *data_out, int size_out,
736 u16 reg_num, int arg, int write); 739 u16 reg_num, int arg, int write);
740
737int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); 741int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
742int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
743 int ptys_size, int proto_mask, u8 local_port);
744int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
745 u32 *proto_cap, int proto_mask);
746int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
747 u32 *proto_admin, int proto_mask);
748int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
749 u8 *link_width_oper, u8 local_port);
750int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
751 u8 *proto_oper, int proto_mask,
752 u8 local_port);
753int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
754 int proto_mask);
755int mlx5_set_port_status(struct mlx5_core_dev *dev,
756 enum mlx5_port_status status);
757int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
758
759int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
760void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
761void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
762 u8 port);
763
764int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
765 u8 *vl_hw_cap, u8 local_port);
738 766
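The port helpers added above compose naturally; a hedged sketch of bringing port 1 up at a requested MTU (error handling abbreviated, port number assumed):

/* Sketch: clamp the MTU against the device maximum, program it,
 * then enable the port.
 */
static int sketch_port_up(struct mlx5_core_dev *dev, int mtu)
{
        int max_mtu, err;

        mlx5_query_port_max_mtu(dev, &max_mtu, 1);
        if (mtu > max_mtu)
                return -EINVAL;

        err = mlx5_set_port_mtu(dev, mtu, 1);
        if (err)
                return err;

        return mlx5_set_port_status(dev, MLX5_PORT_UP);
}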
739int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 767int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
740void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 768void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -799,6 +827,7 @@ struct mlx5_interface {
799void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); 827void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
800int mlx5_register_interface(struct mlx5_interface *intf); 828int mlx5_register_interface(struct mlx5_interface *intf);
801void mlx5_unregister_interface(struct mlx5_interface *intf); 829void mlx5_unregister_interface(struct mlx5_interface *intf);
830int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
802 831
803struct mlx5_profile { 832struct mlx5_profile {
804 u64 mask; 833 u64 mask;
@@ -809,4 +838,14 @@ struct mlx5_profile {
809 } mr_cache[MAX_MR_CACHE_ENTRIES]; 838 } mr_cache[MAX_MR_CACHE_ENTRIES];
810}; 839};
811 840
841static inline int mlx5_get_gid_table_len(u16 param)
842{
843 if (param > 4) {
844 pr_warn("gid table length is zero\n");
845 return 0;
846 }
847
848 return 8 * (1 << param);
849}
850
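The parameter is the gid_table_size encoding from the HCA capability page, so encodings 0x0..0x4 yield 8..128 GID entries, matching the MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_* values in the mlx5_ifc.h hunk below. A sketch of the expected call site:

/* Sketch: decode the capability field into a usable table length;
 * anything past the 128-entry encoding warns and returns 0.
 */
int gid_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));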
812#endif /* MLX5_DRIVER_H */ 851#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644
index 000000000000..5f922c6d4fc2
--- /dev/null
+++ b/include/linux/mlx5/flow_table.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_FLOW_TABLE_H
34#define MLX5_FLOW_TABLE_H
35
36#include <linux/mlx5/driver.h>
37
38struct mlx5_flow_table_group {
39 u8 log_sz;
40 u8 match_criteria_enable;
41 u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
42};
43
44void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
45 u16 num_groups,
46 struct mlx5_flow_table_group *group);
47void mlx5_destroy_flow_table(void *flow_table);
48int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
49 void *match_criteria, void *flow_context,
50 u32 *flow_index);
51void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
52u32 mlx5_get_flow_table_id(void *flow_table);
53
54#endif /* MLX5_FLOW_TABLE_H */
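A hedged usage sketch of this new API, with the match buffers elided (in practice they are built with MLX5_SET() against the fte_match_param layout; the table type and match flags come from device.h earlier in this series):

/* Sketch: one group of up to 1K receive rules, add one entry,
 * then tear everything down. match_criteria/flow_context setup
 * is omitted for brevity.
 */
struct mlx5_flow_table_group group = {
        .log_sz = 10,
        .match_criteria_enable = MLX5_MATCH_OUTER_HEADERS,
};
void *match_criteria = NULL, *flow_context = NULL; /* built elsewhere */
u32 flow_index;
void *ft;

ft = mlx5_create_flow_table(dev, 0 /* level */,
                            MLX5_FLOW_TABLE_TYPE_NIC_RCV, 1, &group);
if (ft) {
        mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
                                  match_criteria, flow_context,
                                  &flow_index);
        mlx5_del_flow_table_entry(ft, flow_index);
        mlx5_destroy_flow_table(ft);
}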
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cb3ad17edd1f..6d2f6fee041c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -28,12 +28,45 @@
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31*/
32
33#ifndef MLX5_IFC_H 32#ifndef MLX5_IFC_H
34#define MLX5_IFC_H 33#define MLX5_IFC_H
35 34
36enum { 35enum {
36 MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
37 MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
38 MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2,
39 MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3,
40 MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13,
41 MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14,
42 MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c,
43 MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d,
44 MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4,
45 MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5,
46 MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7,
47 MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc,
48 MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10,
49 MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11,
50 MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12,
51 MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8,
52 MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9,
53 MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15,
54 MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
55 MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
56 MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
57 MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
58 MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
59 MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
60};
61
62enum {
63 MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
64 MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
65 MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
66 MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
67};
68
69enum {
37 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, 70 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
38 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 71 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
39 MLX5_CMD_OP_INIT_HCA = 0x102, 72 MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -43,6 +76,8 @@ enum {
43 MLX5_CMD_OP_QUERY_PAGES = 0x107, 76 MLX5_CMD_OP_QUERY_PAGES = 0x107,
44 MLX5_CMD_OP_MANAGE_PAGES = 0x108, 77 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
45 MLX5_CMD_OP_SET_HCA_CAP = 0x109, 78 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
79 MLX5_CMD_OP_QUERY_ISSI = 0x10a,
80 MLX5_CMD_OP_SET_ISSI = 0x10b,
46 MLX5_CMD_OP_CREATE_MKEY = 0x200, 81 MLX5_CMD_OP_CREATE_MKEY = 0x200,
47 MLX5_CMD_OP_QUERY_MKEY = 0x201, 82 MLX5_CMD_OP_QUERY_MKEY = 0x201,
48 MLX5_CMD_OP_DESTROY_MKEY = 0x202, 83 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -66,6 +101,7 @@ enum {
66 MLX5_CMD_OP_2ERR_QP = 0x507, 101 MLX5_CMD_OP_2ERR_QP = 0x507,
67 MLX5_CMD_OP_2RST_QP = 0x50a, 102 MLX5_CMD_OP_2RST_QP = 0x50a,
68 MLX5_CMD_OP_QUERY_QP = 0x50b, 103 MLX5_CMD_OP_QUERY_QP = 0x50b,
104 MLX5_CMD_OP_SQD_RTS_QP = 0x50c,
69 MLX5_CMD_OP_INIT2INIT_QP = 0x50e, 105 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
70 MLX5_CMD_OP_CREATE_PSV = 0x600, 106 MLX5_CMD_OP_CREATE_PSV = 0x600,
71 MLX5_CMD_OP_DESTROY_PSV = 0x601, 107 MLX5_CMD_OP_DESTROY_PSV = 0x601,
@@ -73,7 +109,10 @@ enum {
73 MLX5_CMD_OP_DESTROY_SRQ = 0x701, 109 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
74 MLX5_CMD_OP_QUERY_SRQ = 0x702, 110 MLX5_CMD_OP_QUERY_SRQ = 0x702,
75 MLX5_CMD_OP_ARM_RQ = 0x703, 111 MLX5_CMD_OP_ARM_RQ = 0x703,
76 MLX5_CMD_OP_RESIZE_SRQ = 0x704, 112 MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705,
113 MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706,
114 MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707,
115 MLX5_CMD_OP_ARM_XRC_SRQ = 0x708,
77 MLX5_CMD_OP_CREATE_DCT = 0x710, 116 MLX5_CMD_OP_CREATE_DCT = 0x710,
78 MLX5_CMD_OP_DESTROY_DCT = 0x711, 117 MLX5_CMD_OP_DESTROY_DCT = 0x711,
79 MLX5_CMD_OP_DRAIN_DCT = 0x712, 118 MLX5_CMD_OP_DRAIN_DCT = 0x712,
@@ -85,8 +124,12 @@ enum {
85 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, 124 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
86 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, 125 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
87 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, 126 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
88 MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, 127 MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
89 MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, 128 MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
129 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
130 MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
131 MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
132 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
90 MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, 133 MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
91 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, 134 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
92 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, 135 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -98,7 +141,7 @@ enum {
98 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, 141 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
99 MLX5_CMD_OP_ACCESS_REG = 0x805, 142 MLX5_CMD_OP_ACCESS_REG = 0x805,
100 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, 143 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
101 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, 144 MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
102 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, 145 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
103 MLX5_CMD_OP_MAD_IFC = 0x50d, 146 MLX5_CMD_OP_MAD_IFC = 0x50d,
104 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, 147 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -106,23 +149,22 @@ enum {
106 MLX5_CMD_OP_NOP = 0x80d, 149 MLX5_CMD_OP_NOP = 0x80d,
107 MLX5_CMD_OP_ALLOC_XRCD = 0x80e, 150 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
108 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, 151 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
109 MLX5_CMD_OP_SET_BURST_SIZE = 0x812, 152 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
110 MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, 153 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
111 MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, 154 MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
112 MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, 155 MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
113 MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, 156 MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
114 MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, 157 MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825,
115 MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, 158 MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826,
116 MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, 159 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827,
117 MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, 160 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
161 MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
162 MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
163 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
118 MLX5_CMD_OP_CREATE_TIR = 0x900, 164 MLX5_CMD_OP_CREATE_TIR = 0x900,
119 MLX5_CMD_OP_MODIFY_TIR = 0x901, 165 MLX5_CMD_OP_MODIFY_TIR = 0x901,
120 MLX5_CMD_OP_DESTROY_TIR = 0x902, 166 MLX5_CMD_OP_DESTROY_TIR = 0x902,
121 MLX5_CMD_OP_QUERY_TIR = 0x903, 167 MLX5_CMD_OP_QUERY_TIR = 0x903,
122 MLX5_CMD_OP_CREATE_TIS = 0x912,
123 MLX5_CMD_OP_MODIFY_TIS = 0x913,
124 MLX5_CMD_OP_DESTROY_TIS = 0x914,
125 MLX5_CMD_OP_QUERY_TIS = 0x915,
126 MLX5_CMD_OP_CREATE_SQ = 0x904, 168 MLX5_CMD_OP_CREATE_SQ = 0x904,
127 MLX5_CMD_OP_MODIFY_SQ = 0x905, 169 MLX5_CMD_OP_MODIFY_SQ = 0x905,
128 MLX5_CMD_OP_DESTROY_SQ = 0x906, 170 MLX5_CMD_OP_DESTROY_SQ = 0x906,
@@ -135,9 +177,430 @@ enum {
135 MLX5_CMD_OP_MODIFY_RMP = 0x90d, 177 MLX5_CMD_OP_MODIFY_RMP = 0x90d,
136 MLX5_CMD_OP_DESTROY_RMP = 0x90e, 178 MLX5_CMD_OP_DESTROY_RMP = 0x90e,
137 MLX5_CMD_OP_QUERY_RMP = 0x90f, 179 MLX5_CMD_OP_QUERY_RMP = 0x90f,
138 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, 180 MLX5_CMD_OP_CREATE_TIS = 0x912,
139 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, 181 MLX5_CMD_OP_MODIFY_TIS = 0x913,
140 MLX5_CMD_OP_MAX = 0x911 182 MLX5_CMD_OP_DESTROY_TIS = 0x914,
183 MLX5_CMD_OP_QUERY_TIS = 0x915,
184 MLX5_CMD_OP_CREATE_RQT = 0x916,
185 MLX5_CMD_OP_MODIFY_RQT = 0x917,
186 MLX5_CMD_OP_DESTROY_RQT = 0x918,
187 MLX5_CMD_OP_QUERY_RQT = 0x919,
188 MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
189 MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
190 MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
191 MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
192 MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934,
193 MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
194 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
195 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
196 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938
197};
198
199struct mlx5_ifc_flow_table_fields_supported_bits {
200 u8 outer_dmac[0x1];
201 u8 outer_smac[0x1];
202 u8 outer_ether_type[0x1];
203 u8 reserved_0[0x1];
204 u8 outer_first_prio[0x1];
205 u8 outer_first_cfi[0x1];
206 u8 outer_first_vid[0x1];
207 u8 reserved_1[0x1];
208 u8 outer_second_prio[0x1];
209 u8 outer_second_cfi[0x1];
210 u8 outer_second_vid[0x1];
211 u8 reserved_2[0x1];
212 u8 outer_sip[0x1];
213 u8 outer_dip[0x1];
214 u8 outer_frag[0x1];
215 u8 outer_ip_protocol[0x1];
216 u8 outer_ip_ecn[0x1];
217 u8 outer_ip_dscp[0x1];
218 u8 outer_udp_sport[0x1];
219 u8 outer_udp_dport[0x1];
220 u8 outer_tcp_sport[0x1];
221 u8 outer_tcp_dport[0x1];
222 u8 outer_tcp_flags[0x1];
223 u8 outer_gre_protocol[0x1];
224 u8 outer_gre_key[0x1];
225 u8 outer_vxlan_vni[0x1];
226 u8 reserved_3[0x5];
227 u8 source_eswitch_port[0x1];
228
229 u8 inner_dmac[0x1];
230 u8 inner_smac[0x1];
231 u8 inner_ether_type[0x1];
232 u8 reserved_4[0x1];
233 u8 inner_first_prio[0x1];
234 u8 inner_first_cfi[0x1];
235 u8 inner_first_vid[0x1];
236 u8 reserved_5[0x1];
237 u8 inner_second_prio[0x1];
238 u8 inner_second_cfi[0x1];
239 u8 inner_second_vid[0x1];
240 u8 reserved_6[0x1];
241 u8 inner_sip[0x1];
242 u8 inner_dip[0x1];
243 u8 inner_frag[0x1];
244 u8 inner_ip_protocol[0x1];
245 u8 inner_ip_ecn[0x1];
246 u8 inner_ip_dscp[0x1];
247 u8 inner_udp_sport[0x1];
248 u8 inner_udp_dport[0x1];
249 u8 inner_tcp_sport[0x1];
250 u8 inner_tcp_dport[0x1];
251 u8 inner_tcp_flags[0x1];
252 u8 reserved_7[0x9];
253
254 u8 reserved_8[0x40];
255};
256
257struct mlx5_ifc_flow_table_prop_layout_bits {
258 u8 ft_support[0x1];
259 u8 reserved_0[0x1f];
260
261 u8 reserved_1[0x2];
262 u8 log_max_ft_size[0x6];
263 u8 reserved_2[0x10];
264 u8 max_ft_level[0x8];
265
266 u8 reserved_3[0x20];
267
268 u8 reserved_4[0x18];
269 u8 log_max_ft_num[0x8];
270
271 u8 reserved_5[0x18];
272 u8 log_max_destination[0x8];
273
274 u8 reserved_6[0x18];
275 u8 log_max_flow[0x8];
276
277 u8 reserved_7[0x40];
278
279 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
280
281 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
282};
283
284struct mlx5_ifc_odp_per_transport_service_cap_bits {
285 u8 send[0x1];
286 u8 receive[0x1];
287 u8 write[0x1];
288 u8 read[0x1];
289 u8 reserved_0[0x1];
290 u8 srq_receive[0x1];
291 u8 reserved_1[0x1a];
292};
293
294struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
295 u8 smac_47_16[0x20];
296
297 u8 smac_15_0[0x10];
298 u8 ethertype[0x10];
299
300 u8 dmac_47_16[0x20];
301
302 u8 dmac_15_0[0x10];
303 u8 first_prio[0x3];
304 u8 first_cfi[0x1];
305 u8 first_vid[0xc];
306
307 u8 ip_protocol[0x8];
308 u8 ip_dscp[0x6];
309 u8 ip_ecn[0x2];
310 u8 vlan_tag[0x1];
311 u8 reserved_0[0x1];
312 u8 frag[0x1];
313 u8 reserved_1[0x4];
314 u8 tcp_flags[0x9];
315
316 u8 tcp_sport[0x10];
317 u8 tcp_dport[0x10];
318
319 u8 reserved_2[0x20];
320
321 u8 udp_sport[0x10];
322 u8 udp_dport[0x10];
323
324 u8 src_ip[4][0x20];
325
326 u8 dst_ip[4][0x20];
327};
328
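In these layouts every u8 array element count is a field width in bits, so the structs describe the firmware format exactly and feed MLX5_SET()/MLX5_GET() directly. A sketch matching on the IPv4 ethertype in the outer headers, using the fte_match_param layout defined later in this diff (ETH_P_IP comes from <linux/if_ether.h>):

/* Sketch: MLX5_ADDR_OF() locates the outer_headers sub-struct
 * inside fte_match_param, then MLX5_SET() writes the 16-bit
 * ethertype field.
 */
u32 match[MLX5_ST_SZ_DW(fte_match_param)] = {};

MLX5_SET(fte_match_set_lyr_2_4,
         MLX5_ADDR_OF(fte_match_param, match, outer_headers),
         ethertype, ETH_P_IP);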
329struct mlx5_ifc_fte_match_set_misc_bits {
330 u8 reserved_0[0x20];
331
332 u8 reserved_1[0x10];
333 u8 source_port[0x10];
334
335 u8 outer_second_prio[0x3];
336 u8 outer_second_cfi[0x1];
337 u8 outer_second_vid[0xc];
338 u8 inner_second_prio[0x3];
339 u8 inner_second_cfi[0x1];
340 u8 inner_second_vid[0xc];
341
342 u8 outer_second_vlan_tag[0x1];
343 u8 inner_second_vlan_tag[0x1];
344 u8 reserved_2[0xe];
345 u8 gre_protocol[0x10];
346
347 u8 gre_key_h[0x18];
348 u8 gre_key_l[0x8];
349
350 u8 vxlan_vni[0x18];
351 u8 reserved_3[0x8];
352
353 u8 reserved_4[0x20];
354
355 u8 reserved_5[0xc];
356 u8 outer_ipv6_flow_label[0x14];
357
358 u8 reserved_6[0xc];
359 u8 inner_ipv6_flow_label[0x14];
360
361 u8 reserved_7[0xe0];
362};
363
364struct mlx5_ifc_cmd_pas_bits {
365 u8 pa_h[0x20];
366
367 u8 pa_l[0x14];
368 u8 reserved_0[0xc];
369};
370
371struct mlx5_ifc_uint64_bits {
372 u8 hi[0x20];
373
374 u8 lo[0x20];
375};
376
377enum {
378 MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0,
379 MLX5_ADS_STAT_RATE_2_5GBPS = 0x7,
380 MLX5_ADS_STAT_RATE_10GBPS = 0x8,
381 MLX5_ADS_STAT_RATE_30GBPS = 0x9,
382 MLX5_ADS_STAT_RATE_5GBPS = 0xa,
383 MLX5_ADS_STAT_RATE_20GBPS = 0xb,
384 MLX5_ADS_STAT_RATE_40GBPS = 0xc,
385 MLX5_ADS_STAT_RATE_60GBPS = 0xd,
386 MLX5_ADS_STAT_RATE_80GBPS = 0xe,
387 MLX5_ADS_STAT_RATE_120GBPS = 0xf,
388};
389
390struct mlx5_ifc_ads_bits {
391 u8 fl[0x1];
392 u8 free_ar[0x1];
393 u8 reserved_0[0xe];
394 u8 pkey_index[0x10];
395
396 u8 reserved_1[0x8];
397 u8 grh[0x1];
398 u8 mlid[0x7];
399 u8 rlid[0x10];
400
401 u8 ack_timeout[0x5];
402 u8 reserved_2[0x3];
403 u8 src_addr_index[0x8];
404 u8 reserved_3[0x4];
405 u8 stat_rate[0x4];
406 u8 hop_limit[0x8];
407
408 u8 reserved_4[0x4];
409 u8 tclass[0x8];
410 u8 flow_label[0x14];
411
412 u8 rgid_rip[16][0x8];
413
414 u8 reserved_5[0x4];
415 u8 f_dscp[0x1];
416 u8 f_ecn[0x1];
417 u8 reserved_6[0x1];
418 u8 f_eth_prio[0x1];
419 u8 ecn[0x2];
420 u8 dscp[0x6];
421 u8 udp_sport[0x10];
422
423 u8 dei_cfi[0x1];
424 u8 eth_prio[0x3];
425 u8 sl[0x4];
426 u8 port[0x8];
427 u8 rmac_47_32[0x10];
428
429 u8 rmac_31_0[0x20];
430};
431
432struct mlx5_ifc_flow_table_nic_cap_bits {
433 u8 reserved_0[0x200];
434
435 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
436
437 u8 reserved_1[0x200];
438
439 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
440
441 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
442
443 u8 reserved_2[0x200];
444
445 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
446
447 u8 reserved_3[0x7200];
448};
449
450struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
451 u8 csum_cap[0x1];
452 u8 vlan_cap[0x1];
453 u8 lro_cap[0x1];
454 u8 lro_psh_flag[0x1];
455 u8 lro_time_stamp[0x1];
456 u8 reserved_0[0x6];
457 u8 max_lso_cap[0x5];
458 u8 reserved_1[0x4];
459 u8 rss_ind_tbl_cap[0x4];
460 u8 reserved_2[0x3];
461 u8 tunnel_lso_const_out_ip_id[0x1];
462 u8 reserved_3[0x2];
463 u8 tunnel_statless_gre[0x1];
464 u8 tunnel_stateless_vxlan[0x1];
465
466 u8 reserved_4[0x20];
467
468 u8 reserved_5[0x10];
469 u8 lro_min_mss_size[0x10];
470
471 u8 reserved_6[0x120];
472
473 u8 lro_timer_supported_periods[4][0x20];
474
475 u8 reserved_7[0x600];
476};
477
478struct mlx5_ifc_roce_cap_bits {
479 u8 roce_apm[0x1];
480 u8 reserved_0[0x1f];
481
482 u8 reserved_1[0x60];
483
484 u8 reserved_2[0xc];
485 u8 l3_type[0x4];
486 u8 reserved_3[0x8];
487 u8 roce_version[0x8];
488
489 u8 reserved_4[0x10];
490 u8 r_roce_dest_udp_port[0x10];
491
492 u8 r_roce_max_src_udp_port[0x10];
493 u8 r_roce_min_src_udp_port[0x10];
494
495 u8 reserved_5[0x10];
496 u8 roce_address_table_size[0x10];
497
498 u8 reserved_6[0x700];
499};
500
501enum {
502 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
503 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
504 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
505 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
506 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
507 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
508 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
509 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
510 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
511};
512
513enum {
514 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
515 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
516 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
517 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
518 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
519 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
520 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
521 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
522 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
523};
524
525struct mlx5_ifc_atomic_caps_bits {
526 u8 reserved_0[0x40];
527
528 u8 atomic_req_endianness[0x1];
529 u8 reserved_1[0x1f];
530
531 u8 reserved_2[0x20];
532
533 u8 reserved_3[0x10];
534 u8 atomic_operations[0x10];
535
536 u8 reserved_4[0x10];
537 u8 atomic_size_qp[0x10];
538
539 u8 reserved_5[0x10];
540 u8 atomic_size_dc[0x10];
541
542 u8 reserved_6[0x720];
543};
544
545struct mlx5_ifc_odp_cap_bits {
546 u8 reserved_0[0x40];
547
548 u8 sig[0x1];
549 u8 reserved_1[0x1f];
550
551 u8 reserved_2[0x20];
552
553 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
554
555 struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
556
557 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
558
559 u8 reserved_3[0x720];
560};
561
562enum {
563 MLX5_WQ_TYPE_LINKED_LIST = 0x0,
564 MLX5_WQ_TYPE_CYCLIC = 0x1,
565 MLX5_WQ_TYPE_STRQ = 0x2,
566};
567
568enum {
569 MLX5_WQ_END_PAD_MODE_NONE = 0x0,
570 MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
571};
572
573enum {
574 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
575 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
576 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
577 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
578 MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
579};
580
581enum {
582 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
583 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
584 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
585 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
586 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
587 MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
588};
589
590enum {
591 MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
592 MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
593};
594
595enum {
596 MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
597 MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
598 MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
599};
600
601enum {
602 MLX5_CAP_PORT_TYPE_IB = 0x0,
603 MLX5_CAP_PORT_TYPE_ETH = 0x1,
141}; 604};
142 605
143struct mlx5_ifc_cmd_hca_cap_bits { 606struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
148 u8 reserved_1[0xb]; 611 u8 reserved_1[0xb];
149 u8 log_max_qp[0x5]; 612 u8 log_max_qp[0x5];
150 613
151 u8 log_max_strq_sz[0x8]; 614 u8 reserved_2[0xb];
152 u8 reserved_2[0x3]; 615 u8 log_max_srq[0x5];
153 u8 log_max_srqs[0x5];
154 u8 reserved_3[0x10]; 616 u8 reserved_3[0x10];
155 617
156 u8 reserved_4[0x8]; 618 u8 reserved_4[0x8];
@@ -185,123 +647,2112 @@ struct mlx5_ifc_cmd_hca_cap_bits {
185 u8 pad_cap[0x1]; 647 u8 pad_cap[0x1];
186 u8 cc_query_allowed[0x1]; 648 u8 cc_query_allowed[0x1];
187 u8 cc_modify_allowed[0x1]; 649 u8 cc_modify_allowed[0x1];
188 u8 reserved_15[0x1d]; 650 u8 reserved_15[0xd];
651 u8 gid_table_size[0x10];
189 652
190 u8 reserved_16[0x6]; 653 u8 out_of_seq_cnt[0x1];
654 u8 vport_counters[0x1];
655 u8 reserved_16[0x4];
191 u8 max_qp_cnt[0xa]; 656 u8 max_qp_cnt[0xa];
192 u8 pkey_table_size[0x10]; 657 u8 pkey_table_size[0x10];
193 658
194 u8 eswitch_owner[0x1]; 659 u8 vport_group_manager[0x1];
195 u8 reserved_17[0xa]; 660 u8 vhca_group_manager[0x1];
661 u8 ib_virt[0x1];
662 u8 eth_virt[0x1];
663 u8 reserved_17[0x1];
664 u8 ets[0x1];
665 u8 nic_flow_table[0x1];
666 u8 reserved_18[0x4];
196 u8 local_ca_ack_delay[0x5]; 667 u8 local_ca_ack_delay[0x5];
197 u8 reserved_18[0x8]; 668 u8 reserved_19[0x6];
669 u8 port_type[0x2];
198 u8 num_ports[0x8]; 670 u8 num_ports[0x8];
199 671
200 u8 reserved_19[0x3]; 672 u8 reserved_20[0x3];
201 u8 log_max_msg[0x5]; 673 u8 log_max_msg[0x5];
202 u8 reserved_20[0x18]; 674 u8 reserved_21[0x18];
203 675
204 u8 stat_rate_support[0x10]; 676 u8 stat_rate_support[0x10];
205 u8 reserved_21[0x10]; 677 u8 reserved_22[0xc];
678 u8 cqe_version[0x4];
206 679
207 u8 reserved_22[0x10]; 680 u8 compact_address_vector[0x1];
681 u8 reserved_23[0xe];
682 u8 drain_sigerr[0x1];
208 u8 cmdif_checksum[0x2]; 683 u8 cmdif_checksum[0x2];
209 u8 sigerr_cqe[0x1]; 684 u8 sigerr_cqe[0x1];
210 u8 reserved_23[0x1]; 685 u8 reserved_24[0x1];
211 u8 wq_signature[0x1]; 686 u8 wq_signature[0x1];
212 u8 sctr_data_cqe[0x1]; 687 u8 sctr_data_cqe[0x1];
213 u8 reserved_24[0x1]; 688 u8 reserved_25[0x1];
214 u8 sho[0x1]; 689 u8 sho[0x1];
215 u8 tph[0x1]; 690 u8 tph[0x1];
216 u8 rf[0x1]; 691 u8 rf[0x1];
217 u8 dc[0x1]; 692 u8 dct[0x1];
218 u8 reserved_25[0x2]; 693 u8 reserved_26[0x1];
694 u8 eth_net_offloads[0x1];
219 u8 roce[0x1]; 695 u8 roce[0x1];
220 u8 atomic[0x1]; 696 u8 atomic[0x1];
221 u8 rsz_srq[0x1]; 697 u8 reserved_27[0x1];
222 698
223 u8 cq_oi[0x1]; 699 u8 cq_oi[0x1];
224 u8 cq_resize[0x1]; 700 u8 cq_resize[0x1];
225 u8 cq_moderation[0x1]; 701 u8 cq_moderation[0x1];
226 u8 sniffer_rule_flow[0x1]; 702 u8 reserved_28[0x3];
227 u8 sniffer_rule_vport[0x1]; 703 u8 cq_eq_remap[0x1];
228 u8 sniffer_rule_phy[0x1];
229 u8 reserved_26[0x1];
230 u8 pg[0x1]; 704 u8 pg[0x1];
231 u8 block_lb_mc[0x1]; 705 u8 block_lb_mc[0x1];
232 u8 reserved_27[0x3]; 706 u8 reserved_29[0x1];
707 u8 scqe_break_moderation[0x1];
708 u8 reserved_30[0x1];
233 u8 cd[0x1]; 709 u8 cd[0x1];
234 u8 reserved_28[0x1]; 710 u8 reserved_31[0x1];
235 u8 apm[0x1]; 711 u8 apm[0x1];
236 u8 reserved_29[0x7]; 712 u8 reserved_32[0x7];
237 u8 qkv[0x1]; 713 u8 qkv[0x1];
238 u8 pkv[0x1]; 714 u8 pkv[0x1];
239 u8 reserved_30[0x4]; 715 u8 reserved_33[0x4];
240 u8 xrc[0x1]; 716 u8 xrc[0x1];
241 u8 ud[0x1]; 717 u8 ud[0x1];
242 u8 uc[0x1]; 718 u8 uc[0x1];
243 u8 rc[0x1]; 719 u8 rc[0x1];
244 720
245 u8 reserved_31[0xa]; 721 u8 reserved_34[0xa];
246 u8 uar_sz[0x6]; 722 u8 uar_sz[0x6];
247 u8 reserved_32[0x8]; 723 u8 reserved_35[0x8];
248 u8 log_pg_sz[0x8]; 724 u8 log_pg_sz[0x8];
249 725
250 u8 bf[0x1]; 726 u8 bf[0x1];
251 u8 reserved_33[0xa]; 727 u8 reserved_36[0x1];
728 u8 pad_tx_eth_packet[0x1];
729 u8 reserved_37[0x8];
252 u8 log_bf_reg_size[0x5]; 730 u8 log_bf_reg_size[0x5];
253 u8 reserved_34[0x10]; 731 u8 reserved_38[0x10];
254 732
255 u8 reserved_35[0x10]; 733 u8 reserved_39[0x10];
256 u8 max_wqe_sz_sq[0x10]; 734 u8 max_wqe_sz_sq[0x10];
257 735
258 u8 reserved_36[0x10]; 736 u8 reserved_40[0x10];
259 u8 max_wqe_sz_rq[0x10]; 737 u8 max_wqe_sz_rq[0x10];
260 738
261 u8 reserved_37[0x10]; 739 u8 reserved_41[0x10];
262 u8 max_wqe_sz_sq_dc[0x10]; 740 u8 max_wqe_sz_sq_dc[0x10];
263 741
264 u8 reserved_38[0x7]; 742 u8 reserved_42[0x7];
265 u8 max_qp_mcg[0x19]; 743 u8 max_qp_mcg[0x19];
266 744
267 u8 reserved_39[0x18]; 745 u8 reserved_43[0x18];
268 u8 log_max_mcg[0x8]; 746 u8 log_max_mcg[0x8];
269 747
270 u8 reserved_40[0xb]; 748 u8 reserved_44[0x3];
749 u8 log_max_transport_domain[0x5];
750 u8 reserved_45[0x3];
271 u8 log_max_pd[0x5]; 751 u8 log_max_pd[0x5];
272 u8 reserved_41[0xb]; 752 u8 reserved_46[0xb];
273 u8 log_max_xrcd[0x5]; 753 u8 log_max_xrcd[0x5];
274 754
275 u8 reserved_42[0x20]; 755 u8 reserved_47[0x20];
276 756
277 u8 reserved_43[0x3]; 757 u8 reserved_48[0x3];
278 u8 log_max_rq[0x5]; 758 u8 log_max_rq[0x5];
279 u8 reserved_44[0x3]; 759 u8 reserved_49[0x3];
280 u8 log_max_sq[0x5]; 760 u8 log_max_sq[0x5];
281 u8 reserved_45[0x3]; 761 u8 reserved_50[0x3];
282 u8 log_max_tir[0x5]; 762 u8 log_max_tir[0x5];
283 u8 reserved_46[0x3]; 763 u8 reserved_51[0x3];
284 u8 log_max_tis[0x5]; 764 u8 log_max_tis[0x5];
285 765
286 u8 reserved_47[0x13]; 766 u8 basic_cyclic_rcv_wqe[0x1];
287 u8 log_max_rq_per_tir[0x5]; 767 u8 reserved_52[0x2];
288 u8 reserved_48[0x3]; 768 u8 log_max_rmp[0x5];
769 u8 reserved_53[0x3];
770 u8 log_max_rqt[0x5];
771 u8 reserved_54[0x3];
772 u8 log_max_rqt_size[0x5];
773 u8 reserved_55[0x3];
289 u8 log_max_tis_per_sq[0x5]; 774 u8 log_max_tis_per_sq[0x5];
290 775
291 u8 reserved_49[0xe0]; 776 u8 reserved_56[0x3];
777 u8 log_max_stride_sz_rq[0x5];
778 u8 reserved_57[0x3];
779 u8 log_min_stride_sz_rq[0x5];
780 u8 reserved_58[0x3];
781 u8 log_max_stride_sz_sq[0x5];
782 u8 reserved_59[0x3];
783 u8 log_min_stride_sz_sq[0x5];
292 784
293 u8 reserved_50[0x10]; 785 u8 reserved_60[0x1b];
786 u8 log_max_wq_sz[0x5];
787
788 u8 reserved_61[0xa0];
789
790 u8 reserved_62[0x3];
791 u8 log_max_l2_table[0x5];
792 u8 reserved_63[0x8];
294 u8 log_uar_page_sz[0x10]; 793 u8 log_uar_page_sz[0x10];
295 794
296 u8 reserved_51[0x100]; 795 u8 reserved_64[0x100];
297 796
298 u8 reserved_52[0x1f]; 797 u8 reserved_65[0x1f];
299 u8 cqe_zip[0x1]; 798 u8 cqe_zip[0x1];
300 799
301 u8 cqe_zip_timeout[0x10]; 800 u8 cqe_zip_timeout[0x10];
302 u8 cqe_zip_max_num[0x10]; 801 u8 cqe_zip_max_num[0x10];
303 802
304 u8 reserved_53[0x220]; 803 u8 reserved_66[0x220];
804};
805
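With the reworked cmd_hca_cap layout, the port flavor is read straight off the capability page instead of a parsed software struct; a sketch:

/* Sketch: port_type and num_ports are plain capability fields now. */
if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
        pr_debug("%d ethernet port(s)\n", MLX5_CAP_GEN(mdev, num_ports));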
806enum {
807 MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
808 MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
809};
810
811struct mlx5_ifc_dest_format_struct_bits {
812 u8 destination_type[0x8];
813 u8 destination_id[0x18];
814
815 u8 reserved_0[0x20];
816};
817
818struct mlx5_ifc_fte_match_param_bits {
819 struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
820
821 struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
822
823 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
824
825 u8 reserved_0[0xa00];
826};
827
828enum {
829 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
830 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
831 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
832 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
833 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
834};
835
836struct mlx5_ifc_rx_hash_field_select_bits {
837 u8 l3_prot_type[0x1];
838 u8 l4_prot_type[0x1];
839 u8 selected_fields[0x1e];
840};
841
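rx_hash_field_select is a 32-bit selector: l3_prot_type/l4_prot_type pick the protocol pair and selected_fields is a bitmask whose bit positions match the MLX5_HASH_FIELD_SEL_* values in device.h. A sketch selecting a TCP/IPv4 4-tuple (filled standalone here; in practice the selector is assumed to live inside the TIR context, per the MLX5_MODIFY_TIR_BITMASK_HASH flag above):

/* Sketch: hash on src/dst IP plus TCP source/destination port. */
u32 hfs[MLX5_ST_SZ_DW(rx_hash_field_select)] = {};

MLX5_SET(rx_hash_field_select, hfs, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfs, l4_prot_type, MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfs, selected_fields,
         MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP |
         MLX5_HASH_FIELD_SEL_L4_SPORT | MLX5_HASH_FIELD_SEL_L4_DPORT);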
842enum {
843 MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0,
844 MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1,
845};
846
847enum {
848 MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0,
849 MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1,
850};
851
852struct mlx5_ifc_wq_bits {
853 u8 wq_type[0x4];
854 u8 wq_signature[0x1];
855 u8 end_padding_mode[0x2];
856 u8 cd_slave[0x1];
857 u8 reserved_0[0x18];
858
859 u8 hds_skip_first_sge[0x1];
860 u8 log2_hds_buf_size[0x3];
861 u8 reserved_1[0x7];
862 u8 page_offset[0x5];
863 u8 lwm[0x10];
864
865 u8 reserved_2[0x8];
866 u8 pd[0x18];
867
868 u8 reserved_3[0x8];
869 u8 uar_page[0x18];
870
871 u8 dbr_addr[0x40];
872
873 u8 hw_counter[0x20];
874
875 u8 sw_counter[0x20];
876
877 u8 reserved_4[0xc];
878 u8 log_wq_stride[0x4];
879 u8 reserved_5[0x3];
880 u8 log_wq_pg_sz[0x5];
881 u8 reserved_6[0x3];
882 u8 log_wq_sz[0x5];
883
884 u8 reserved_7[0x4e0];
885
886 struct mlx5_ifc_cmd_pas_bits pas[0];
887};
888
889struct mlx5_ifc_rq_num_bits {
890 u8 reserved_0[0x8];
891 u8 rq_num[0x18];
892};
893
894struct mlx5_ifc_mac_address_layout_bits {
895 u8 reserved_0[0x10];
896 u8 mac_addr_47_32[0x10];
897
898 u8 mac_addr_31_0[0x20];
899};
900
901struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
902 u8 reserved_0[0xa0];
903
904 u8 min_time_between_cnps[0x20];
905
906 u8 reserved_1[0x12];
907 u8 cnp_dscp[0x6];
908 u8 reserved_2[0x5];
909 u8 cnp_802p_prio[0x3];
910
911 u8 reserved_3[0x720];
912};
913
914struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
915 u8 reserved_0[0x60];
916
917 u8 reserved_1[0x4];
918 u8 clamp_tgt_rate[0x1];
919 u8 reserved_2[0x3];
920 u8 clamp_tgt_rate_after_time_inc[0x1];
921 u8 reserved_3[0x17];
922
923 u8 reserved_4[0x20];
924
925 u8 rpg_time_reset[0x20];
926
927 u8 rpg_byte_reset[0x20];
928
929 u8 rpg_threshold[0x20];
930
931 u8 rpg_max_rate[0x20];
932
933 u8 rpg_ai_rate[0x20];
934
935 u8 rpg_hai_rate[0x20];
936
937 u8 rpg_gd[0x20];
938
939 u8 rpg_min_dec_fac[0x20];
940
941 u8 rpg_min_rate[0x20];
942
943 u8 reserved_5[0xe0];
944
945 u8 rate_to_set_on_first_cnp[0x20];
946
947 u8 dce_tcp_g[0x20];
948
949 u8 dce_tcp_rtt[0x20];
950
951 u8 rate_reduce_monitor_period[0x20];
952
953 u8 reserved_6[0x20];
954
955 u8 initial_alpha_value[0x20];
956
957 u8 reserved_7[0x4a0];
958};
959
960struct mlx5_ifc_cong_control_802_1qau_rp_bits {
961 u8 reserved_0[0x80];
962
963 u8 rppp_max_rps[0x20];
964
965 u8 rpg_time_reset[0x20];
966
967 u8 rpg_byte_reset[0x20];
968
969 u8 rpg_threshold[0x20];
970
971 u8 rpg_max_rate[0x20];
972
973 u8 rpg_ai_rate[0x20];
974
975 u8 rpg_hai_rate[0x20];
976
977 u8 rpg_gd[0x20];
978
979 u8 rpg_min_dec_fac[0x20];
980
981 u8 rpg_min_rate[0x20];
982
983 u8 reserved_1[0x640];
984};
985
986enum {
987 MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1,
988 MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2,
989 MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4,
990};
991
992struct mlx5_ifc_resize_field_select_bits {
993 u8 resize_field_select[0x20];
994};
995
996enum {
997 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
998 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
999 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4,
1000 MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8,
1001};
1002
1003struct mlx5_ifc_modify_field_select_bits {
1004 u8 modify_field_select[0x20];
1005};
1006
1007struct mlx5_ifc_field_select_r_roce_np_bits {
1008 u8 field_select_r_roce_np[0x20];
1009};
1010
1011struct mlx5_ifc_field_select_r_roce_rp_bits {
1012 u8 field_select_r_roce_rp[0x20];
1013};
1014
1015enum {
1016 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4,
1017 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8,
1018 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10,
1019 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20,
1020 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40,
1021 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80,
1022 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100,
1023 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200,
1024 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400,
1025 MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800,
1026};
1027
1028struct mlx5_ifc_field_select_802_1qau_rp_bits {
1029 u8 field_select_8021qaurp[0x20];
1030};
1031
1032struct mlx5_ifc_phys_layer_cntrs_bits {
1033 u8 time_since_last_clear_high[0x20];
1034
1035 u8 time_since_last_clear_low[0x20];
1036
1037 u8 symbol_errors_high[0x20];
1038
1039 u8 symbol_errors_low[0x20];
1040
1041 u8 sync_headers_errors_high[0x20];
1042
1043 u8 sync_headers_errors_low[0x20];
1044
1045 u8 edpl_bip_errors_lane0_high[0x20];
1046
1047 u8 edpl_bip_errors_lane0_low[0x20];
1048
1049 u8 edpl_bip_errors_lane1_high[0x20];
1050
1051 u8 edpl_bip_errors_lane1_low[0x20];
1052
1053 u8 edpl_bip_errors_lane2_high[0x20];
1054
1055 u8 edpl_bip_errors_lane2_low[0x20];
1056
1057 u8 edpl_bip_errors_lane3_high[0x20];
1058
1059 u8 edpl_bip_errors_lane3_low[0x20];
1060
1061 u8 fc_fec_corrected_blocks_lane0_high[0x20];
1062
1063 u8 fc_fec_corrected_blocks_lane0_low[0x20];
1064
1065 u8 fc_fec_corrected_blocks_lane1_high[0x20];
1066
1067 u8 fc_fec_corrected_blocks_lane1_low[0x20];
1068
1069 u8 fc_fec_corrected_blocks_lane2_high[0x20];
1070
1071 u8 fc_fec_corrected_blocks_lane2_low[0x20];
1072
1073 u8 fc_fec_corrected_blocks_lane3_high[0x20];
1074
1075 u8 fc_fec_corrected_blocks_lane3_low[0x20];
1076
1077 u8 fc_fec_uncorrectable_blocks_lane0_high[0x20];
1078
1079 u8 fc_fec_uncorrectable_blocks_lane0_low[0x20];
1080
1081 u8 fc_fec_uncorrectable_blocks_lane1_high[0x20];
1082
1083 u8 fc_fec_uncorrectable_blocks_lane1_low[0x20];
1084
1085 u8 fc_fec_uncorrectable_blocks_lane2_high[0x20];
1086
1087 u8 fc_fec_uncorrectable_blocks_lane2_low[0x20];
1088
1089 u8 fc_fec_uncorrectable_blocks_lane3_high[0x20];
1090
1091 u8 fc_fec_uncorrectable_blocks_lane3_low[0x20];
1092
1093 u8 rs_fec_corrected_blocks_high[0x20];
1094
1095 u8 rs_fec_corrected_blocks_low[0x20];
1096
1097 u8 rs_fec_uncorrectable_blocks_high[0x20];
1098
1099 u8 rs_fec_uncorrectable_blocks_low[0x20];
1100
1101 u8 rs_fec_no_errors_blocks_high[0x20];
1102
1103 u8 rs_fec_no_errors_blocks_low[0x20];
1104
1105 u8 rs_fec_single_error_blocks_high[0x20];
1106
1107 u8 rs_fec_single_error_blocks_low[0x20];
1108
1109 u8 rs_fec_corrected_symbols_total_high[0x20];
1110
1111 u8 rs_fec_corrected_symbols_total_low[0x20];
1112
1113 u8 rs_fec_corrected_symbols_lane0_high[0x20];
1114
1115 u8 rs_fec_corrected_symbols_lane0_low[0x20];
1116
1117 u8 rs_fec_corrected_symbols_lane1_high[0x20];
1118
1119 u8 rs_fec_corrected_symbols_lane1_low[0x20];
1120
1121 u8 rs_fec_corrected_symbols_lane2_high[0x20];
1122
1123 u8 rs_fec_corrected_symbols_lane2_low[0x20];
1124
1125 u8 rs_fec_corrected_symbols_lane3_high[0x20];
1126
1127 u8 rs_fec_corrected_symbols_lane3_low[0x20];
1128
1129 u8 link_down_events[0x20];
1130
1131 u8 successful_recovery_events[0x20];
1132
1133 u8 reserved_0[0x180];
1134};
1135
1136struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
1137 u8 transmit_queue_high[0x20];
1138
1139 u8 transmit_queue_low[0x20];
1140
1141 u8 reserved_0[0x780];
1142};
1143
1144struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1145 u8 rx_octets_high[0x20];
1146
1147 u8 rx_octets_low[0x20];
1148
1149 u8 reserved_0[0xc0];
1150
1151 u8 rx_frames_high[0x20];
1152
1153 u8 rx_frames_low[0x20];
1154
1155 u8 tx_octets_high[0x20];
1156
1157 u8 tx_octets_low[0x20];
1158
1159 u8 reserved_1[0xc0];
1160
1161 u8 tx_frames_high[0x20];
1162
1163 u8 tx_frames_low[0x20];
1164
1165 u8 rx_pause_high[0x20];
1166
1167 u8 rx_pause_low[0x20];
1168
1169 u8 rx_pause_duration_high[0x20];
1170
1171 u8 rx_pause_duration_low[0x20];
1172
1173 u8 tx_pause_high[0x20];
1174
1175 u8 tx_pause_low[0x20];
1176
1177 u8 tx_pause_duration_high[0x20];
1178
1179 u8 tx_pause_duration_low[0x20];
1180
1181 u8 rx_pause_transition_high[0x20];
1182
1183 u8 rx_pause_transition_low[0x20];
1184
1185 u8 reserved_2[0x400];
1186};
1187
1188struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
1189 u8 port_transmit_wait_high[0x20];
1190
1191 u8 port_transmit_wait_low[0x20];
1192
1193 u8 reserved_0[0x780];
1194};
1195
1196struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
1197 u8 dot3stats_alignment_errors_high[0x20];
1198
1199 u8 dot3stats_alignment_errors_low[0x20];
1200
1201 u8 dot3stats_fcs_errors_high[0x20];
1202
1203 u8 dot3stats_fcs_errors_low[0x20];
1204
1205 u8 dot3stats_single_collision_frames_high[0x20];
1206
1207 u8 dot3stats_single_collision_frames_low[0x20];
1208
1209 u8 dot3stats_multiple_collision_frames_high[0x20];
1210
1211 u8 dot3stats_multiple_collision_frames_low[0x20];
1212
1213 u8 dot3stats_sqe_test_errors_high[0x20];
1214
1215 u8 dot3stats_sqe_test_errors_low[0x20];
1216
1217 u8 dot3stats_deferred_transmissions_high[0x20];
1218
1219 u8 dot3stats_deferred_transmissions_low[0x20];
1220
1221 u8 dot3stats_late_collisions_high[0x20];
1222
1223 u8 dot3stats_late_collisions_low[0x20];
1224
1225 u8 dot3stats_excessive_collisions_high[0x20];
1226
1227 u8 dot3stats_excessive_collisions_low[0x20];
1228
1229 u8 dot3stats_internal_mac_transmit_errors_high[0x20];
1230
1231 u8 dot3stats_internal_mac_transmit_errors_low[0x20];
1232
1233 u8 dot3stats_carrier_sense_errors_high[0x20];
1234
1235 u8 dot3stats_carrier_sense_errors_low[0x20];
1236
1237 u8 dot3stats_frame_too_longs_high[0x20];
1238
1239 u8 dot3stats_frame_too_longs_low[0x20];
1240
1241 u8 dot3stats_internal_mac_receive_errors_high[0x20];
1242
1243 u8 dot3stats_internal_mac_receive_errors_low[0x20];
1244
1245 u8 dot3stats_symbol_errors_high[0x20];
1246
1247 u8 dot3stats_symbol_errors_low[0x20];
1248
1249 u8 dot3control_in_unknown_opcodes_high[0x20];
1250
1251 u8 dot3control_in_unknown_opcodes_low[0x20];
1252
1253 u8 dot3in_pause_frames_high[0x20];
1254
1255 u8 dot3in_pause_frames_low[0x20];
1256
1257 u8 dot3out_pause_frames_high[0x20];
1258
1259 u8 dot3out_pause_frames_low[0x20];
1260
1261 u8 reserved_0[0x3c0];
1262};
1263
1264struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
1265 u8 ether_stats_drop_events_high[0x20];
1266
1267 u8 ether_stats_drop_events_low[0x20];
1268
1269 u8 ether_stats_octets_high[0x20];
1270
1271 u8 ether_stats_octets_low[0x20];
1272
1273 u8 ether_stats_pkts_high[0x20];
1274
1275 u8 ether_stats_pkts_low[0x20];
1276
1277 u8 ether_stats_broadcast_pkts_high[0x20];
1278
1279 u8 ether_stats_broadcast_pkts_low[0x20];
1280
1281 u8 ether_stats_multicast_pkts_high[0x20];
1282
1283 u8 ether_stats_multicast_pkts_low[0x20];
1284
1285 u8 ether_stats_crc_align_errors_high[0x20];
1286
1287 u8 ether_stats_crc_align_errors_low[0x20];
1288
1289 u8 ether_stats_undersize_pkts_high[0x20];
1290
1291 u8 ether_stats_undersize_pkts_low[0x20];
1292
1293 u8 ether_stats_oversize_pkts_high[0x20];
1294
1295 u8 ether_stats_oversize_pkts_low[0x20];
1296
1297 u8 ether_stats_fragments_high[0x20];
1298
1299 u8 ether_stats_fragments_low[0x20];
1300
1301 u8 ether_stats_jabbers_high[0x20];
1302
1303 u8 ether_stats_jabbers_low[0x20];
1304
1305 u8 ether_stats_collisions_high[0x20];
1306
1307 u8 ether_stats_collisions_low[0x20];
1308
1309 u8 ether_stats_pkts64octets_high[0x20];
1310
1311 u8 ether_stats_pkts64octets_low[0x20];
1312
1313 u8 ether_stats_pkts65to127octets_high[0x20];
1314
1315 u8 ether_stats_pkts65to127octets_low[0x20];
1316
1317 u8 ether_stats_pkts128to255octets_high[0x20];
1318
1319 u8 ether_stats_pkts128to255octets_low[0x20];
1320
1321 u8 ether_stats_pkts256to511octets_high[0x20];
1322
1323 u8 ether_stats_pkts256to511octets_low[0x20];
1324
1325 u8 ether_stats_pkts512to1023octets_high[0x20];
1326
1327 u8 ether_stats_pkts512to1023octets_low[0x20];
1328
1329 u8 ether_stats_pkts1024to1518octets_high[0x20];
1330
1331 u8 ether_stats_pkts1024to1518octets_low[0x20];
1332
1333 u8 ether_stats_pkts1519to2047octets_high[0x20];
1334
1335 u8 ether_stats_pkts1519to2047octets_low[0x20];
1336
1337 u8 ether_stats_pkts2048to4095octets_high[0x20];
1338
1339 u8 ether_stats_pkts2048to4095octets_low[0x20];
1340
1341 u8 ether_stats_pkts4096to8191octets_high[0x20];
1342
1343 u8 ether_stats_pkts4096to8191octets_low[0x20];
1344
1345 u8 ether_stats_pkts8192to10239octets_high[0x20];
1346
1347 u8 ether_stats_pkts8192to10239octets_low[0x20];
1348
1349 u8 reserved_0[0x280];
1350};
1351
1352struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
1353 u8 if_in_octets_high[0x20];
1354
1355 u8 if_in_octets_low[0x20];
1356
1357 u8 if_in_ucast_pkts_high[0x20];
1358
1359 u8 if_in_ucast_pkts_low[0x20];
1360
1361 u8 if_in_discards_high[0x20];
1362
1363 u8 if_in_discards_low[0x20];
1364
1365 u8 if_in_errors_high[0x20];
1366
1367 u8 if_in_errors_low[0x20];
1368
1369 u8 if_in_unknown_protos_high[0x20];
1370
1371 u8 if_in_unknown_protos_low[0x20];
1372
1373 u8 if_out_octets_high[0x20];
1374
1375 u8 if_out_octets_low[0x20];
1376
1377 u8 if_out_ucast_pkts_high[0x20];
1378
1379 u8 if_out_ucast_pkts_low[0x20];
1380
1381 u8 if_out_discards_high[0x20];
1382
1383 u8 if_out_discards_low[0x20];
1384
1385 u8 if_out_errors_high[0x20];
1386
1387 u8 if_out_errors_low[0x20];
1388
1389 u8 if_in_multicast_pkts_high[0x20];
1390
1391 u8 if_in_multicast_pkts_low[0x20];
1392
1393 u8 if_in_broadcast_pkts_high[0x20];
1394
1395 u8 if_in_broadcast_pkts_low[0x20];
1396
1397 u8 if_out_multicast_pkts_high[0x20];
1398
1399 u8 if_out_multicast_pkts_low[0x20];
1400
1401 u8 if_out_broadcast_pkts_high[0x20];
1402
1403 u8 if_out_broadcast_pkts_low[0x20];
1404
1405 u8 reserved_0[0x480];
1406};
1407
1408struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
1409 u8 a_frames_transmitted_ok_high[0x20];
1410
1411 u8 a_frames_transmitted_ok_low[0x20];
1412
1413 u8 a_frames_received_ok_high[0x20];
1414
1415 u8 a_frames_received_ok_low[0x20];
1416
1417 u8 a_frame_check_sequence_errors_high[0x20];
1418
1419 u8 a_frame_check_sequence_errors_low[0x20];
1420
1421 u8 a_alignment_errors_high[0x20];
1422
1423 u8 a_alignment_errors_low[0x20];
1424
1425 u8 a_octets_transmitted_ok_high[0x20];
1426
1427 u8 a_octets_transmitted_ok_low[0x20];
1428
1429 u8 a_octets_received_ok_high[0x20];
1430
1431 u8 a_octets_received_ok_low[0x20];
1432
1433 u8 a_multicast_frames_xmitted_ok_high[0x20];
1434
1435 u8 a_multicast_frames_xmitted_ok_low[0x20];
1436
1437 u8 a_broadcast_frames_xmitted_ok_high[0x20];
1438
1439 u8 a_broadcast_frames_xmitted_ok_low[0x20];
1440
1441 u8 a_multicast_frames_received_ok_high[0x20];
1442
1443 u8 a_multicast_frames_received_ok_low[0x20];
1444
1445 u8 a_broadcast_frames_received_ok_high[0x20];
1446
1447 u8 a_broadcast_frames_received_ok_low[0x20];
1448
1449 u8 a_in_range_length_errors_high[0x20];
1450
1451 u8 a_in_range_length_errors_low[0x20];
1452
1453 u8 a_out_of_range_length_field_high[0x20];
1454
1455 u8 a_out_of_range_length_field_low[0x20];
1456
1457 u8 a_frame_too_long_errors_high[0x20];
1458
1459 u8 a_frame_too_long_errors_low[0x20];
1460
1461 u8 a_symbol_error_during_carrier_high[0x20];
1462
1463 u8 a_symbol_error_during_carrier_low[0x20];
1464
1465 u8 a_mac_control_frames_transmitted_high[0x20];
1466
1467 u8 a_mac_control_frames_transmitted_low[0x20];
1468
1469 u8 a_mac_control_frames_received_high[0x20];
1470
1471 u8 a_mac_control_frames_received_low[0x20];
1472
1473 u8 a_unsupported_opcodes_received_high[0x20];
1474
1475 u8 a_unsupported_opcodes_received_low[0x20];
1476
1477 u8 a_pause_mac_ctrl_frames_received_high[0x20];
1478
1479 u8 a_pause_mac_ctrl_frames_received_low[0x20];
1480
1481 u8 a_pause_mac_ctrl_frames_transmitted_high[0x20];
1482
1483 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
1484
1485 u8 reserved_0[0x300];
1486};
1487
1488struct mlx5_ifc_cmd_inter_comp_event_bits {
1489 u8 command_completion_vector[0x20];
1490
1491 u8 reserved_0[0xc0];
1492};
1493
1494struct mlx5_ifc_stall_vl_event_bits {
1495 u8 reserved_0[0x18];
1496 u8 port_num[0x1];
1497 u8 reserved_1[0x3];
1498 u8 vl[0x4];
1499
1500 u8 reserved_2[0xa0];
1501};
1502
1503struct mlx5_ifc_db_bf_congestion_event_bits {
1504 u8 event_subtype[0x8];
1505 u8 reserved_0[0x8];
1506 u8 congestion_level[0x8];
1507 u8 reserved_1[0x8];
1508
1509 u8 reserved_2[0xa0];
1510};
1511
1512struct mlx5_ifc_gpio_event_bits {
1513 u8 reserved_0[0x60];
1514
1515 u8 gpio_event_hi[0x20];
1516
1517 u8 gpio_event_lo[0x20];
1518
1519 u8 reserved_1[0x40];
1520};
1521
1522struct mlx5_ifc_port_state_change_event_bits {
1523 u8 reserved_0[0x40];
1524
1525 u8 port_num[0x4];
1526 u8 reserved_1[0x1c];
1527
1528 u8 reserved_2[0x80];
1529};
1530
1531struct mlx5_ifc_dropped_packet_logged_bits {
1532 u8 reserved_0[0xe0];
1533};
1534
1535enum {
1536 MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
1537 MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
1538};
1539
1540struct mlx5_ifc_cq_error_bits {
1541 u8 reserved_0[0x8];
1542 u8 cqn[0x18];
1543
1544 u8 reserved_1[0x20];
1545
1546 u8 reserved_2[0x18];
1547 u8 syndrome[0x8];
1548
1549 u8 reserved_3[0x80];
1550};
1551
1552struct mlx5_ifc_rdma_page_fault_event_bits {
1553 u8 bytes_committed[0x20];
1554
1555 u8 r_key[0x20];
1556
1557 u8 reserved_0[0x10];
1558 u8 packet_len[0x10];
1559
1560 u8 rdma_op_len[0x20];
1561
1562 u8 rdma_va[0x40];
1563
1564 u8 reserved_1[0x5];
1565 u8 rdma[0x1];
1566 u8 write[0x1];
1567 u8 requestor[0x1];
1568 u8 qp_number[0x18];
1569};
1570
1571struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1572 u8 bytes_committed[0x20];
1573
1574 u8 reserved_0[0x10];
1575 u8 wqe_index[0x10];
1576
1577 u8 reserved_1[0x10];
1578 u8 len[0x10];
1579
1580 u8 reserved_2[0x60];
1581
1582 u8 reserved_3[0x5];
1583 u8 rdma[0x1];
1584 u8 write_read[0x1];
1585 u8 requestor[0x1];
1586 u8 qpn[0x18];
1587};
1588
1589struct mlx5_ifc_qp_events_bits {
1590 u8 reserved_0[0xa0];
1591
1592 u8 type[0x8];
1593 u8 reserved_1[0x18];
1594
1595 u8 reserved_2[0x8];
1596 u8 qpn_rqn_sqn[0x18];
1597};
1598
1599struct mlx5_ifc_dct_events_bits {
1600 u8 reserved_0[0xc0];
1601
1602 u8 reserved_1[0x8];
1603 u8 dct_number[0x18];
1604};
1605
1606struct mlx5_ifc_comp_event_bits {
1607 u8 reserved_0[0xc0];
1608
1609 u8 reserved_1[0x8];
1610 u8 cq_number[0x18];
1611};
1612
1613enum {
1614 MLX5_QPC_STATE_RST = 0x0,
1615 MLX5_QPC_STATE_INIT = 0x1,
1616 MLX5_QPC_STATE_RTR = 0x2,
1617 MLX5_QPC_STATE_RTS = 0x3,
1618 MLX5_QPC_STATE_SQER = 0x4,
1619 MLX5_QPC_STATE_ERR = 0x6,
1620 MLX5_QPC_STATE_SQD = 0x7,
1621 MLX5_QPC_STATE_SUSPENDED = 0x9,
1622};
1623
1624enum {
1625 MLX5_QPC_ST_RC = 0x0,
1626 MLX5_QPC_ST_UC = 0x1,
1627 MLX5_QPC_ST_UD = 0x2,
1628 MLX5_QPC_ST_XRC = 0x3,
1629 MLX5_QPC_ST_DCI = 0x5,
1630 MLX5_QPC_ST_QP0 = 0x7,
1631 MLX5_QPC_ST_QP1 = 0x8,
1632 MLX5_QPC_ST_RAW_DATAGRAM = 0x9,
1633 MLX5_QPC_ST_REG_UMR = 0xc,
1634};
1635
1636enum {
1637 MLX5_QPC_PM_STATE_ARMED = 0x0,
1638 MLX5_QPC_PM_STATE_REARM = 0x1,
1639 MLX5_QPC_PM_STATE_RESERVED = 0x2,
1640 MLX5_QPC_PM_STATE_MIGRATED = 0x3,
1641};
1642
1643enum {
1644 MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
1645 MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
1646};
1647
1648enum {
1649 MLX5_QPC_MTU_256_BYTES = 0x1,
1650 MLX5_QPC_MTU_512_BYTES = 0x2,
1651 MLX5_QPC_MTU_1K_BYTES = 0x3,
1652 MLX5_QPC_MTU_2K_BYTES = 0x4,
1653 MLX5_QPC_MTU_4K_BYTES = 0x5,
1654 MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7,
1655};
1656
1657enum {
1658 MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1,
1659 MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2,
1660 MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3,
1661 MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4,
1662 MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5,
1663 MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6,
1664 MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7,
1665 MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8,
1666};
1667
1668enum {
1669 MLX5_QPC_CS_REQ_DISABLE = 0x0,
1670 MLX5_QPC_CS_REQ_UP_TO_32B = 0x11,
1671 MLX5_QPC_CS_REQ_UP_TO_64B = 0x22,
1672};
1673
1674enum {
1675 MLX5_QPC_CS_RES_DISABLE = 0x0,
1676 MLX5_QPC_CS_RES_UP_TO_32B = 0x1,
1677 MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
1678};
1679
1680struct mlx5_ifc_qpc_bits {
1681 u8 state[0x4];
1682 u8 reserved_0[0x4];
1683 u8 st[0x8];
1684 u8 reserved_1[0x3];
1685 u8 pm_state[0x2];
1686 u8 reserved_2[0x7];
1687 u8 end_padding_mode[0x2];
1688 u8 reserved_3[0x2];
1689
1690 u8 wq_signature[0x1];
1691 u8 block_lb_mc[0x1];
1692 u8 atomic_like_write_en[0x1];
1693 u8 latency_sensitive[0x1];
1694 u8 reserved_4[0x1];
1695 u8 drain_sigerr[0x1];
1696 u8 reserved_5[0x2];
1697 u8 pd[0x18];
1698
1699 u8 mtu[0x3];
1700 u8 log_msg_max[0x5];
1701 u8 reserved_6[0x1];
1702 u8 log_rq_size[0x4];
1703 u8 log_rq_stride[0x3];
1704 u8 no_sq[0x1];
1705 u8 log_sq_size[0x4];
1706 u8 reserved_7[0x6];
1707 u8 rlky[0x1];
1708 u8 reserved_8[0x4];
1709
1710 u8 counter_set_id[0x8];
1711 u8 uar_page[0x18];
1712
1713 u8 reserved_9[0x8];
1714 u8 user_index[0x18];
1715
1716 u8 reserved_10[0x3];
1717 u8 log_page_size[0x5];
1718 u8 remote_qpn[0x18];
1719
1720 struct mlx5_ifc_ads_bits primary_address_path;
1721
1722 struct mlx5_ifc_ads_bits secondary_address_path;
1723
1724 u8 log_ack_req_freq[0x4];
1725 u8 reserved_11[0x4];
1726 u8 log_sra_max[0x3];
1727 u8 reserved_12[0x2];
1728 u8 retry_count[0x3];
1729 u8 rnr_retry[0x3];
1730 u8 reserved_13[0x1];
1731 u8 fre[0x1];
1732 u8 cur_rnr_retry[0x3];
1733 u8 cur_retry_count[0x3];
1734 u8 reserved_14[0x5];
1735
1736 u8 reserved_15[0x20];
1737
1738 u8 reserved_16[0x8];
1739 u8 next_send_psn[0x18];
1740
1741 u8 reserved_17[0x8];
1742 u8 cqn_snd[0x18];
1743
1744 u8 reserved_18[0x40];
1745
1746 u8 reserved_19[0x8];
1747 u8 last_acked_psn[0x18];
1748
1749 u8 reserved_20[0x8];
1750 u8 ssn[0x18];
1751
1752 u8 reserved_21[0x8];
1753 u8 log_rra_max[0x3];
1754 u8 reserved_22[0x1];
1755 u8 atomic_mode[0x4];
1756 u8 rre[0x1];
1757 u8 rwe[0x1];
1758 u8 rae[0x1];
1759 u8 reserved_23[0x1];
1760 u8 page_offset[0x6];
1761 u8 reserved_24[0x3];
1762 u8 cd_slave_receive[0x1];
1763 u8 cd_slave_send[0x1];
1764 u8 cd_master[0x1];
1765
1766 u8 reserved_25[0x3];
1767 u8 min_rnr_nak[0x5];
1768 u8 next_rcv_psn[0x18];
1769
1770 u8 reserved_26[0x8];
1771 u8 xrcd[0x18];
1772
1773 u8 reserved_27[0x8];
1774 u8 cqn_rcv[0x18];
1775
1776 u8 dbr_addr[0x40];
1777
1778 u8 q_key[0x20];
1779
1780 u8 reserved_28[0x5];
1781 u8 rq_type[0x3];
1782 u8 srqn_rmpn[0x18];
1783
1784 u8 reserved_29[0x8];
1785 u8 rmsn[0x18];
1786
1787 u8 hw_sq_wqebb_counter[0x10];
1788 u8 sw_sq_wqebb_counter[0x10];
1789
1790 u8 hw_rq_counter[0x20];
1791
1792 u8 sw_rq_counter[0x20];
1793
1794 u8 reserved_30[0x20];
1795
1796 u8 reserved_31[0xf];
1797 u8 cgs[0x1];
1798 u8 cs_req[0x8];
1799 u8 cs_res[0x8];
1800
1801 u8 dc_access_key[0x40];
1802
1803 u8 reserved_32[0xc0];
1804};
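
The MLX5_QPC_* enums above give the legal values of the like-named qpc fields. A minimal sketch of filling a few of them, assuming the MLX5_SET() helper from <linux/mlx5/device.h> (the helper and this hypothetical function are the only things not in the diff itself):

/* Sketch: qpc fields are written by name, using the MLX5_QPC_*
 * values defined above.
 */
static void example_fill_qpc(void *qpc, u32 pd, u32 uar_page)
{
	MLX5_SET(qpc, qpc, st, MLX5_QPC_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_4K_BYTES);
	MLX5_SET(qpc, qpc, pd, pd);
	MLX5_SET(qpc, qpc, uar_page, uar_page);
}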
1805
1806struct mlx5_ifc_roce_addr_layout_bits {
1807 u8 source_l3_address[16][0x8];
1808
1809 u8 reserved_0[0x3];
1810 u8 vlan_valid[0x1];
1811 u8 vlan_id[0xc];
1812 u8 source_mac_47_32[0x10];
1813
1814 u8 source_mac_31_0[0x20];
1815
1816 u8 reserved_1[0x14];
1817 u8 roce_l3_type[0x4];
1818 u8 roce_version[0x8];
1819
1820 u8 reserved_2[0x20];
1821};
1822
1823union mlx5_ifc_hca_cap_union_bits {
1824 struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
1825 struct mlx5_ifc_odp_cap_bits odp_cap;
1826 struct mlx5_ifc_atomic_caps_bits atomic_caps;
1827 struct mlx5_ifc_roce_cap_bits roce_cap;
1828 struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
1829 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1830 u8 reserved_0[0x8000];
1831};
1832
1833enum {
1834 MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
1835 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
1836 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
1837};
1838
1839struct mlx5_ifc_flow_context_bits {
1840 u8 reserved_0[0x20];
1841
1842 u8 group_id[0x20];
1843
1844 u8 reserved_1[0x8];
1845 u8 flow_tag[0x18];
1846
1847 u8 reserved_2[0x10];
1848 u8 action[0x10];
1849
1850 u8 reserved_3[0x8];
1851 u8 destination_list_size[0x18];
1852
1853 u8 reserved_4[0x160];
1854
1855 struct mlx5_ifc_fte_match_param_bits match_value;
1856
1857 u8 reserved_5[0x600];
1858
1859 struct mlx5_ifc_dest_format_struct_bits destination[0];
1860};
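
The trailing destination[0] member is a flexible tail: destination_list_size entries of dest_format_struct follow the fixed part of the flow context. A sizing sketch, assuming the MLX5_ST_SZ_BYTES() macro from <linux/mlx5/device.h>; the helper function itself is hypothetical:

/* Sketch: total byte size of one flow context carrying n_dests
 * destination entries in its flexible tail.
 */
static size_t flow_context_total_size(int n_dests)
{
	return MLX5_ST_SZ_BYTES(flow_context) +
	       n_dests * MLX5_ST_SZ_BYTES(dest_format_struct);
}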
1861
1862enum {
1863 MLX5_XRC_SRQC_STATE_GOOD = 0x0,
1864 MLX5_XRC_SRQC_STATE_ERROR = 0x1,
1865};
1866
1867struct mlx5_ifc_xrc_srqc_bits {
1868 u8 state[0x4];
1869 u8 log_xrc_srq_size[0x4];
1870 u8 reserved_0[0x18];
1871
1872 u8 wq_signature[0x1];
1873 u8 cont_srq[0x1];
1874 u8 reserved_1[0x1];
1875 u8 rlky[0x1];
1876 u8 basic_cyclic_rcv_wqe[0x1];
1877 u8 log_rq_stride[0x3];
1878 u8 xrcd[0x18];
1879
1880 u8 page_offset[0x6];
1881 u8 reserved_2[0x2];
1882 u8 cqn[0x18];
1883
1884 u8 reserved_3[0x20];
1885
1886 u8 user_index_equal_xrc_srqn[0x1];
1887 u8 reserved_4[0x1];
1888 u8 log_page_size[0x6];
1889 u8 user_index[0x18];
1890
1891 u8 reserved_5[0x20];
1892
1893 u8 reserved_6[0x8];
1894 u8 pd[0x18];
1895
1896 u8 lwm[0x10];
1897 u8 wqe_cnt[0x10];
1898
1899 u8 reserved_7[0x40];
1900
1901 u8 db_record_addr_h[0x20];
1902
1903 u8 db_record_addr_l[0x1e];
1904 u8 reserved_8[0x2];
1905
1906 u8 reserved_9[0x80];
1907};
1908
1909struct mlx5_ifc_traffic_counter_bits {
1910 u8 packets[0x40];
1911
1912 u8 octets[0x40];
1913};
1914
1915struct mlx5_ifc_tisc_bits {
1916 u8 reserved_0[0xc];
1917 u8 prio[0x4];
1918 u8 reserved_1[0x10];
1919
1920 u8 reserved_2[0x100];
1921
1922 u8 reserved_3[0x8];
1923 u8 transport_domain[0x18];
1924
1925 u8 reserved_4[0x3c0];
1926};
1927
1928enum {
1929 MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
1930 MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
1931};
1932
1933enum {
1934 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
1935 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
1936};
1937
1938enum {
1939 MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
1940 MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
1941 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
1942};
1943
1944enum {
1945 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
1946 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
1947};
1948
1949struct mlx5_ifc_tirc_bits {
1950 u8 reserved_0[0x20];
1951
1952 u8 disp_type[0x4];
1953 u8 reserved_1[0x1c];
1954
1955 u8 reserved_2[0x40];
1956
1957 u8 reserved_3[0x4];
1958 u8 lro_timeout_period_usecs[0x10];
1959 u8 lro_enable_mask[0x4];
1960 u8 lro_max_ip_payload_size[0x8];
1961
1962 u8 reserved_4[0x40];
1963
1964 u8 reserved_5[0x8];
1965 u8 inline_rqn[0x18];
1966
1967 u8 rx_hash_symmetric[0x1];
1968 u8 reserved_6[0x1];
1969 u8 tunneled_offload_en[0x1];
1970 u8 reserved_7[0x5];
1971 u8 indirect_table[0x18];
1972
1973 u8 rx_hash_fn[0x4];
1974 u8 reserved_8[0x2];
1975 u8 self_lb_block[0x2];
1976 u8 transport_domain[0x18];
1977
1978 u8 rx_hash_toeplitz_key[10][0x20];
1979
1980 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
1981
1982 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
1983
1984 u8 reserved_9[0x4c0];
1985};
1986
1987enum {
1988 MLX5_SRQC_STATE_GOOD = 0x0,
1989 MLX5_SRQC_STATE_ERROR = 0x1,
1990};
1991
1992struct mlx5_ifc_srqc_bits {
1993 u8 state[0x4];
1994 u8 log_srq_size[0x4];
1995 u8 reserved_0[0x18];
1996
1997 u8 wq_signature[0x1];
1998 u8 cont_srq[0x1];
1999 u8 reserved_1[0x1];
2000 u8 rlky[0x1];
2001 u8 reserved_2[0x1];
2002 u8 log_rq_stride[0x3];
2003 u8 xrcd[0x18];
2004
2005 u8 page_offset[0x6];
2006 u8 reserved_3[0x2];
2007 u8 cqn[0x18];
2008
2009 u8 reserved_4[0x20];
2010
2011 u8 reserved_5[0x2];
2012 u8 log_page_size[0x6];
2013 u8 reserved_6[0x18];
2014
2015 u8 reserved_7[0x20];
2016
2017 u8 reserved_8[0x8];
2018 u8 pd[0x18];
2019
2020 u8 lwm[0x10];
2021 u8 wqe_cnt[0x10];
2022
2023 u8 reserved_9[0x40];
2024
2025 u8 dbr_addr[0x40];
2026
2027 u8 reserved_10[0x80];
2028};
2029
2030enum {
2031 MLX5_SQC_STATE_RST = 0x0,
2032 MLX5_SQC_STATE_RDY = 0x1,
2033 MLX5_SQC_STATE_ERR = 0x3,
2034};
2035
2036struct mlx5_ifc_sqc_bits {
2037 u8 rlky[0x1];
2038 u8 cd_master[0x1];
2039 u8 fre[0x1];
2040 u8 flush_in_error_en[0x1];
2041 u8 reserved_0[0x4];
2042 u8 state[0x4];
2043 u8 reserved_1[0x14];
2044
2045 u8 reserved_2[0x8];
2046 u8 user_index[0x18];
2047
2048 u8 reserved_3[0x8];
2049 u8 cqn[0x18];
2050
2051 u8 reserved_4[0xa0];
2052
2053 u8 tis_lst_sz[0x10];
2054 u8 reserved_5[0x10];
2055
2056 u8 reserved_6[0x40];
2057
2058 u8 reserved_7[0x8];
2059 u8 tis_num_0[0x18];
2060
2061 struct mlx5_ifc_wq_bits wq;
2062};
2063
2064struct mlx5_ifc_rqtc_bits {
2065 u8 reserved_0[0xa0];
2066
2067 u8 reserved_1[0x10];
2068 u8 rqt_max_size[0x10];
2069
2070 u8 reserved_2[0x10];
2071 u8 rqt_actual_size[0x10];
2072
2073 u8 reserved_3[0x6a0];
2074
2075 struct mlx5_ifc_rq_num_bits rq_num[0];
2076};
2077
2078enum {
2079 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
2080 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
2081};
2082
2083enum {
2084 MLX5_RQC_STATE_RST = 0x0,
2085 MLX5_RQC_STATE_RDY = 0x1,
2086 MLX5_RQC_STATE_ERR = 0x3,
2087};
2088
2089struct mlx5_ifc_rqc_bits {
2090 u8 rlky[0x1];
2091 u8 reserved_0[0x2];
2092 u8 vsd[0x1];
2093 u8 mem_rq_type[0x4];
2094 u8 state[0x4];
2095 u8 reserved_1[0x1];
2096 u8 flush_in_error_en[0x1];
2097 u8 reserved_2[0x12];
2098
2099 u8 reserved_3[0x8];
2100 u8 user_index[0x18];
2101
2102 u8 reserved_4[0x8];
2103 u8 cqn[0x18];
2104
2105 u8 counter_set_id[0x8];
2106 u8 reserved_5[0x18];
2107
2108 u8 reserved_6[0x8];
2109 u8 rmpn[0x18];
2110
2111 u8 reserved_7[0xe0];
2112
2113 struct mlx5_ifc_wq_bits wq;
2114};
2115
2116enum {
2117 MLX5_RMPC_STATE_RDY = 0x1,
2118 MLX5_RMPC_STATE_ERR = 0x3,
2119};
2120
2121struct mlx5_ifc_rmpc_bits {
2122 u8 reserved_0[0x8];
2123 u8 state[0x4];
2124 u8 reserved_1[0x14];
2125
2126 u8 basic_cyclic_rcv_wqe[0x1];
2127 u8 reserved_2[0x1f];
2128
2129 u8 reserved_3[0x140];
2130
2131 struct mlx5_ifc_wq_bits wq;
2132};
2133
2134enum {
2135 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
2136};
2137
2138struct mlx5_ifc_nic_vport_context_bits {
2139 u8 reserved_0[0x1f];
2140 u8 roce_en[0x1];
2141
2142 u8 reserved_1[0x760];
2143
2144 u8 reserved_2[0x5];
2145 u8 allowed_list_type[0x3];
2146 u8 reserved_3[0xc];
2147 u8 allowed_list_size[0xc];
2148
2149 struct mlx5_ifc_mac_address_layout_bits permanent_address;
2150
2151 u8 reserved_4[0x20];
2152
2153 u8 current_uc_mac_address[0][0x40];
2154};
2155
2156enum {
2157 MLX5_MKC_ACCESS_MODE_PA = 0x0,
2158 MLX5_MKC_ACCESS_MODE_MTT = 0x1,
2159 MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
2160};
2161
2162struct mlx5_ifc_mkc_bits {
2163 u8 reserved_0[0x1];
2164 u8 free[0x1];
2165 u8 reserved_1[0xd];
2166 u8 small_fence_on_rdma_read_response[0x1];
2167 u8 umr_en[0x1];
2168 u8 a[0x1];
2169 u8 rw[0x1];
2170 u8 rr[0x1];
2171 u8 lw[0x1];
2172 u8 lr[0x1];
2173 u8 access_mode[0x2];
2174 u8 reserved_2[0x8];
2175
2176 u8 qpn[0x18];
2177 u8 mkey_7_0[0x8];
2178
2179 u8 reserved_3[0x20];
2180
2181 u8 length64[0x1];
2182 u8 bsf_en[0x1];
2183 u8 sync_umr[0x1];
2184 u8 reserved_4[0x2];
2185 u8 expected_sigerr_count[0x1];
2186 u8 reserved_5[0x1];
2187 u8 en_rinval[0x1];
2188 u8 pd[0x18];
2189
2190 u8 start_addr[0x40];
2191
2192 u8 len[0x40];
2193
2194 u8 bsf_octword_size[0x20];
2195
2196 u8 reserved_6[0x80];
2197
2198 u8 translations_octword_size[0x20];
2199
2200 u8 reserved_7[0x1b];
2201 u8 log_page_size[0x5];
2202
2203 u8 reserved_8[0x20];
2204};
2205
2206struct mlx5_ifc_pkey_bits {
2207 u8 reserved_0[0x10];
2208 u8 pkey[0x10];
2209};
2210
2211struct mlx5_ifc_array128_auto_bits {
2212 u8 array128_auto[16][0x8];
2213};
2214
2215struct mlx5_ifc_hca_vport_context_bits {
2216 u8 field_select[0x20];
2217
2218 u8 reserved_0[0xe0];
2219
2220 u8 sm_virt_aware[0x1];
2221 u8 has_smi[0x1];
2222 u8 has_raw[0x1];
2223 u8 grh_required[0x1];
2224 u8 reserved_1[0xc];
2225 u8 port_physical_state[0x4];
2226 u8 vport_state_policy[0x4];
2227 u8 port_state[0x4];
2228 u8 vport_state[0x4];
2229
2230 u8 reserved_2[0x20];
2231
2232 u8 system_image_guid[0x40];
2233
2234 u8 port_guid[0x40];
2235
2236 u8 node_guid[0x40];
2237
2238 u8 cap_mask1[0x20];
2239
2240 u8 cap_mask1_field_select[0x20];
2241
2242 u8 cap_mask2[0x20];
2243
2244 u8 cap_mask2_field_select[0x20];
2245
2246 u8 reserved_3[0x80];
2247
2248 u8 lid[0x10];
2249 u8 reserved_4[0x4];
2250 u8 init_type_reply[0x4];
2251 u8 lmc[0x3];
2252 u8 subnet_timeout[0x5];
2253
2254 u8 sm_lid[0x10];
2255 u8 sm_sl[0x4];
2256 u8 reserved_5[0xc];
2257
2258 u8 qkey_violation_counter[0x10];
2259 u8 pkey_violation_counter[0x10];
2260
2261 u8 reserved_6[0xca0];
2262};
2263
2264enum {
2265 MLX5_EQC_STATUS_OK = 0x0,
2266 MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
2267};
2268
2269enum {
2270 MLX5_EQC_ST_ARMED = 0x9,
2271 MLX5_EQC_ST_FIRED = 0xa,
2272};
2273
2274struct mlx5_ifc_eqc_bits {
2275 u8 status[0x4];
2276 u8 reserved_0[0x9];
2277 u8 ec[0x1];
2278 u8 oi[0x1];
2279 u8 reserved_1[0x5];
2280 u8 st[0x4];
2281 u8 reserved_2[0x8];
2282
2283 u8 reserved_3[0x20];
2284
2285 u8 reserved_4[0x14];
2286 u8 page_offset[0x6];
2287 u8 reserved_5[0x6];
2288
2289 u8 reserved_6[0x3];
2290 u8 log_eq_size[0x5];
2291 u8 uar_page[0x18];
2292
2293 u8 reserved_7[0x20];
2294
2295 u8 reserved_8[0x18];
2296 u8 intr[0x8];
2297
2298 u8 reserved_9[0x3];
2299 u8 log_page_size[0x5];
2300 u8 reserved_10[0x18];
2301
2302 u8 reserved_11[0x60];
2303
2304 u8 reserved_12[0x8];
2305 u8 consumer_counter[0x18];
2306
2307 u8 reserved_13[0x8];
2308 u8 producer_counter[0x18];
2309
2310 u8 reserved_14[0x80];
2311};
2312
2313enum {
2314 MLX5_DCTC_STATE_ACTIVE = 0x0,
2315 MLX5_DCTC_STATE_DRAINING = 0x1,
2316 MLX5_DCTC_STATE_DRAINED = 0x2,
2317};
2318
2319enum {
2320 MLX5_DCTC_CS_RES_DISABLE = 0x0,
2321 MLX5_DCTC_CS_RES_NA = 0x1,
2322 MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
2323};
2324
2325enum {
2326 MLX5_DCTC_MTU_256_BYTES = 0x1,
2327 MLX5_DCTC_MTU_512_BYTES = 0x2,
2328 MLX5_DCTC_MTU_1K_BYTES = 0x3,
2329 MLX5_DCTC_MTU_2K_BYTES = 0x4,
2330 MLX5_DCTC_MTU_4K_BYTES = 0x5,
2331};
2332
2333struct mlx5_ifc_dctc_bits {
2334 u8 reserved_0[0x4];
2335 u8 state[0x4];
2336 u8 reserved_1[0x18];
2337
2338 u8 reserved_2[0x8];
2339 u8 user_index[0x18];
2340
2341 u8 reserved_3[0x8];
2342 u8 cqn[0x18];
2343
2344 u8 counter_set_id[0x8];
2345 u8 atomic_mode[0x4];
2346 u8 rre[0x1];
2347 u8 rwe[0x1];
2348 u8 rae[0x1];
2349 u8 atomic_like_write_en[0x1];
2350 u8 latency_sensitive[0x1];
2351 u8 rlky[0x1];
2352 u8 free_ar[0x1];
2353 u8 reserved_4[0xd];
2354
2355 u8 reserved_5[0x8];
2356 u8 cs_res[0x8];
2357 u8 reserved_6[0x3];
2358 u8 min_rnr_nak[0x5];
2359 u8 reserved_7[0x8];
2360
2361 u8 reserved_8[0x8];
2362 u8 srqn[0x18];
2363
2364 u8 reserved_9[0x8];
2365 u8 pd[0x18];
2366
2367 u8 tclass[0x8];
2368 u8 reserved_10[0x4];
2369 u8 flow_label[0x14];
2370
2371 u8 dc_access_key[0x40];
2372
2373 u8 reserved_11[0x5];
2374 u8 mtu[0x3];
2375 u8 port[0x8];
2376 u8 pkey_index[0x10];
2377
2378 u8 reserved_12[0x8];
2379 u8 my_addr_index[0x8];
2380 u8 reserved_13[0x8];
2381 u8 hop_limit[0x8];
2382
2383 u8 dc_access_key_violation_count[0x20];
2384
2385 u8 reserved_14[0x14];
2386 u8 dei_cfi[0x1];
2387 u8 eth_prio[0x3];
2388 u8 ecn[0x2];
2389 u8 dscp[0x6];
2390
2391 u8 reserved_15[0x40];
2392};
2393
2394enum {
2395 MLX5_CQC_STATUS_OK = 0x0,
2396 MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
2397 MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
2398};
2399
2400enum {
2401 MLX5_CQC_CQE_SZ_64_BYTES = 0x0,
2402 MLX5_CQC_CQE_SZ_128_BYTES = 0x1,
2403};
2404
2405enum {
2406 MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6,
2407 MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9,
2408 MLX5_CQC_ST_FIRED = 0xa,
2409};
2410
2411struct mlx5_ifc_cqc_bits {
2412 u8 status[0x4];
2413 u8 reserved_0[0x4];
2414 u8 cqe_sz[0x3];
2415 u8 cc[0x1];
2416 u8 reserved_1[0x1];
2417 u8 scqe_break_moderation_en[0x1];
2418 u8 oi[0x1];
2419 u8 reserved_2[0x2];
2420 u8 cqe_zip_en[0x1];
2421 u8 mini_cqe_res_format[0x2];
2422 u8 st[0x4];
2423 u8 reserved_3[0x8];
2424
2425 u8 reserved_4[0x20];
2426
2427 u8 reserved_5[0x14];
2428 u8 page_offset[0x6];
2429 u8 reserved_6[0x6];
2430
2431 u8 reserved_7[0x3];
2432 u8 log_cq_size[0x5];
2433 u8 uar_page[0x18];
2434
2435 u8 reserved_8[0x4];
2436 u8 cq_period[0xc];
2437 u8 cq_max_count[0x10];
2438
2439 u8 reserved_9[0x18];
2440 u8 c_eqn[0x8];
2441
2442 u8 reserved_10[0x3];
2443 u8 log_page_size[0x5];
2444 u8 reserved_11[0x18];
2445
2446 u8 reserved_12[0x20];
2447
2448 u8 reserved_13[0x8];
2449 u8 last_notified_index[0x18];
2450
2451 u8 reserved_14[0x8];
2452 u8 last_solicit_index[0x18];
2453
2454 u8 reserved_15[0x8];
2455 u8 consumer_counter[0x18];
2456
2457 u8 reserved_16[0x8];
2458 u8 producer_counter[0x18];
2459
2460 u8 reserved_17[0x40];
2461
2462 u8 dbr_addr[0x40];
2463};
2464
2465union mlx5_ifc_cong_control_roce_ecn_auto_bits {
2466 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
2467 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
2468 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
2469 u8 reserved_0[0x800];
2470};
2471
2472struct mlx5_ifc_query_adapter_param_block_bits {
2473 u8 reserved_0[0xc0];
2474
2475 u8 reserved_1[0x8];
2476 u8 ieee_vendor_id[0x18];
2477
2478 u8 reserved_2[0x10];
2479 u8 vsd_vendor_id[0x10];
2480
2481 u8 vsd[208][0x8];
2482
2483 u8 vsd_contd_psid[16][0x8];
2484};
2485
2486union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
2487 struct mlx5_ifc_modify_field_select_bits modify_field_select;
2488 struct mlx5_ifc_resize_field_select_bits resize_field_select;
2489 u8 reserved_0[0x20];
2490};
2491
2492union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
2493 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
2494 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
2495 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
2496 u8 reserved_0[0x20];
2497};
2498
2499union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
2500 struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
2501 struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
2502 struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
2503 struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
2504 struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
2505 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
2506 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
2507 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
2508 u8 reserved_0[0x7c0];
2509};
2510
2511union mlx5_ifc_event_auto_bits {
2512 struct mlx5_ifc_comp_event_bits comp_event;
2513 struct mlx5_ifc_dct_events_bits dct_events;
2514 struct mlx5_ifc_qp_events_bits qp_events;
2515 struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
2516 struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
2517 struct mlx5_ifc_cq_error_bits cq_error;
2518 struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
2519 struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
2520 struct mlx5_ifc_gpio_event_bits gpio_event;
2521 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
2522 struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
2523 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
2524 u8 reserved_0[0xe0];
2525};
2526
2527struct mlx5_ifc_health_buffer_bits {
2528 u8 reserved_0[0x100];
2529
2530 u8 assert_existptr[0x20];
2531
2532 u8 assert_callra[0x20];
2533
2534 u8 reserved_1[0x40];
2535
2536 u8 fw_version[0x20];
2537
2538 u8 hw_id[0x20];
2539
2540 u8 reserved_2[0x20];
2541
2542 u8 irisc_index[0x8];
2543 u8 synd[0x8];
2544 u8 ext_synd[0x10];
2545};
2546
2547struct mlx5_ifc_register_loopback_control_bits {
2548 u8 no_lb[0x1];
2549 u8 reserved_0[0x7];
2550 u8 port[0x8];
2551 u8 reserved_1[0x10];
2552
2553 u8 reserved_2[0x60];
2554};
2555
2556struct mlx5_ifc_teardown_hca_out_bits {
2557 u8 status[0x8];
2558 u8 reserved_0[0x18];
2559
2560 u8 syndrome[0x20];
2561
2562 u8 reserved_1[0x40];
2563};
2564
2565enum {
2566 MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
2567 MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
2568};
2569
2570struct mlx5_ifc_teardown_hca_in_bits {
2571 u8 opcode[0x10];
2572 u8 reserved_0[0x10];
2573
2574 u8 reserved_1[0x10];
2575 u8 op_mod[0x10];
2576
2577 u8 reserved_2[0x10];
2578 u8 profile[0x10];
2579
2580 u8 reserved_3[0x20];
2581};
2582
2583struct mlx5_ifc_sqerr2rts_qp_out_bits {
2584 u8 status[0x8];
2585 u8 reserved_0[0x18];
2586
2587 u8 syndrome[0x20];
2588
2589 u8 reserved_1[0x40];
2590};
2591
2592struct mlx5_ifc_sqerr2rts_qp_in_bits {
2593 u8 opcode[0x10];
2594 u8 reserved_0[0x10];
2595
2596 u8 reserved_1[0x10];
2597 u8 op_mod[0x10];
2598
2599 u8 reserved_2[0x8];
2600 u8 qpn[0x18];
2601
2602 u8 reserved_3[0x20];
2603
2604 u8 opt_param_mask[0x20];
2605
2606 u8 reserved_4[0x20];
2607
2608 struct mlx5_ifc_qpc_bits qpc;
2609
2610 u8 reserved_5[0x80];
2611};
2612
2613struct mlx5_ifc_sqd2rts_qp_out_bits {
2614 u8 status[0x8];
2615 u8 reserved_0[0x18];
2616
2617 u8 syndrome[0x20];
2618
2619 u8 reserved_1[0x40];
2620};
2621
2622struct mlx5_ifc_sqd2rts_qp_in_bits {
2623 u8 opcode[0x10];
2624 u8 reserved_0[0x10];
2625
2626 u8 reserved_1[0x10];
2627 u8 op_mod[0x10];
2628
2629 u8 reserved_2[0x8];
2630 u8 qpn[0x18];
2631
2632 u8 reserved_3[0x20];
2633
2634 u8 opt_param_mask[0x20];
2635
2636 u8 reserved_4[0x20];
2637
2638 struct mlx5_ifc_qpc_bits qpc;
2639
2640 u8 reserved_5[0x80];
2641};
2642
2643struct mlx5_ifc_set_roce_address_out_bits {
2644 u8 status[0x8];
2645 u8 reserved_0[0x18];
2646
2647 u8 syndrome[0x20];
2648
2649 u8 reserved_1[0x40];
2650};
2651
2652struct mlx5_ifc_set_roce_address_in_bits {
2653 u8 opcode[0x10];
2654 u8 reserved_0[0x10];
2655
2656 u8 reserved_1[0x10];
2657 u8 op_mod[0x10];
2658
2659 u8 roce_address_index[0x10];
2660 u8 reserved_2[0x10];
2661
2662 u8 reserved_3[0x20];
2663
2664 struct mlx5_ifc_roce_addr_layout_bits roce_address;
2665};
2666
2667struct mlx5_ifc_set_mad_demux_out_bits {
2668 u8 status[0x8];
2669 u8 reserved_0[0x18];
2670
2671 u8 syndrome[0x20];
2672
2673 u8 reserved_1[0x40];
2674};
2675
2676enum {
2677 MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
2678 MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
2679};
2680
2681struct mlx5_ifc_set_mad_demux_in_bits {
2682 u8 opcode[0x10];
2683 u8 reserved_0[0x10];
2684
2685 u8 reserved_1[0x10];
2686 u8 op_mod[0x10];
2687
2688 u8 reserved_2[0x20];
2689
2690 u8 reserved_3[0x6];
2691 u8 demux_mode[0x2];
2692 u8 reserved_4[0x18];
2693};
2694
2695struct mlx5_ifc_set_l2_table_entry_out_bits {
2696 u8 status[0x8];
2697 u8 reserved_0[0x18];
2698
2699 u8 syndrome[0x20];
2700
2701 u8 reserved_1[0x40];
2702};
2703
2704struct mlx5_ifc_set_l2_table_entry_in_bits {
2705 u8 opcode[0x10];
2706 u8 reserved_0[0x10];
2707
2708 u8 reserved_1[0x10];
2709 u8 op_mod[0x10];
2710
2711 u8 reserved_2[0x60];
2712
2713 u8 reserved_3[0x8];
2714 u8 table_index[0x18];
2715
2716 u8 reserved_4[0x20];
2717
2718 u8 reserved_5[0x13];
2719 u8 vlan_valid[0x1];
2720 u8 vlan[0xc];
2721
2722 struct mlx5_ifc_mac_address_layout_bits mac_address;
2723
2724 u8 reserved_6[0xc0];
2725};
2726
2727struct mlx5_ifc_set_issi_out_bits {
2728 u8 status[0x8];
2729 u8 reserved_0[0x18];
2730
2731 u8 syndrome[0x20];
2732
2733 u8 reserved_1[0x40];
2734};
2735
2736struct mlx5_ifc_set_issi_in_bits {
2737 u8 opcode[0x10];
2738 u8 reserved_0[0x10];
2739
2740 u8 reserved_1[0x10];
2741 u8 op_mod[0x10];
2742
2743 u8 reserved_2[0x10];
2744 u8 current_issi[0x10];
2745
2746 u8 reserved_3[0x20];
2747};
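
Every command in this file comes as such an in/out pair: the in-box carries opcode, op_mod and parameters, and the out-box returns status and syndrome. A sketch of driving this pair, assuming mlx5_cmd_exec() from the mlx5 core and the MLX5_CMD_OP_SET_ISSI opcode; the wrapper function is illustrative only:

/* Sketch: build the in-box, execute synchronously; status and
 * syndrome come back in out[].
 */
static int example_set_issi(struct mlx5_core_dev *dev, u16 issi)
{
	u32 in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

	MLX5_SET(set_issi_in, in, opcode, MLX5_CMD_OP_SET_ISSI);
	MLX5_SET(set_issi_in, in, current_issi, issi);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}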
2748
2749struct mlx5_ifc_set_hca_cap_out_bits {
2750 u8 status[0x8];
2751 u8 reserved_0[0x18];
2752
2753 u8 syndrome[0x20];
2754
2755 u8 reserved_1[0x40];
2756};
2757
2758struct mlx5_ifc_set_hca_cap_in_bits {
@@ -313,10 +2764,653 @@ struct mlx5_ifc_set_hca_cap_in_bits {
2764
2765 u8 reserved_2[0x40];
2766
- struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
2767 union mlx5_ifc_hca_cap_union_bits capability;
2768};
2769
-struct mlx5_ifc_query_hca_cap_in_bits {
2770struct mlx5_ifc_set_fte_out_bits {
2771 u8 status[0x8];
2772 u8 reserved_0[0x18];
2773
2774 u8 syndrome[0x20];
2775
2776 u8 reserved_1[0x40];
2777};
2778
2779struct mlx5_ifc_set_fte_in_bits {
2780 u8 opcode[0x10];
2781 u8 reserved_0[0x10];
2782
2783 u8 reserved_1[0x10];
2784 u8 op_mod[0x10];
2785
2786 u8 reserved_2[0x40];
2787
2788 u8 table_type[0x8];
2789 u8 reserved_3[0x18];
2790
2791 u8 reserved_4[0x8];
2792 u8 table_id[0x18];
2793
2794 u8 reserved_5[0x40];
2795
2796 u8 flow_index[0x20];
2797
2798 u8 reserved_6[0xe0];
2799
2800 struct mlx5_ifc_flow_context_bits flow_context;
2801};
2802
2803struct mlx5_ifc_rts2rts_qp_out_bits {
2804 u8 status[0x8];
2805 u8 reserved_0[0x18];
2806
2807 u8 syndrome[0x20];
2808
2809 u8 reserved_1[0x40];
2810};
2811
2812struct mlx5_ifc_rts2rts_qp_in_bits {
2813 u8 opcode[0x10];
2814 u8 reserved_0[0x10];
2815
2816 u8 reserved_1[0x10];
2817 u8 op_mod[0x10];
2818
2819 u8 reserved_2[0x8];
2820 u8 qpn[0x18];
2821
2822 u8 reserved_3[0x20];
2823
2824 u8 opt_param_mask[0x20];
2825
2826 u8 reserved_4[0x20];
2827
2828 struct mlx5_ifc_qpc_bits qpc;
2829
2830 u8 reserved_5[0x80];
2831};
2832
2833struct mlx5_ifc_rtr2rts_qp_out_bits {
2834 u8 status[0x8];
2835 u8 reserved_0[0x18];
2836
2837 u8 syndrome[0x20];
2838
2839 u8 reserved_1[0x40];
2840};
2841
2842struct mlx5_ifc_rtr2rts_qp_in_bits {
2843 u8 opcode[0x10];
2844 u8 reserved_0[0x10];
2845
2846 u8 reserved_1[0x10];
2847 u8 op_mod[0x10];
2848
2849 u8 reserved_2[0x8];
2850 u8 qpn[0x18];
2851
2852 u8 reserved_3[0x20];
2853
2854 u8 opt_param_mask[0x20];
2855
2856 u8 reserved_4[0x20];
2857
2858 struct mlx5_ifc_qpc_bits qpc;
2859
2860 u8 reserved_5[0x80];
2861};
2862
2863struct mlx5_ifc_rst2init_qp_out_bits {
2864 u8 status[0x8];
2865 u8 reserved_0[0x18];
2866
2867 u8 syndrome[0x20];
2868
2869 u8 reserved_1[0x40];
2870};
2871
2872struct mlx5_ifc_rst2init_qp_in_bits {
2873 u8 opcode[0x10];
2874 u8 reserved_0[0x10];
2875
2876 u8 reserved_1[0x10];
2877 u8 op_mod[0x10];
2878
2879 u8 reserved_2[0x8];
2880 u8 qpn[0x18];
2881
2882 u8 reserved_3[0x20];
2883
2884 u8 opt_param_mask[0x20];
2885
2886 u8 reserved_4[0x20];
2887
2888 struct mlx5_ifc_qpc_bits qpc;
2889
2890 u8 reserved_5[0x80];
2891};
2892
2893struct mlx5_ifc_query_xrc_srq_out_bits {
2894 u8 status[0x8];
2895 u8 reserved_0[0x18];
2896
2897 u8 syndrome[0x20];
2898
2899 u8 reserved_1[0x40];
2900
2901 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
2902
2903 u8 reserved_2[0x600];
2904
2905 u8 pas[0][0x40];
2906};
2907
2908struct mlx5_ifc_query_xrc_srq_in_bits {
2909 u8 opcode[0x10];
2910 u8 reserved_0[0x10];
2911
2912 u8 reserved_1[0x10];
2913 u8 op_mod[0x10];
2914
2915 u8 reserved_2[0x8];
2916 u8 xrc_srqn[0x18];
2917
2918 u8 reserved_3[0x20];
2919};
2920
2921enum {
2922 MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
2923 MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
2924};
2925
2926struct mlx5_ifc_query_vport_state_out_bits {
2927 u8 status[0x8];
2928 u8 reserved_0[0x18];
2929
2930 u8 syndrome[0x20];
2931
2932 u8 reserved_1[0x20];
2933
2934 u8 reserved_2[0x18];
2935 u8 admin_state[0x4];
2936 u8 state[0x4];
2937};
2938
2939enum {
2940 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
2941};
2942
2943struct mlx5_ifc_query_vport_state_in_bits {
2944 u8 opcode[0x10];
2945 u8 reserved_0[0x10];
2946
2947 u8 reserved_1[0x10];
2948 u8 op_mod[0x10];
2949
2950 u8 other_vport[0x1];
2951 u8 reserved_2[0xf];
2952 u8 vport_number[0x10];
2953
2954 u8 reserved_3[0x20];
2955};
2956
2957struct mlx5_ifc_query_vport_counter_out_bits {
2958 u8 status[0x8];
2959 u8 reserved_0[0x18];
2960
2961 u8 syndrome[0x20];
2962
2963 u8 reserved_1[0x40];
2964
2965 struct mlx5_ifc_traffic_counter_bits received_errors;
2966
2967 struct mlx5_ifc_traffic_counter_bits transmit_errors;
2968
2969 struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
2970
2971 struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
2972
2973 struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
2974
2975 struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
2976
2977 struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
2978
2979 struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
2980
2981 struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
2982
2983 struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
2984
2985 struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
2986
2987 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
2988
2989 u8 reserved_2[0xa00];
2990};
2991
2992enum {
2993 MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
2994};
2995
2996struct mlx5_ifc_query_vport_counter_in_bits {
2997 u8 opcode[0x10];
2998 u8 reserved_0[0x10];
2999
3000 u8 reserved_1[0x10];
3001 u8 op_mod[0x10];
3002
3003 u8 other_vport[0x1];
3004 u8 reserved_2[0xf];
3005 u8 vport_number[0x10];
3006
3007 u8 reserved_3[0x60];
3008
3009 u8 clear[0x1];
3010 u8 reserved_4[0x1f];
3011
3012 u8 reserved_5[0x20];
3013};
3014
3015struct mlx5_ifc_query_tis_out_bits {
3016 u8 status[0x8];
3017 u8 reserved_0[0x18];
3018
3019 u8 syndrome[0x20];
3020
3021 u8 reserved_1[0x40];
3022
3023 struct mlx5_ifc_tisc_bits tis_context;
3024};
3025
3026struct mlx5_ifc_query_tis_in_bits {
3027 u8 opcode[0x10];
3028 u8 reserved_0[0x10];
3029
3030 u8 reserved_1[0x10];
3031 u8 op_mod[0x10];
3032
3033 u8 reserved_2[0x8];
3034 u8 tisn[0x18];
3035
3036 u8 reserved_3[0x20];
3037};
3038
3039struct mlx5_ifc_query_tir_out_bits {
3040 u8 status[0x8];
3041 u8 reserved_0[0x18];
3042
3043 u8 syndrome[0x20];
3044
3045 u8 reserved_1[0xc0];
3046
3047 struct mlx5_ifc_tirc_bits tir_context;
3048};
3049
3050struct mlx5_ifc_query_tir_in_bits {
3051 u8 opcode[0x10];
3052 u8 reserved_0[0x10];
3053
3054 u8 reserved_1[0x10];
3055 u8 op_mod[0x10];
3056
3057 u8 reserved_2[0x8];
3058 u8 tirn[0x18];
3059
3060 u8 reserved_3[0x20];
3061};
3062
3063struct mlx5_ifc_query_srq_out_bits {
3064 u8 status[0x8];
3065 u8 reserved_0[0x18];
3066
3067 u8 syndrome[0x20];
3068
3069 u8 reserved_1[0x40];
3070
3071 struct mlx5_ifc_srqc_bits srq_context_entry;
3072
3073 u8 reserved_2[0x600];
3074
3075 u8 pas[0][0x40];
3076};
3077
3078struct mlx5_ifc_query_srq_in_bits {
3079 u8 opcode[0x10];
3080 u8 reserved_0[0x10];
3081
3082 u8 reserved_1[0x10];
3083 u8 op_mod[0x10];
3084
3085 u8 reserved_2[0x8];
3086 u8 srqn[0x18];
3087
3088 u8 reserved_3[0x20];
3089};
3090
3091struct mlx5_ifc_query_sq_out_bits {
3092 u8 status[0x8];
3093 u8 reserved_0[0x18];
3094
3095 u8 syndrome[0x20];
3096
3097 u8 reserved_1[0xc0];
3098
3099 struct mlx5_ifc_sqc_bits sq_context;
3100};
3101
3102struct mlx5_ifc_query_sq_in_bits {
3103 u8 opcode[0x10];
3104 u8 reserved_0[0x10];
3105
3106 u8 reserved_1[0x10];
3107 u8 op_mod[0x10];
3108
3109 u8 reserved_2[0x8];
3110 u8 sqn[0x18];
3111
3112 u8 reserved_3[0x20];
3113};
3114
3115struct mlx5_ifc_query_special_contexts_out_bits {
3116 u8 status[0x8];
3117 u8 reserved_0[0x18];
3118
3119 u8 syndrome[0x20];
3120
3121 u8 reserved_1[0x20];
3122
3123 u8 resd_lkey[0x20];
3124};
3125
3126struct mlx5_ifc_query_special_contexts_in_bits {
3127 u8 opcode[0x10];
3128 u8 reserved_0[0x10];
3129
3130 u8 reserved_1[0x10];
3131 u8 op_mod[0x10];
3132
3133 u8 reserved_2[0x40];
3134};
3135
3136struct mlx5_ifc_query_rqt_out_bits {
3137 u8 status[0x8];
3138 u8 reserved_0[0x18];
3139
3140 u8 syndrome[0x20];
3141
3142 u8 reserved_1[0xc0];
3143
3144 struct mlx5_ifc_rqtc_bits rqt_context;
3145};
3146
3147struct mlx5_ifc_query_rqt_in_bits {
3148 u8 opcode[0x10];
3149 u8 reserved_0[0x10];
3150
3151 u8 reserved_1[0x10];
3152 u8 op_mod[0x10];
3153
3154 u8 reserved_2[0x8];
3155 u8 rqtn[0x18];
3156
3157 u8 reserved_3[0x20];
3158};
3159
3160struct mlx5_ifc_query_rq_out_bits {
3161 u8 status[0x8];
3162 u8 reserved_0[0x18];
3163
3164 u8 syndrome[0x20];
3165
3166 u8 reserved_1[0xc0];
3167
3168 struct mlx5_ifc_rqc_bits rq_context;
3169};
3170
3171struct mlx5_ifc_query_rq_in_bits {
3172 u8 opcode[0x10];
3173 u8 reserved_0[0x10];
3174
3175 u8 reserved_1[0x10];
3176 u8 op_mod[0x10];
3177
3178 u8 reserved_2[0x8];
3179 u8 rqn[0x18];
3180
3181 u8 reserved_3[0x20];
3182};
3183
3184struct mlx5_ifc_query_roce_address_out_bits {
3185 u8 status[0x8];
3186 u8 reserved_0[0x18];
3187
3188 u8 syndrome[0x20];
3189
3190 u8 reserved_1[0x40];
3191
3192 struct mlx5_ifc_roce_addr_layout_bits roce_address;
3193};
3194
3195struct mlx5_ifc_query_roce_address_in_bits {
3196 u8 opcode[0x10];
3197 u8 reserved_0[0x10];
3198
3199 u8 reserved_1[0x10];
3200 u8 op_mod[0x10];
3201
3202 u8 roce_address_index[0x10];
3203 u8 reserved_2[0x10];
3204
3205 u8 reserved_3[0x20];
3206};
3207
3208struct mlx5_ifc_query_rmp_out_bits {
3209 u8 status[0x8];
3210 u8 reserved_0[0x18];
3211
3212 u8 syndrome[0x20];
3213
3214 u8 reserved_1[0xc0];
3215
3216 struct mlx5_ifc_rmpc_bits rmp_context;
3217};
3218
3219struct mlx5_ifc_query_rmp_in_bits {
3220 u8 opcode[0x10];
3221 u8 reserved_0[0x10];
3222
3223 u8 reserved_1[0x10];
3224 u8 op_mod[0x10];
3225
3226 u8 reserved_2[0x8];
3227 u8 rmpn[0x18];
3228
3229 u8 reserved_3[0x20];
3230};
3231
3232struct mlx5_ifc_query_qp_out_bits {
3233 u8 status[0x8];
3234 u8 reserved_0[0x18];
3235
3236 u8 syndrome[0x20];
3237
3238 u8 reserved_1[0x40];
3239
3240 u8 opt_param_mask[0x20];
3241
3242 u8 reserved_2[0x20];
3243
3244 struct mlx5_ifc_qpc_bits qpc;
3245
3246 u8 reserved_3[0x80];
3247
3248 u8 pas[0][0x40];
3249};
3250
3251struct mlx5_ifc_query_qp_in_bits {
3252 u8 opcode[0x10];
3253 u8 reserved_0[0x10];
3254
3255 u8 reserved_1[0x10];
3256 u8 op_mod[0x10];
3257
3258 u8 reserved_2[0x8];
3259 u8 qpn[0x18];
3260
3261 u8 reserved_3[0x20];
3262};
3263
3264struct mlx5_ifc_query_q_counter_out_bits {
3265 u8 status[0x8];
3266 u8 reserved_0[0x18];
3267
3268 u8 syndrome[0x20];
3269
3270 u8 reserved_1[0x40];
3271
3272 u8 rx_write_requests[0x20];
3273
3274 u8 reserved_2[0x20];
3275
3276 u8 rx_read_requests[0x20];
3277
3278 u8 reserved_3[0x20];
3279
3280 u8 rx_atomic_requests[0x20];
3281
3282 u8 reserved_4[0x20];
3283
3284 u8 rx_dct_connect[0x20];
3285
3286 u8 reserved_5[0x20];
3287
3288 u8 out_of_buffer[0x20];
3289
3290 u8 reserved_6[0x20];
3291
3292 u8 out_of_sequence[0x20];
3293
3294 u8 reserved_7[0x620];
3295};
3296
3297struct mlx5_ifc_query_q_counter_in_bits {
3298 u8 opcode[0x10];
3299 u8 reserved_0[0x10];
3300
3301 u8 reserved_1[0x10];
3302 u8 op_mod[0x10];
3303
3304 u8 reserved_2[0x80];
3305
3306 u8 clear[0x1];
3307 u8 reserved_3[0x1f];
3308
3309 u8 reserved_4[0x18];
3310 u8 counter_set_id[0x8];
3311};
3312
3313struct mlx5_ifc_query_pages_out_bits {
3314 u8 status[0x8];
3315 u8 reserved_0[0x18];
3316
3317 u8 syndrome[0x20];
3318
3319 u8 reserved_1[0x10];
3320 u8 function_id[0x10];
3321
3322 u8 num_pages[0x20];
3323};
3324
3325enum {
3326 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
3327 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
3328 MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
3329};
3330
3331struct mlx5_ifc_query_pages_in_bits {
3332 u8 opcode[0x10];
3333 u8 reserved_0[0x10];
3334
3335 u8 reserved_1[0x10];
3336 u8 op_mod[0x10];
3337
3338 u8 reserved_2[0x10];
3339 u8 function_id[0x10];
3340
3341 u8 reserved_3[0x20];
3342};
3343
3344struct mlx5_ifc_query_nic_vport_context_out_bits {
3345 u8 status[0x8];
3346 u8 reserved_0[0x18];
3347
3348 u8 syndrome[0x20];
3349
3350 u8 reserved_1[0x40];
3351
3352 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
3353};
3354
3355struct mlx5_ifc_query_nic_vport_context_in_bits {
3356 u8 opcode[0x10];
3357 u8 reserved_0[0x10];
3358
3359 u8 reserved_1[0x10];
3360 u8 op_mod[0x10];
3361
3362 u8 other_vport[0x1];
3363 u8 reserved_2[0xf];
3364 u8 vport_number[0x10];
3365
3366 u8 reserved_3[0x5];
3367 u8 allowed_list_type[0x3];
3368 u8 reserved_4[0x18];
3369};
3370
3371struct mlx5_ifc_query_mkey_out_bits {
3372 u8 status[0x8];
3373 u8 reserved_0[0x18];
3374
3375 u8 syndrome[0x20];
3376
3377 u8 reserved_1[0x40];
3378
3379 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
3380
3381 u8 reserved_2[0x600];
3382
3383 u8 bsf0_klm0_pas_mtt0_1[16][0x8];
3384
3385 u8 bsf1_klm1_pas_mtt2_3[16][0x8];
3386};
3387
3388struct mlx5_ifc_query_mkey_in_bits {
3389 u8 opcode[0x10];
3390 u8 reserved_0[0x10];
3391
3392 u8 reserved_1[0x10];
3393 u8 op_mod[0x10];
3394
3395 u8 reserved_2[0x8];
3396 u8 mkey_index[0x18];
3397
3398 u8 pg_access[0x1];
3399 u8 reserved_3[0x1f];
3400};
3401
3402struct mlx5_ifc_query_mad_demux_out_bits {
3403 u8 status[0x8];
3404 u8 reserved_0[0x18];
3405
3406 u8 syndrome[0x20];
3407
3408 u8 reserved_1[0x40];
3409
3410 u8 mad_dumux_parameters_block[0x20];
3411};
3412
3413struct mlx5_ifc_query_mad_demux_in_bits {
3414 u8 opcode[0x10];
3415 u8 reserved_0[0x10];
3416
@@ -326,6 +3420,146 @@ struct mlx5_ifc_query_hca_cap_in_bits {
3420 u8 reserved_2[0x40];
3421};
3422
3423struct mlx5_ifc_query_l2_table_entry_out_bits {
3424 u8 status[0x8];
3425 u8 reserved_0[0x18];
3426
3427 u8 syndrome[0x20];
3428
3429 u8 reserved_1[0xa0];
3430
3431 u8 reserved_2[0x13];
3432 u8 vlan_valid[0x1];
3433 u8 vlan[0xc];
3434
3435 struct mlx5_ifc_mac_address_layout_bits mac_address;
3436
3437 u8 reserved_3[0xc0];
3438};
3439
3440struct mlx5_ifc_query_l2_table_entry_in_bits {
3441 u8 opcode[0x10];
3442 u8 reserved_0[0x10];
3443
3444 u8 reserved_1[0x10];
3445 u8 op_mod[0x10];
3446
3447 u8 reserved_2[0x60];
3448
3449 u8 reserved_3[0x8];
3450 u8 table_index[0x18];
3451
3452 u8 reserved_4[0x140];
3453};
3454
3455struct mlx5_ifc_query_issi_out_bits {
3456 u8 status[0x8];
3457 u8 reserved_0[0x18];
3458
3459 u8 syndrome[0x20];
3460
3461 u8 reserved_1[0x10];
3462 u8 current_issi[0x10];
3463
3464 u8 reserved_2[0xa0];
3465
3466 u8 supported_issi_reserved[76][0x8];
3467 u8 supported_issi_dw0[0x20];
3468};
3469
3470struct mlx5_ifc_query_issi_in_bits {
3471 u8 opcode[0x10];
3472 u8 reserved_0[0x10];
3473
3474 u8 reserved_1[0x10];
3475 u8 op_mod[0x10];
3476
3477 u8 reserved_2[0x40];
3478};
3479
3480struct mlx5_ifc_query_hca_vport_pkey_out_bits {
3481 u8 status[0x8];
3482 u8 reserved_0[0x18];
3483
3484 u8 syndrome[0x20];
3485
3486 u8 reserved_1[0x40];
3487
3488 struct mlx5_ifc_pkey_bits pkey[0];
3489};
3490
3491struct mlx5_ifc_query_hca_vport_pkey_in_bits {
3492 u8 opcode[0x10];
3493 u8 reserved_0[0x10];
3494
3495 u8 reserved_1[0x10];
3496 u8 op_mod[0x10];
3497
3498 u8 other_vport[0x1];
3499 u8 reserved_2[0xb];
3500 u8 port_num[0x4];
3501 u8 vport_number[0x10];
3502
3503 u8 reserved_3[0x10];
3504 u8 pkey_index[0x10];
3505};
3506
3507struct mlx5_ifc_query_hca_vport_gid_out_bits {
3508 u8 status[0x8];
3509 u8 reserved_0[0x18];
3510
3511 u8 syndrome[0x20];
3512
3513 u8 reserved_1[0x20];
3514
3515 u8 gids_num[0x10];
3516 u8 reserved_2[0x10];
3517
3518 struct mlx5_ifc_array128_auto_bits gid[0];
3519};
3520
3521struct mlx5_ifc_query_hca_vport_gid_in_bits {
3522 u8 opcode[0x10];
3523 u8 reserved_0[0x10];
3524
3525 u8 reserved_1[0x10];
3526 u8 op_mod[0x10];
3527
3528 u8 other_vport[0x1];
3529 u8 reserved_2[0xb];
3530 u8 port_num[0x4];
3531 u8 vport_number[0x10];
3532
3533 u8 reserved_3[0x10];
3534 u8 gid_index[0x10];
3535};
3536
3537struct mlx5_ifc_query_hca_vport_context_out_bits {
3538 u8 status[0x8];
3539 u8 reserved_0[0x18];
3540
3541 u8 syndrome[0x20];
3542
3543 u8 reserved_1[0x40];
3544
3545 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
3546};
3547
3548struct mlx5_ifc_query_hca_vport_context_in_bits {
3549 u8 opcode[0x10];
3550 u8 reserved_0[0x10];
3551
3552 u8 reserved_1[0x10];
3553 u8 op_mod[0x10];
3554
3555 u8 other_vport[0x1];
3556 u8 reserved_2[0xb];
3557 u8 port_num[0x4];
3558 u8 vport_number[0x10];
3559
3560 u8 reserved_3[0x20];
3561};
3562
3563struct mlx5_ifc_query_hca_cap_out_bits {
3564 u8 status[0x8];
3565 u8 reserved_0[0x18];
@@ -334,16 +3568,3216 @@ struct mlx5_ifc_query_hca_cap_out_bits {
3568
3569 u8 reserved_1[0x40];
3570
- u8 capability_struct[256][0x8];
3571 union mlx5_ifc_hca_cap_union_bits capability;
3572};
3573
-struct mlx5_ifc_set_hca_cap_out_bits {
3574struct mlx5_ifc_query_hca_cap_in_bits {
3575 u8 opcode[0x10];
3576 u8 reserved_0[0x10];
3577
3578 u8 reserved_1[0x10];
3579 u8 op_mod[0x10];
3580
3581 u8 reserved_2[0x40];
3582};
3583
3584struct mlx5_ifc_query_flow_table_out_bits {
3585 u8 status[0x8];
3586 u8 reserved_0[0x18];
3587
3588 u8 syndrome[0x20];
3589
3590 u8 reserved_1[0x80];
3591
3592 u8 reserved_2[0x8];
3593 u8 level[0x8];
3594 u8 reserved_3[0x8];
3595 u8 log_size[0x8];
3596
3597 u8 reserved_4[0x120];
3598};
3599
3600struct mlx5_ifc_query_flow_table_in_bits {
3601 u8 opcode[0x10];
3602 u8 reserved_0[0x10];
3603
3604 u8 reserved_1[0x10];
3605 u8 op_mod[0x10];
3606
3607 u8 reserved_2[0x40];
3608
3609 u8 table_type[0x8];
3610 u8 reserved_3[0x18];
3611
3612 u8 reserved_4[0x8];
3613 u8 table_id[0x18];
3614
3615 u8 reserved_5[0x140];
3616};
3617
3618struct mlx5_ifc_query_fte_out_bits {
3619 u8 status[0x8];
3620 u8 reserved_0[0x18];
3621
3622 u8 syndrome[0x20];
3623
3624 u8 reserved_1[0x1c0];
3625
3626 struct mlx5_ifc_flow_context_bits flow_context;
3627};
3628
3629struct mlx5_ifc_query_fte_in_bits {
3630 u8 opcode[0x10];
3631 u8 reserved_0[0x10];
3632
3633 u8 reserved_1[0x10];
3634 u8 op_mod[0x10];
3635
3636 u8 reserved_2[0x40];
3637
3638 u8 table_type[0x8];
3639 u8 reserved_3[0x18];
3640
3641 u8 reserved_4[0x8];
3642 u8 table_id[0x18];
3643
3644 u8 reserved_5[0x40];
3645
3646 u8 flow_index[0x20];
3647
3648 u8 reserved_6[0xe0];
3649};
3650
3651enum {
3652 MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
3653 MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
3654 MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
3655};
3656
3657struct mlx5_ifc_query_flow_group_out_bits {
3658 u8 status[0x8];
3659 u8 reserved_0[0x18];
3660
3661 u8 syndrome[0x20];
3662
3663 u8 reserved_1[0xa0];
3664
3665 u8 start_flow_index[0x20];
3666
3667 u8 reserved_2[0x20];
3668
3669 u8 end_flow_index[0x20];
3670
3671 u8 reserved_3[0xa0];
3672
3673 u8 reserved_4[0x18];
3674 u8 match_criteria_enable[0x8];
3675
3676 struct mlx5_ifc_fte_match_param_bits match_criteria;
3677
3678 u8 reserved_5[0xe00];
3679};
3680
3681struct mlx5_ifc_query_flow_group_in_bits {
3682 u8 opcode[0x10];
3683 u8 reserved_0[0x10];
3684
3685 u8 reserved_1[0x10];
3686 u8 op_mod[0x10];
3687
3688 u8 reserved_2[0x40];
3689
3690 u8 table_type[0x8];
3691 u8 reserved_3[0x18];
3692
3693 u8 reserved_4[0x8];
3694 u8 table_id[0x18];
3695
3696 u8 group_id[0x20];
3697
3698 u8 reserved_5[0x120];
3699};
3700
3701struct mlx5_ifc_query_eq_out_bits {
3702 u8 status[0x8];
3703 u8 reserved_0[0x18];
3704
3705 u8 syndrome[0x20];
3706
3707 u8 reserved_1[0x40];
3708
3709 struct mlx5_ifc_eqc_bits eq_context_entry;
3710
3711 u8 reserved_2[0x40];
3712
3713 u8 event_bitmask[0x40];
3714
3715 u8 reserved_3[0x580];
3716
3717 u8 pas[0][0x40];
3718};
3719
3720struct mlx5_ifc_query_eq_in_bits {
3721 u8 opcode[0x10];
3722 u8 reserved_0[0x10];
3723
3724 u8 reserved_1[0x10];
3725 u8 op_mod[0x10];
3726
3727 u8 reserved_2[0x18];
3728 u8 eq_number[0x8];
3729
3730 u8 reserved_3[0x20];
3731};
3732
3733struct mlx5_ifc_query_dct_out_bits {
3734 u8 status[0x8];
3735 u8 reserved_0[0x18];
3736
3737 u8 syndrome[0x20];
3738
3739 u8 reserved_1[0x40];
3740
3741 struct mlx5_ifc_dctc_bits dct_context_entry;
3742
3743 u8 reserved_2[0x180];
3744};
3745
3746struct mlx5_ifc_query_dct_in_bits {
3747 u8 opcode[0x10];
3748 u8 reserved_0[0x10];
3749
3750 u8 reserved_1[0x10];
3751 u8 op_mod[0x10];
3752
3753 u8 reserved_2[0x8];
3754 u8 dctn[0x18];
3755
3756 u8 reserved_3[0x20];
3757};
3758
3759struct mlx5_ifc_query_cq_out_bits {
3760 u8 status[0x8];
3761 u8 reserved_0[0x18];
3762
3763 u8 syndrome[0x20];
3764
3765 u8 reserved_1[0x40];
3766
3767 struct mlx5_ifc_cqc_bits cq_context;
3768
3769 u8 reserved_2[0x600];
3770
3771 u8 pas[0][0x40];
3772};
3773
3774struct mlx5_ifc_query_cq_in_bits {
3775 u8 opcode[0x10];
3776 u8 reserved_0[0x10];
3777
3778 u8 reserved_1[0x10];
3779 u8 op_mod[0x10];
3780
3781 u8 reserved_2[0x8];
3782 u8 cqn[0x18];
3783
3784 u8 reserved_3[0x20];
3785};
3786
3787struct mlx5_ifc_query_cong_status_out_bits {
3788 u8 status[0x8];
3789 u8 reserved_0[0x18];
3790
3791 u8 syndrome[0x20];
3792
3793 u8 reserved_1[0x20];
3794
3795 u8 enable[0x1];
3796 u8 tag_enable[0x1];
3797 u8 reserved_2[0x1e];
3798};
3799
3800struct mlx5_ifc_query_cong_status_in_bits {
3801 u8 opcode[0x10];
3802 u8 reserved_0[0x10];
3803
3804 u8 reserved_1[0x10];
3805 u8 op_mod[0x10];
3806
3807 u8 reserved_2[0x18];
3808 u8 priority[0x4];
3809 u8 cong_protocol[0x4];
3810
3811 u8 reserved_3[0x20];
3812};
3813
3814struct mlx5_ifc_query_cong_statistics_out_bits {
3815 u8 status[0x8];
3816 u8 reserved_0[0x18];
3817
3818 u8 syndrome[0x20];
3819
3820 u8 reserved_1[0x40];
3821
3822 u8 cur_flows[0x20];
3823
3824 u8 sum_flows[0x20];
3825
3826 u8 cnp_ignored_high[0x20];
3827
3828 u8 cnp_ignored_low[0x20];
3829
3830 u8 cnp_handled_high[0x20];
3831
3832 u8 cnp_handled_low[0x20];
3833
3834 u8 reserved_2[0x100];
3835
3836 u8 time_stamp_high[0x20];
3837
3838 u8 time_stamp_low[0x20];
3839
3840 u8 accumulators_period[0x20];
3841
3842 u8 ecn_marked_roce_packets_high[0x20];
3843
3844 u8 ecn_marked_roce_packets_low[0x20];
3845
3846 u8 cnps_sent_high[0x20];
3847
3848 u8 cnps_sent_low[0x20];
3849
3850 u8 reserved_3[0x560];
3851};
3852
3853struct mlx5_ifc_query_cong_statistics_in_bits {
3854 u8 opcode[0x10];
3855 u8 reserved_0[0x10];
3856
3857 u8 reserved_1[0x10];
3858 u8 op_mod[0x10];
3859
3860 u8 clear[0x1];
3861 u8 reserved_2[0x1f];
3862
3863 u8 reserved_3[0x20];
3864};
3865
3866struct mlx5_ifc_query_cong_params_out_bits {
3867 u8 status[0x8];
3868 u8 reserved_0[0x18];
3869
3870 u8 syndrome[0x20];
3871
3872 u8 reserved_1[0x40];
3873
3874 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
3875};
3876
3877struct mlx5_ifc_query_cong_params_in_bits {
3878 u8 opcode[0x10];
3879 u8 reserved_0[0x10];
3880
3881 u8 reserved_1[0x10];
3882 u8 op_mod[0x10];
3883
3884 u8 reserved_2[0x1c];
3885 u8 cong_protocol[0x4];
3886
3887 u8 reserved_3[0x20];
3888};
3889
3890struct mlx5_ifc_query_adapter_out_bits {
3891 u8 status[0x8];
3892 u8 reserved_0[0x18];
3893
3894 u8 syndrome[0x20];
3895
3896 u8 reserved_1[0x40];
3897
3898 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
3899};
3900
3901struct mlx5_ifc_query_adapter_in_bits {
3902 u8 opcode[0x10];
3903 u8 reserved_0[0x10];
3904
3905 u8 reserved_1[0x10];
3906 u8 op_mod[0x10];
3907
3908 u8 reserved_2[0x40];
3909};
3910
3911struct mlx5_ifc_qp_2rst_out_bits {
3912 u8 status[0x8];
3913 u8 reserved_0[0x18];
3914
3915 u8 syndrome[0x20];
3916
3917 u8 reserved_1[0x40];
3918};
3919
3920struct mlx5_ifc_qp_2rst_in_bits {
3921 u8 opcode[0x10];
3922 u8 reserved_0[0x10];
3923
3924 u8 reserved_1[0x10];
3925 u8 op_mod[0x10];
3926
3927 u8 reserved_2[0x8];
3928 u8 qpn[0x18];
3929
3930 u8 reserved_3[0x20];
3931};
3932
3933struct mlx5_ifc_qp_2err_out_bits {
3934 u8 status[0x8];
3935 u8 reserved_0[0x18];
3936
3937 u8 syndrome[0x20];
3938
3939 u8 reserved_1[0x40];
3940};
3941
3942struct mlx5_ifc_qp_2err_in_bits {
3943 u8 opcode[0x10];
3944 u8 reserved_0[0x10];
3945
3946 u8 reserved_1[0x10];
3947 u8 op_mod[0x10];
3948
3949 u8 reserved_2[0x8];
3950 u8 qpn[0x18];
3951
3952 u8 reserved_3[0x20];
3953};
3954
3955struct mlx5_ifc_page_fault_resume_out_bits {
3956 u8 status[0x8];
3957 u8 reserved_0[0x18];
3958
3959 u8 syndrome[0x20];
3960
3961 u8 reserved_1[0x40];
3962};
3963
3964struct mlx5_ifc_page_fault_resume_in_bits {
3965 u8 opcode[0x10];
3966 u8 reserved_0[0x10];
3967
3968 u8 reserved_1[0x10];
3969 u8 op_mod[0x10];
3970
3971 u8 error[0x1];
3972 u8 reserved_2[0x4];
3973 u8 rdma[0x1];
3974 u8 read_write[0x1];
3975 u8 req_res[0x1];
3976 u8 qpn[0x18];
3977
3978 u8 reserved_3[0x20];
3979};
3980
3981struct mlx5_ifc_nop_out_bits {
3982 u8 status[0x8];
3983 u8 reserved_0[0x18];
3984
3985 u8 syndrome[0x20];
3986
3987 u8 reserved_1[0x40];
3988};
3989
3990struct mlx5_ifc_nop_in_bits {
3991 u8 opcode[0x10];
3992 u8 reserved_0[0x10];
3993
3994 u8 reserved_1[0x10];
3995 u8 op_mod[0x10];
3996
3997 u8 reserved_2[0x40];
3998};
3999
4000struct mlx5_ifc_modify_vport_state_out_bits {
4001 u8 status[0x8];
4002 u8 reserved_0[0x18];
4003
4004 u8 syndrome[0x20];
4005
4006 u8 reserved_1[0x40];
4007};
4008
4009struct mlx5_ifc_modify_vport_state_in_bits {
4010 u8 opcode[0x10];
4011 u8 reserved_0[0x10];
4012
4013 u8 reserved_1[0x10];
4014 u8 op_mod[0x10];
4015
4016 u8 other_vport[0x1];
4017 u8 reserved_2[0xf];
4018 u8 vport_number[0x10];
4019
4020 u8 reserved_3[0x18];
4021 u8 admin_state[0x4];
4022 u8 reserved_4[0x4];
4023};
4024
4025struct mlx5_ifc_modify_tis_out_bits {
4026 u8 status[0x8];
4027 u8 reserved_0[0x18];
4028
4029 u8 syndrome[0x20];
4030
4031 u8 reserved_1[0x40];
4032};
4033
4034struct mlx5_ifc_modify_tis_in_bits {
4035 u8 opcode[0x10];
4036 u8 reserved_0[0x10];
4037
4038 u8 reserved_1[0x10];
4039 u8 op_mod[0x10];
4040
4041 u8 reserved_2[0x8];
4042 u8 tisn[0x18];
4043
4044 u8 reserved_3[0x20];
4045
4046 u8 modify_bitmask[0x40];
4047
4048 u8 reserved_4[0x40];
4049
4050 struct mlx5_ifc_tisc_bits ctx;
4051};
4052
4053struct mlx5_ifc_modify_tir_out_bits {
4054 u8 status[0x8];
4055 u8 reserved_0[0x18];
4056
4057 u8 syndrome[0x20];
4058
4059 u8 reserved_1[0x40];
4060};
4061
4062struct mlx5_ifc_modify_tir_in_bits {
4063 u8 opcode[0x10];
4064 u8 reserved_0[0x10];
4065
4066 u8 reserved_1[0x10];
4067 u8 op_mod[0x10];
4068
4069 u8 reserved_2[0x8];
4070 u8 tirn[0x18];
4071
4072 u8 reserved_3[0x20];
4073
4074 u8 modify_bitmask[0x40];
4075
4076 u8 reserved_4[0x40];
4077
4078 struct mlx5_ifc_tirc_bits ctx;
4079};
4080
4081struct mlx5_ifc_modify_sq_out_bits {
4082 u8 status[0x8];
4083 u8 reserved_0[0x18];
4084
4085 u8 syndrome[0x20];
4086
4087 u8 reserved_1[0x40];
4088};
4089
4090struct mlx5_ifc_modify_sq_in_bits {
4091 u8 opcode[0x10];
4092 u8 reserved_0[0x10];
4093
4094 u8 reserved_1[0x10];
4095 u8 op_mod[0x10];
4096
4097 u8 sq_state[0x4];
4098 u8 reserved_2[0x4];
4099 u8 sqn[0x18];
4100
4101 u8 reserved_3[0x20];
4102
4103 u8 modify_bitmask[0x40];
4104
4105 u8 reserved_4[0x40];
4106
4107 struct mlx5_ifc_sqc_bits ctx;
4108};
4109
4110struct mlx5_ifc_modify_rqt_out_bits {
4111 u8 status[0x8];
4112 u8 reserved_0[0x18];
4113
4114 u8 syndrome[0x20];
4115
4116 u8 reserved_1[0x40];
4117};
4118
4119struct mlx5_ifc_modify_rqt_in_bits {
4120 u8 opcode[0x10];
4121 u8 reserved_0[0x10];
4122
4123 u8 reserved_1[0x10];
4124 u8 op_mod[0x10];
4125
4126 u8 reserved_2[0x8];
4127 u8 rqtn[0x18];
4128
4129 u8 reserved_3[0x20];
4130
4131 u8 modify_bitmask[0x40];
4132
4133 u8 reserved_4[0x40];
4134
4135 struct mlx5_ifc_rqtc_bits ctx;
4136};
4137
4138struct mlx5_ifc_modify_rq_out_bits {
4139 u8 status[0x8];
4140 u8 reserved_0[0x18];
4141
4142 u8 syndrome[0x20];
4143
4144 u8 reserved_1[0x40];
4145};
4146
4147struct mlx5_ifc_modify_rq_in_bits {
4148 u8 opcode[0x10];
4149 u8 reserved_0[0x10];
4150
4151 u8 reserved_1[0x10];
4152 u8 op_mod[0x10];
4153
4154 u8 rq_state[0x4];
4155 u8 reserved_2[0x4];
4156 u8 rqn[0x18];
4157
4158 u8 reserved_3[0x20];
4159
4160 u8 modify_bitmask[0x40];
4161
4162 u8 reserved_4[0x40];
4163
4164 struct mlx5_ifc_rqc_bits ctx;
4165};
4166
4167struct mlx5_ifc_modify_rmp_out_bits {
4168 u8 status[0x8];
4169 u8 reserved_0[0x18];
4170
4171 u8 syndrome[0x20];
4172
4173 u8 reserved_1[0x40];
4174};
4175
4176struct mlx5_ifc_rmp_bitmask_bits {
4177 u8 reserved[0x20];
4178
4179 u8 reserved1[0x1f];
4180 u8 lwm[0x1];
4181};
4182
4183struct mlx5_ifc_modify_rmp_in_bits {
4184 u8 opcode[0x10];
4185 u8 reserved_0[0x10];
4186
4187 u8 reserved_1[0x10];
4188 u8 op_mod[0x10];
4189
4190 u8 rmp_state[0x4];
4191 u8 reserved_2[0x4];
4192 u8 rmpn[0x18];
4193
4194 u8 reserved_3[0x20];
4195
4196 struct mlx5_ifc_rmp_bitmask_bits bitmask;
4197
4198 u8 reserved_4[0x40];
4199
4200 struct mlx5_ifc_rmpc_bits ctx;
4201};
4202
4203struct mlx5_ifc_modify_nic_vport_context_out_bits {
4204 u8 status[0x8];
4205 u8 reserved_0[0x18];
4206
4207 u8 syndrome[0x20];
4208
4209 u8 reserved_1[0x40];
4210};
4211
4212struct mlx5_ifc_modify_nic_vport_field_select_bits {
4213 u8 reserved_0[0x1c];
4214 u8 permanent_address[0x1];
4215 u8 addresses_list[0x1];
4216 u8 roce_en[0x1];
4217 u8 reserved_1[0x1];
4218};
4219
4220struct mlx5_ifc_modify_nic_vport_context_in_bits {
4221 u8 opcode[0x10];
4222 u8 reserved_0[0x10];
4223
4224 u8 reserved_1[0x10];
4225 u8 op_mod[0x10];
4226
4227 u8 other_vport[0x1];
4228 u8 reserved_2[0xf];
4229 u8 vport_number[0x10];
4230
4231 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
4232
4233 u8 reserved_3[0x780];
4234
4235 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
4236};
4237
4238struct mlx5_ifc_modify_hca_vport_context_out_bits {
4239 u8 status[0x8];
4240 u8 reserved_0[0x18];
4241
4242 u8 syndrome[0x20];
4243
4244 u8 reserved_1[0x40];
4245};
4246
4247struct mlx5_ifc_modify_hca_vport_context_in_bits {
4248 u8 opcode[0x10];
4249 u8 reserved_0[0x10];
4250
4251 u8 reserved_1[0x10];
4252 u8 op_mod[0x10];
4253
4254 u8 other_vport[0x1];
4255 u8 reserved_2[0xb];
4256 u8 port_num[0x4];
4257 u8 vport_number[0x10];
4258
4259 u8 reserved_3[0x20];
4260
4261 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
4262};
4263
4264struct mlx5_ifc_modify_cq_out_bits {
4265 u8 status[0x8];
4266 u8 reserved_0[0x18];
4267
4268 u8 syndrome[0x20];
4269
4270 u8 reserved_1[0x40];
4271};
4272
4273enum {
4274 MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
4275 MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
4276};
4277
4278struct mlx5_ifc_modify_cq_in_bits {
4279 u8 opcode[0x10];
4280 u8 reserved_0[0x10];
4281
4282 u8 reserved_1[0x10];
4283 u8 op_mod[0x10];
4284
4285 u8 reserved_2[0x8];
4286 u8 cqn[0x18];
4287
4288 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
4289
4290 struct mlx5_ifc_cqc_bits cq_context;
4291
4292 u8 reserved_3[0x600];
4293
4294 u8 pas[0][0x40];
4295};
4296
4297struct mlx5_ifc_modify_cong_status_out_bits {
4298 u8 status[0x8];
4299 u8 reserved_0[0x18];
4300
4301 u8 syndrome[0x20];
4302
4303 u8 reserved_1[0x40];
4304};
4305
4306struct mlx5_ifc_modify_cong_status_in_bits {
4307 u8 opcode[0x10];
4308 u8 reserved_0[0x10];
4309
4310 u8 reserved_1[0x10];
4311 u8 op_mod[0x10];
4312
4313 u8 reserved_2[0x18];
4314 u8 priority[0x4];
4315 u8 cong_protocol[0x4];
4316
4317 u8 enable[0x1];
4318 u8 tag_enable[0x1];
4319 u8 reserved_3[0x1e];
4320};
4321
4322struct mlx5_ifc_modify_cong_params_out_bits {
4323 u8 status[0x8];
4324 u8 reserved_0[0x18];
4325
4326 u8 syndrome[0x20];
4327
4328 u8 reserved_1[0x40];
4329};
4330
4331struct mlx5_ifc_modify_cong_params_in_bits {
4332 u8 opcode[0x10];
4333 u8 reserved_0[0x10];
4334
4335 u8 reserved_1[0x10];
4336 u8 op_mod[0x10];
4337
4338 u8 reserved_2[0x1c];
4339 u8 cong_protocol[0x4];
4340
4341 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
4342
4343 u8 reserved_3[0x80];
4344
4345 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4346};
4347
4348struct mlx5_ifc_manage_pages_out_bits {
4349 u8 status[0x8];
4350 u8 reserved_0[0x18];
4351
4352 u8 syndrome[0x20];
4353
4354 u8 output_num_entries[0x20];
4355
4356 u8 reserved_1[0x20];
4357
4358 u8 pas[0][0x40];
4359};
4360
4361enum {
4362 MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0,
4363 MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1,
4364 MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2,
4365};
4366
4367struct mlx5_ifc_manage_pages_in_bits {
4368 u8 opcode[0x10];
4369 u8 reserved_0[0x10];
4370
4371 u8 reserved_1[0x10];
4372 u8 op_mod[0x10];
4373
4374 u8 reserved_2[0x10];
4375 u8 function_id[0x10];
4376
4377 u8 input_num_entries[0x20];
4378
4379 u8 pas[0][0x40];
4380};
4381
4382struct mlx5_ifc_mad_ifc_out_bits {
4383 u8 status[0x8];
4384 u8 reserved_0[0x18];
4385
4386 u8 syndrome[0x20];
4387
4388 u8 reserved_1[0x40];
4389
4390 u8 response_mad_packet[256][0x8];
4391};
4392
4393struct mlx5_ifc_mad_ifc_in_bits {
4394 u8 opcode[0x10];
4395 u8 reserved_0[0x10];
4396
4397 u8 reserved_1[0x10];
4398 u8 op_mod[0x10];
4399
4400 u8 remote_lid[0x10];
4401 u8 reserved_2[0x8];
4402 u8 port[0x8];
4403
4404 u8 reserved_3[0x20];
4405
4406 u8 mad[256][0x8];
4407};
4408
4409struct mlx5_ifc_init_hca_out_bits {
4410 u8 status[0x8];
4411 u8 reserved_0[0x18];
4412
4413 u8 syndrome[0x20];
4414
4415 u8 reserved_1[0x40];
4416};
4417
4418struct mlx5_ifc_init_hca_in_bits {
4419 u8 opcode[0x10];
4420 u8 reserved_0[0x10];
4421
4422 u8 reserved_1[0x10];
4423 u8 op_mod[0x10];
4424
4425 u8 reserved_2[0x40];
4426};
4427
4428struct mlx5_ifc_init2rtr_qp_out_bits {
4429 u8 status[0x8];
4430 u8 reserved_0[0x18];
4431
4432 u8 syndrome[0x20];
4433
4434 u8 reserved_1[0x40];
4435};
4436
4437struct mlx5_ifc_init2rtr_qp_in_bits {
4438 u8 opcode[0x10];
4439 u8 reserved_0[0x10];
4440
4441 u8 reserved_1[0x10];
4442 u8 op_mod[0x10];
4443
4444 u8 reserved_2[0x8];
4445 u8 qpn[0x18];
4446
4447 u8 reserved_3[0x20];
4448
4449 u8 opt_param_mask[0x20];
4450
4451 u8 reserved_4[0x20];
4452
4453 struct mlx5_ifc_qpc_bits qpc;
4454
4455 u8 reserved_5[0x80];
4456};
4457
4458struct mlx5_ifc_init2init_qp_out_bits {
4459 u8 status[0x8];
4460 u8 reserved_0[0x18];
4461
4462 u8 syndrome[0x20];
4463
4464 u8 reserved_1[0x40];
4465};
4466
4467struct mlx5_ifc_init2init_qp_in_bits {
4468 u8 opcode[0x10];
4469 u8 reserved_0[0x10];
4470
4471 u8 reserved_1[0x10];
4472 u8 op_mod[0x10];
4473
4474 u8 reserved_2[0x8];
4475 u8 qpn[0x18];
4476
4477 u8 reserved_3[0x20];
4478
4479 u8 opt_param_mask[0x20];
4480
4481 u8 reserved_4[0x20];
4482
4483 struct mlx5_ifc_qpc_bits qpc;
4484
4485 u8 reserved_5[0x80];
4486};
4487
4488struct mlx5_ifc_get_dropped_packet_log_out_bits {
4489 u8 status[0x8];
4490 u8 reserved_0[0x18];
4491
4492 u8 syndrome[0x20];
4493
4494 u8 reserved_1[0x40];
4495
4496 u8 packet_headers_log[128][0x8];
4497
4498 u8 packet_syndrome[64][0x8];
4499};
4500
4501struct mlx5_ifc_get_dropped_packet_log_in_bits {
4502 u8 opcode[0x10];
4503 u8 reserved_0[0x10];
4504
4505 u8 reserved_1[0x10];
4506 u8 op_mod[0x10];
4507
4508 u8 reserved_2[0x40];
4509};
4510
4511struct mlx5_ifc_gen_eqe_in_bits {
4512 u8 opcode[0x10];
4513 u8 reserved_0[0x10];
4514
4515 u8 reserved_1[0x10];
4516 u8 op_mod[0x10];
4517
4518 u8 reserved_2[0x18];
4519 u8 eq_number[0x8];
4520
4521 u8 reserved_3[0x20];
4522
4523 u8 eqe[64][0x8];
4524};
4525
4526struct mlx5_ifc_gen_eq_out_bits {
4527 u8 status[0x8];
4528 u8 reserved_0[0x18];
4529
4530 u8 syndrome[0x20];
4531
4532 u8 reserved_1[0x40];
4533};
4534
4535struct mlx5_ifc_enable_hca_out_bits {
4536 u8 status[0x8];
4537 u8 reserved_0[0x18];
4538
4539 u8 syndrome[0x20];
4540
4541 u8 reserved_1[0x20];
4542};
4543
4544struct mlx5_ifc_enable_hca_in_bits {
4545 u8 opcode[0x10];
4546 u8 reserved_0[0x10];
4547
4548 u8 reserved_1[0x10];
4549 u8 op_mod[0x10];
4550
4551 u8 reserved_2[0x10];
4552 u8 function_id[0x10];
4553
4554 u8 reserved_3[0x20];
4555};
4556
4557struct mlx5_ifc_drain_dct_out_bits {
4558 u8 status[0x8];
4559 u8 reserved_0[0x18];
4560
4561 u8 syndrome[0x20];
4562
4563 u8 reserved_1[0x40];
4564};
4565
4566struct mlx5_ifc_drain_dct_in_bits {
4567 u8 opcode[0x10];
4568 u8 reserved_0[0x10];
4569
4570 u8 reserved_1[0x10];
4571 u8 op_mod[0x10];
4572
4573 u8 reserved_2[0x8];
4574 u8 dctn[0x18];
4575
4576 u8 reserved_3[0x20];
4577};
4578
4579struct mlx5_ifc_disable_hca_out_bits {
4580 u8 status[0x8];
4581 u8 reserved_0[0x18];
4582
4583 u8 syndrome[0x20];
4584
4585 u8 reserved_1[0x20];
4586};
4587
4588struct mlx5_ifc_disable_hca_in_bits {
4589 u8 opcode[0x10];
4590 u8 reserved_0[0x10];
4591
4592 u8 reserved_1[0x10];
4593 u8 op_mod[0x10];
4594
4595 u8 reserved_2[0x10];
4596 u8 function_id[0x10];
4597
4598 u8 reserved_3[0x20];
4599};
4600
4601struct mlx5_ifc_detach_from_mcg_out_bits {
4602 u8 status[0x8];
4603 u8 reserved_0[0x18];
4604
4605 u8 syndrome[0x20];
4606
4607 u8 reserved_1[0x40];
4608};
4609
4610struct mlx5_ifc_detach_from_mcg_in_bits {
4611 u8 opcode[0x10];
4612 u8 reserved_0[0x10];
4613
4614 u8 reserved_1[0x10];
4615 u8 op_mod[0x10];
4616
4617 u8 reserved_2[0x8];
4618 u8 qpn[0x18];
4619
4620 u8 reserved_3[0x20];
4621
4622 u8 multicast_gid[16][0x8];
4623};
4624
4625struct mlx5_ifc_destroy_xrc_srq_out_bits {
4626 u8 status[0x8];
4627 u8 reserved_0[0x18];
4628
4629 u8 syndrome[0x20];
4630
4631 u8 reserved_1[0x40];
4632};
4633
4634struct mlx5_ifc_destroy_xrc_srq_in_bits {
4635 u8 opcode[0x10];
4636 u8 reserved_0[0x10];
4637
4638 u8 reserved_1[0x10];
4639 u8 op_mod[0x10];
4640
4641 u8 reserved_2[0x8];
4642 u8 xrc_srqn[0x18];
4643
4644 u8 reserved_3[0x20];
4645};
4646
4647struct mlx5_ifc_destroy_tis_out_bits {
4648 u8 status[0x8];
4649 u8 reserved_0[0x18];
4650
4651 u8 syndrome[0x20];
4652
4653 u8 reserved_1[0x40];
4654};
4655
4656struct mlx5_ifc_destroy_tis_in_bits {
4657 u8 opcode[0x10];
4658 u8 reserved_0[0x10];
4659
4660 u8 reserved_1[0x10];
4661 u8 op_mod[0x10];
4662
4663 u8 reserved_2[0x8];
4664 u8 tisn[0x18];
4665
4666 u8 reserved_3[0x20];
4667};
4668
4669struct mlx5_ifc_destroy_tir_out_bits {
4670 u8 status[0x8];
4671 u8 reserved_0[0x18];
4672
4673 u8 syndrome[0x20];
4674
4675 u8 reserved_1[0x40];
4676};
4677
4678struct mlx5_ifc_destroy_tir_in_bits {
4679 u8 opcode[0x10];
4680 u8 reserved_0[0x10];
4681
4682 u8 reserved_1[0x10];
4683 u8 op_mod[0x10];
4684
4685 u8 reserved_2[0x8];
4686 u8 tirn[0x18];
4687
4688 u8 reserved_3[0x20];
4689};
4690
4691struct mlx5_ifc_destroy_srq_out_bits {
4692 u8 status[0x8];
4693 u8 reserved_0[0x18];
4694
4695 u8 syndrome[0x20];
4696
4697 u8 reserved_1[0x40];
4698};
4699
4700struct mlx5_ifc_destroy_srq_in_bits {
4701 u8 opcode[0x10];
4702 u8 reserved_0[0x10];
4703
4704 u8 reserved_1[0x10];
4705 u8 op_mod[0x10];
4706
4707 u8 reserved_2[0x8];
4708 u8 srqn[0x18];
4709
4710 u8 reserved_3[0x20];
4711};
4712
4713struct mlx5_ifc_destroy_sq_out_bits {
4714 u8 status[0x8];
4715 u8 reserved_0[0x18];
4716
4717 u8 syndrome[0x20];
4718
4719 u8 reserved_1[0x40];
4720};
4721
4722struct mlx5_ifc_destroy_sq_in_bits {
4723 u8 opcode[0x10];
4724 u8 reserved_0[0x10];
4725
4726 u8 reserved_1[0x10];
4727 u8 op_mod[0x10];
4728
4729 u8 reserved_2[0x8];
4730 u8 sqn[0x18];
4731
4732 u8 reserved_3[0x20];
4733};
4734
4735struct mlx5_ifc_destroy_rqt_out_bits {
4736 u8 status[0x8];
4737 u8 reserved_0[0x18];
4738
4739 u8 syndrome[0x20];
4740
4741 u8 reserved_1[0x40];
4742};
4743
4744struct mlx5_ifc_destroy_rqt_in_bits {
4745 u8 opcode[0x10];
4746 u8 reserved_0[0x10];
4747
4748 u8 reserved_1[0x10];
4749 u8 op_mod[0x10];
4750
4751 u8 reserved_2[0x8];
4752 u8 rqtn[0x18];
4753
4754 u8 reserved_3[0x20];
4755};
4756
4757struct mlx5_ifc_destroy_rq_out_bits {
4758 u8 status[0x8];
4759 u8 reserved_0[0x18];
4760
4761 u8 syndrome[0x20];
4762
4763 u8 reserved_1[0x40];
4764};
4765
4766struct mlx5_ifc_destroy_rq_in_bits {
4767 u8 opcode[0x10];
4768 u8 reserved_0[0x10];
4769
4770 u8 reserved_1[0x10];
4771 u8 op_mod[0x10];
4772
4773 u8 reserved_2[0x8];
4774 u8 rqn[0x18];
4775
4776 u8 reserved_3[0x20];
4777};
4778
4779struct mlx5_ifc_destroy_rmp_out_bits {
4780 u8 status[0x8];
4781 u8 reserved_0[0x18];
4782
4783 u8 syndrome[0x20];
4784
4785 u8 reserved_1[0x40];
4786};
4787
4788struct mlx5_ifc_destroy_rmp_in_bits {
4789 u8 opcode[0x10];
4790 u8 reserved_0[0x10];
4791
4792 u8 reserved_1[0x10];
4793 u8 op_mod[0x10];
4794
4795 u8 reserved_2[0x8];
4796 u8 rmpn[0x18];
4797
4798 u8 reserved_3[0x20];
4799};
4800
4801struct mlx5_ifc_destroy_qp_out_bits {
4802 u8 status[0x8];
4803 u8 reserved_0[0x18];
4804
4805 u8 syndrome[0x20];
4806
4807 u8 reserved_1[0x40];
4808};
4809
4810struct mlx5_ifc_destroy_qp_in_bits {
4811 u8 opcode[0x10];
4812 u8 reserved_0[0x10];
4813
4814 u8 reserved_1[0x10];
4815 u8 op_mod[0x10];
4816
4817 u8 reserved_2[0x8];
4818 u8 qpn[0x18];
4819
4820 u8 reserved_3[0x20];
4821};
4822
4823struct mlx5_ifc_destroy_psv_out_bits {
4824 u8 status[0x8];
4825 u8 reserved_0[0x18];
4826
4827 u8 syndrome[0x20];
4828
4829 u8 reserved_1[0x40];
4830};
4831
4832struct mlx5_ifc_destroy_psv_in_bits {
4833 u8 opcode[0x10];
4834 u8 reserved_0[0x10];
4835
4836 u8 reserved_1[0x10];
4837 u8 op_mod[0x10];
4838
4839 u8 reserved_2[0x8];
4840 u8 psvn[0x18];
4841
4842 u8 reserved_3[0x20];
4843};
4844
4845struct mlx5_ifc_destroy_mkey_out_bits {
4846 u8 status[0x8];
4847 u8 reserved_0[0x18];
4848
4849 u8 syndrome[0x20];
4850
4851 u8 reserved_1[0x40];
4852};
4853
4854struct mlx5_ifc_destroy_mkey_in_bits {
4855 u8 opcode[0x10];
4856 u8 reserved_0[0x10];
4857
4858 u8 reserved_1[0x10];
4859 u8 op_mod[0x10];
4860
4861 u8 reserved_2[0x8];
4862 u8 mkey_index[0x18];
4863
4864 u8 reserved_3[0x20];
4865};
4866
4867struct mlx5_ifc_destroy_flow_table_out_bits {
4868 u8 status[0x8];
4869 u8 reserved_0[0x18];
4870
4871 u8 syndrome[0x20];
4872
4873 u8 reserved_1[0x40];
4874};
4875
4876struct mlx5_ifc_destroy_flow_table_in_bits {
4877 u8 opcode[0x10];
4878 u8 reserved_0[0x10];
4879
4880 u8 reserved_1[0x10];
4881 u8 op_mod[0x10];
4882
4883 u8 reserved_2[0x40];
4884
4885 u8 table_type[0x8];
4886 u8 reserved_3[0x18];
4887
4888 u8 reserved_4[0x8];
4889 u8 table_id[0x18];
4890
4891 u8 reserved_5[0x140];
4892};
4893
4894struct mlx5_ifc_destroy_flow_group_out_bits {
4895 u8 status[0x8];
4896 u8 reserved_0[0x18];
4897
4898 u8 syndrome[0x20];
4899
4900 u8 reserved_1[0x40];
4901};
4902
4903struct mlx5_ifc_destroy_flow_group_in_bits {
4904 u8 opcode[0x10];
4905 u8 reserved_0[0x10];
4906
4907 u8 reserved_1[0x10];
4908 u8 op_mod[0x10];
4909
4910 u8 reserved_2[0x40];
4911
4912 u8 table_type[0x8];
4913 u8 reserved_3[0x18];
4914
4915 u8 reserved_4[0x8];
4916 u8 table_id[0x18];
4917
4918 u8 group_id[0x20];
4919
4920 u8 reserved_5[0x120];
4921};
4922
4923struct mlx5_ifc_destroy_eq_out_bits {
4924 u8 status[0x8];
4925 u8 reserved_0[0x18];
4926
4927 u8 syndrome[0x20];
4928
4929 u8 reserved_1[0x40];
4930};
4931
4932struct mlx5_ifc_destroy_eq_in_bits {
4933 u8 opcode[0x10];
4934 u8 reserved_0[0x10];
4935
4936 u8 reserved_1[0x10];
4937 u8 op_mod[0x10];
4938
4939 u8 reserved_2[0x18];
4940 u8 eq_number[0x8];
4941
4942 u8 reserved_3[0x20];
4943};
4944
4945struct mlx5_ifc_destroy_dct_out_bits {
4946 u8 status[0x8];
4947 u8 reserved_0[0x18];
4948
4949 u8 syndrome[0x20];
4950
4951 u8 reserved_1[0x40];
4952};
4953
4954struct mlx5_ifc_destroy_dct_in_bits {
4955 u8 opcode[0x10];
4956 u8 reserved_0[0x10];
4957
4958 u8 reserved_1[0x10];
4959 u8 op_mod[0x10];
4960
4961 u8 reserved_2[0x8];
4962 u8 dctn[0x18];
4963
4964 u8 reserved_3[0x20];
4965};
4966
4967struct mlx5_ifc_destroy_cq_out_bits {
4968 u8 status[0x8];
4969 u8 reserved_0[0x18];
4970
4971 u8 syndrome[0x20];
4972
4973 u8 reserved_1[0x40];
4974};
4975
4976struct mlx5_ifc_destroy_cq_in_bits {
4977 u8 opcode[0x10];
4978 u8 reserved_0[0x10];
4979
4980 u8 reserved_1[0x10];
4981 u8 op_mod[0x10];
4982
4983 u8 reserved_2[0x8];
4984 u8 cqn[0x18];
4985
4986 u8 reserved_3[0x20];
4987};
4988
4989struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
4990 u8 status[0x8];
4991 u8 reserved_0[0x18];
4992
4993 u8 syndrome[0x20];
4994
4995 u8 reserved_1[0x40];
4996};
4997
4998struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
4999 u8 opcode[0x10];
5000 u8 reserved_0[0x10];
5001
5002 u8 reserved_1[0x10];
5003 u8 op_mod[0x10];
5004
5005 u8 reserved_2[0x20];
5006
5007 u8 reserved_3[0x10];
5008 u8 vxlan_udp_port[0x10];
5009};
5010
5011struct mlx5_ifc_delete_l2_table_entry_out_bits {
5012 u8 status[0x8];
5013 u8 reserved_0[0x18];
5014
5015 u8 syndrome[0x20];
5016
5017 u8 reserved_1[0x40];
5018};
5019
5020struct mlx5_ifc_delete_l2_table_entry_in_bits {
5021 u8 opcode[0x10];
5022 u8 reserved_0[0x10];
5023
5024 u8 reserved_1[0x10];
5025 u8 op_mod[0x10];
5026
5027 u8 reserved_2[0x60];
5028
5029 u8 reserved_3[0x8];
5030 u8 table_index[0x18];
5031
5032 u8 reserved_4[0x140];
5033};
5034
5035struct mlx5_ifc_delete_fte_out_bits {
5036 u8 status[0x8];
5037 u8 reserved_0[0x18];
5038
5039 u8 syndrome[0x20];
5040
5041 u8 reserved_1[0x40];
5042};
5043
5044struct mlx5_ifc_delete_fte_in_bits {
5045 u8 opcode[0x10];
5046 u8 reserved_0[0x10];
5047
5048 u8 reserved_1[0x10];
5049 u8 op_mod[0x10];
5050
5051 u8 reserved_2[0x40];
5052
5053 u8 table_type[0x8];
5054 u8 reserved_3[0x18];
5055
5056 u8 reserved_4[0x8];
5057 u8 table_id[0x18];
5058
5059 u8 reserved_5[0x40];
5060
5061 u8 flow_index[0x20];
5062
5063 u8 reserved_6[0xe0];
5064};
5065
5066struct mlx5_ifc_dealloc_xrcd_out_bits {
5067 u8 status[0x8];
5068 u8 reserved_0[0x18];
5069
5070 u8 syndrome[0x20];
5071
5072 u8 reserved_1[0x40];
5073};
5074
5075struct mlx5_ifc_dealloc_xrcd_in_bits {
5076 u8 opcode[0x10];
5077 u8 reserved_0[0x10];
5078
5079 u8 reserved_1[0x10];
5080 u8 op_mod[0x10];
5081
5082 u8 reserved_2[0x8];
5083 u8 xrcd[0x18];
5084
5085 u8 reserved_3[0x20];
5086};
5087
5088struct mlx5_ifc_dealloc_uar_out_bits {
5089 u8 status[0x8];
5090 u8 reserved_0[0x18];
5091
5092 u8 syndrome[0x20];
5093
5094 u8 reserved_1[0x40];
5095};
5096
5097struct mlx5_ifc_dealloc_uar_in_bits {
5098 u8 opcode[0x10];
5099 u8 reserved_0[0x10];
5100
5101 u8 reserved_1[0x10];
5102 u8 op_mod[0x10];
5103
5104 u8 reserved_2[0x8];
5105 u8 uar[0x18];
5106
5107 u8 reserved_3[0x20];
5108};
5109
5110struct mlx5_ifc_dealloc_transport_domain_out_bits {
5111 u8 status[0x8];
5112 u8 reserved_0[0x18];
5113
5114 u8 syndrome[0x20];
5115
5116 u8 reserved_1[0x40];
5117};
5118
5119struct mlx5_ifc_dealloc_transport_domain_in_bits {
5120 u8 opcode[0x10];
5121 u8 reserved_0[0x10];
5122
5123 u8 reserved_1[0x10];
5124 u8 op_mod[0x10];
5125
5126 u8 reserved_2[0x8];
5127 u8 transport_domain[0x18];
5128
5129 u8 reserved_3[0x20];
5130};
5131
5132struct mlx5_ifc_dealloc_q_counter_out_bits {
5133 u8 status[0x8];
5134 u8 reserved_0[0x18];
5135
5136 u8 syndrome[0x20];
5137
5138 u8 reserved_1[0x40];
5139};
5140
5141struct mlx5_ifc_dealloc_q_counter_in_bits {
5142 u8 opcode[0x10];
5143 u8 reserved_0[0x10];
5144
5145 u8 reserved_1[0x10];
5146 u8 op_mod[0x10];
5147
5148 u8 reserved_2[0x18];
5149 u8 counter_set_id[0x8];
5150
5151 u8 reserved_3[0x20];
5152};
5153
5154struct mlx5_ifc_dealloc_pd_out_bits {
5155 u8 status[0x8];
5156 u8 reserved_0[0x18];
5157
5158 u8 syndrome[0x20];
5159
5160 u8 reserved_1[0x40];
5161};
5162
5163struct mlx5_ifc_dealloc_pd_in_bits {
5164 u8 opcode[0x10];
5165 u8 reserved_0[0x10];
5166
5167 u8 reserved_1[0x10];
5168 u8 op_mod[0x10];
5169
5170 u8 reserved_2[0x8];
5171 u8 pd[0x18];
5172
5173 u8 reserved_3[0x20];
5174};
5175
5176struct mlx5_ifc_create_xrc_srq_out_bits {
5177 u8 status[0x8];
5178 u8 reserved_0[0x18];
5179
5180 u8 syndrome[0x20];
5181
5182 u8 reserved_1[0x8];
5183 u8 xrc_srqn[0x18];
5184
5185 u8 reserved_2[0x20];
5186};
5187
5188struct mlx5_ifc_create_xrc_srq_in_bits {
5189 u8 opcode[0x10];
5190 u8 reserved_0[0x10];
5191
5192 u8 reserved_1[0x10];
5193 u8 op_mod[0x10];
5194
5195 u8 reserved_2[0x40];
5196
5197 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
5198
5199 u8 reserved_3[0x600];
5200
5201 u8 pas[0][0x40];
5202};
5203
5204struct mlx5_ifc_create_tis_out_bits {
5205 u8 status[0x8];
5206 u8 reserved_0[0x18];
5207
5208 u8 syndrome[0x20];
5209
5210 u8 reserved_1[0x8];
5211 u8 tisn[0x18];
5212
5213 u8 reserved_2[0x20];
5214};
5215
5216struct mlx5_ifc_create_tis_in_bits {
5217 u8 opcode[0x10];
5218 u8 reserved_0[0x10];
5219
5220 u8 reserved_1[0x10];
5221 u8 op_mod[0x10];
5222
5223 u8 reserved_2[0xc0];
5224
5225 struct mlx5_ifc_tisc_bits ctx;
5226};
5227
5228struct mlx5_ifc_create_tir_out_bits {
5229 u8 status[0x8];
5230 u8 reserved_0[0x18];
5231
5232 u8 syndrome[0x20];
5233
5234 u8 reserved_1[0x8];
5235 u8 tirn[0x18];
5236
5237 u8 reserved_2[0x20];
5238};
5239
5240struct mlx5_ifc_create_tir_in_bits {
5241 u8 opcode[0x10];
5242 u8 reserved_0[0x10];
5243
5244 u8 reserved_1[0x10];
5245 u8 op_mod[0x10];
5246
5247 u8 reserved_2[0xc0];
5248
5249 struct mlx5_ifc_tirc_bits ctx;
5250};
5251
5252struct mlx5_ifc_create_srq_out_bits {
5253 u8 status[0x8];
5254 u8 reserved_0[0x18];
5255
5256 u8 syndrome[0x20];
5257
5258 u8 reserved_1[0x8];
5259 u8 srqn[0x18];
5260
5261 u8 reserved_2[0x20];
5262};
5263
5264struct mlx5_ifc_create_srq_in_bits {
5265 u8 opcode[0x10];
5266 u8 reserved_0[0x10];
5267
5268 u8 reserved_1[0x10];
5269 u8 op_mod[0x10];
5270
5271 u8 reserved_2[0x40];
5272
5273 struct mlx5_ifc_srqc_bits srq_context_entry;
5274
5275 u8 reserved_3[0x600];
5276
5277 u8 pas[0][0x40];
5278};
5279
5280struct mlx5_ifc_create_sq_out_bits {
5281 u8 status[0x8];
5282 u8 reserved_0[0x18];
5283
5284 u8 syndrome[0x20];
5285
5286 u8 reserved_1[0x8];
5287 u8 sqn[0x18];
5288
5289 u8 reserved_2[0x20];
5290};
5291
5292struct mlx5_ifc_create_sq_in_bits {
5293 u8 opcode[0x10];
5294 u8 reserved_0[0x10];
5295
5296 u8 reserved_1[0x10];
5297 u8 op_mod[0x10];
5298
5299 u8 reserved_2[0xc0];
5300
5301 struct mlx5_ifc_sqc_bits ctx;
5302};
5303
5304struct mlx5_ifc_create_rqt_out_bits {
5305 u8 status[0x8];
5306 u8 reserved_0[0x18];
5307
5308 u8 syndrome[0x20];
5309
5310 u8 reserved_1[0x8];
5311 u8 rqtn[0x18];
5312
5313 u8 reserved_2[0x20];
5314};
5315
5316struct mlx5_ifc_create_rqt_in_bits {
5317 u8 opcode[0x10];
5318 u8 reserved_0[0x10];
5319
5320 u8 reserved_1[0x10];
5321 u8 op_mod[0x10];
5322
5323 u8 reserved_2[0xc0];
5324
5325 struct mlx5_ifc_rqtc_bits rqt_context;
5326};
5327
5328struct mlx5_ifc_create_rq_out_bits {
5329 u8 status[0x8];
5330 u8 reserved_0[0x18];
5331
5332 u8 syndrome[0x20];
5333
5334 u8 reserved_1[0x8];
5335 u8 rqn[0x18];
5336
5337 u8 reserved_2[0x20];
5338};
5339
5340struct mlx5_ifc_create_rq_in_bits {
5341 u8 opcode[0x10];
5342 u8 reserved_0[0x10];
5343
5344 u8 reserved_1[0x10];
5345 u8 op_mod[0x10];
5346
5347 u8 reserved_2[0xc0];
5348
5349 struct mlx5_ifc_rqc_bits ctx;
5350};
5351
5352struct mlx5_ifc_create_rmp_out_bits {
5353 u8 status[0x8];
5354 u8 reserved_0[0x18];
5355
5356 u8 syndrome[0x20];
5357
5358 u8 reserved_1[0x8];
5359 u8 rmpn[0x18];
5360
5361 u8 reserved_2[0x20];
5362};
5363
5364struct mlx5_ifc_create_rmp_in_bits {
5365 u8 opcode[0x10];
5366 u8 reserved_0[0x10];
5367
5368 u8 reserved_1[0x10];
5369 u8 op_mod[0x10];
5370
5371 u8 reserved_2[0xc0];
5372
5373 struct mlx5_ifc_rmpc_bits ctx;
5374};
5375
5376struct mlx5_ifc_create_qp_out_bits {
5377 u8 status[0x8];
5378 u8 reserved_0[0x18];
5379
5380 u8 syndrome[0x20];
5381
5382 u8 reserved_1[0x8];
5383 u8 qpn[0x18];
5384
5385 u8 reserved_2[0x20];
5386};
5387
5388struct mlx5_ifc_create_qp_in_bits {
5389 u8 opcode[0x10];
5390 u8 reserved_0[0x10];
5391
5392 u8 reserved_1[0x10];
5393 u8 op_mod[0x10];
5394
5395 u8 reserved_2[0x40];
5396
5397 u8 opt_param_mask[0x20];
5398
5399 u8 reserved_3[0x20];
5400
5401 struct mlx5_ifc_qpc_bits qpc;
5402
5403 u8 reserved_4[0x80];
5404
5405 u8 pas[0][0x40];
5406};
5407
5408struct mlx5_ifc_create_psv_out_bits {
5409 u8 status[0x8];
5410 u8 reserved_0[0x18];
5411
5412 u8 syndrome[0x20];
5413
5414 u8 reserved_1[0x40];
5415
5416 u8 reserved_2[0x8];
5417 u8 psv0_index[0x18];
5418
5419 u8 reserved_3[0x8];
5420 u8 psv1_index[0x18];
5421
5422 u8 reserved_4[0x8];
5423 u8 psv2_index[0x18];
5424
5425 u8 reserved_5[0x8];
5426 u8 psv3_index[0x18];
5427};
5428
5429struct mlx5_ifc_create_psv_in_bits {
5430 u8 opcode[0x10];
5431 u8 reserved_0[0x10];
5432
5433 u8 reserved_1[0x10];
5434 u8 op_mod[0x10];
5435
5436 u8 num_psv[0x4];
5437 u8 reserved_2[0x4];
5438 u8 pd[0x18];
5439
5440 u8 reserved_3[0x20];
5441};
5442
5443struct mlx5_ifc_create_mkey_out_bits {
5444 u8 status[0x8];
5445 u8 reserved_0[0x18];
5446
5447 u8 syndrome[0x20];
5448
5449 u8 reserved_1[0x8];
5450 u8 mkey_index[0x18];
5451
5452 u8 reserved_2[0x20];
5453};
5454
5455struct mlx5_ifc_create_mkey_in_bits {
5456 u8 opcode[0x10];
5457 u8 reserved_0[0x10];
5458
5459 u8 reserved_1[0x10];
5460 u8 op_mod[0x10];
5461
5462 u8 reserved_2[0x20];
5463
5464 u8 pg_access[0x1];
5465 u8 reserved_3[0x1f];
5466
5467 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
5468
5469 u8 reserved_4[0x80];
5470
5471 u8 translations_octword_actual_size[0x20];
5472
5473 u8 reserved_5[0x560];
5474
5475 u8 klm_pas_mtt[0][0x20];
5476};
5477
5478struct mlx5_ifc_create_flow_table_out_bits {
5479 u8 status[0x8];
5480 u8 reserved_0[0x18];
5481
5482 u8 syndrome[0x20];
5483
5484 u8 reserved_1[0x8];
5485 u8 table_id[0x18];
5486
5487 u8 reserved_2[0x20];
5488};
5489
5490struct mlx5_ifc_create_flow_table_in_bits {
5491 u8 opcode[0x10];
5492 u8 reserved_0[0x10];
5493
5494 u8 reserved_1[0x10];
5495 u8 op_mod[0x10];
5496
5497 u8 reserved_2[0x40];
5498
5499 u8 table_type[0x8];
5500 u8 reserved_3[0x18];
5501
5502 u8 reserved_4[0x20];
5503
5504 u8 reserved_5[0x8];
5505 u8 level[0x8];
5506 u8 reserved_6[0x8];
5507 u8 log_size[0x8];
5508
5509 u8 reserved_7[0x120];
5510};
5511
5512struct mlx5_ifc_create_flow_group_out_bits {
5513 u8 status[0x8];
5514 u8 reserved_0[0x18];
5515
5516 u8 syndrome[0x20];
5517
5518 u8 reserved_1[0x8];
5519 u8 group_id[0x18];
5520
5521 u8 reserved_2[0x20];
5522};
5523
5524enum {
5525 MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
5526 MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
5527 MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
5528};
5529
5530struct mlx5_ifc_create_flow_group_in_bits {
5531 u8 opcode[0x10];
5532 u8 reserved_0[0x10];
5533
5534 u8 reserved_1[0x10];
5535 u8 op_mod[0x10];
5536
5537 u8 reserved_2[0x40];
5538
5539 u8 table_type[0x8];
5540 u8 reserved_3[0x18];
5541
5542 u8 reserved_4[0x8];
5543 u8 table_id[0x18];
5544
5545 u8 reserved_5[0x20];
5546
5547 u8 start_flow_index[0x20];
5548
5549 u8 reserved_6[0x20];
5550
5551 u8 end_flow_index[0x20];
5552
5553 u8 reserved_7[0xa0];
5554
5555 u8 reserved_8[0x18];
5556 u8 match_criteria_enable[0x8];
5557
5558 struct mlx5_ifc_fte_match_param_bits match_criteria;
5559
5560 u8 reserved_9[0xe00];
5561};
5562
5563struct mlx5_ifc_create_eq_out_bits {
5564 u8 status[0x8];
5565 u8 reserved_0[0x18];
5566
5567 u8 syndrome[0x20];
5568
5569 u8 reserved_1[0x18];
5570 u8 eq_number[0x8];
5571
5572 u8 reserved_2[0x20];
5573};
5574
5575struct mlx5_ifc_create_eq_in_bits {
5576 u8 opcode[0x10];
5577 u8 reserved_0[0x10];
5578
5579 u8 reserved_1[0x10];
5580 u8 op_mod[0x10];
5581
5582 u8 reserved_2[0x40];
5583
5584 struct mlx5_ifc_eqc_bits eq_context_entry;
5585
5586 u8 reserved_3[0x40];
5587
5588 u8 event_bitmask[0x40];
5589
5590 u8 reserved_4[0x580];
5591
5592 u8 pas[0][0x40];
5593};
5594
5595struct mlx5_ifc_create_dct_out_bits {
5596 u8 status[0x8];
5597 u8 reserved_0[0x18];
5598
5599 u8 syndrome[0x20];
5600
5601 u8 reserved_1[0x8];
5602 u8 dctn[0x18];
5603
5604 u8 reserved_2[0x20];
5605};
5606
5607struct mlx5_ifc_create_dct_in_bits {
5608 u8 opcode[0x10];
5609 u8 reserved_0[0x10];
5610
5611 u8 reserved_1[0x10];
5612 u8 op_mod[0x10];
5613
5614 u8 reserved_2[0x40];
5615
5616 struct mlx5_ifc_dctc_bits dct_context_entry;
5617
5618 u8 reserved_3[0x180];
5619};
5620
5621struct mlx5_ifc_create_cq_out_bits {
5622 u8 status[0x8];
5623 u8 reserved_0[0x18];
5624
5625 u8 syndrome[0x20];
5626
5627 u8 reserved_1[0x8];
5628 u8 cqn[0x18];
5629
5630 u8 reserved_2[0x20];
5631};
5632
5633struct mlx5_ifc_create_cq_in_bits {
5634 u8 opcode[0x10];
5635 u8 reserved_0[0x10];
5636
5637 u8 reserved_1[0x10];
5638 u8 op_mod[0x10];
5639
5640 u8 reserved_2[0x40];
5641
5642 struct mlx5_ifc_cqc_bits cq_context;
5643
5644 u8 reserved_3[0x600];
5645
5646 u8 pas[0][0x40];
5647};
5648
5649struct mlx5_ifc_config_int_moderation_out_bits {
5650 u8 status[0x8];
5651 u8 reserved_0[0x18];
5652
5653 u8 syndrome[0x20];
5654
5655 u8 reserved_1[0x4];
5656 u8 min_delay[0xc];
5657 u8 int_vector[0x10];
5658
5659 u8 reserved_2[0x20];
5660};
5661
5662enum {
5663 MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0,
5664 MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1,
5665};
5666
5667struct mlx5_ifc_config_int_moderation_in_bits {
5668 u8 opcode[0x10];
5669 u8 reserved_0[0x10];
5670
5671 u8 reserved_1[0x10];
5672 u8 op_mod[0x10];
5673
5674 u8 reserved_2[0x4];
5675 u8 min_delay[0xc];
5676 u8 int_vector[0x10];
5677
5678 u8 reserved_3[0x20];
5679};
5680
5681struct mlx5_ifc_attach_to_mcg_out_bits {
5682 u8 status[0x8];
5683 u8 reserved_0[0x18];
5684
5685 u8 syndrome[0x20];
5686
5687 u8 reserved_1[0x40];
5688};
5689
5690struct mlx5_ifc_attach_to_mcg_in_bits {
5691 u8 opcode[0x10];
5692 u8 reserved_0[0x10];
5693
5694 u8 reserved_1[0x10];
5695 u8 op_mod[0x10];
5696
5697 u8 reserved_2[0x8];
5698 u8 qpn[0x18];
5699
5700 u8 reserved_3[0x20];
5701
5702 u8 multicast_gid[16][0x8];
5703};
5704
5705struct mlx5_ifc_arm_xrc_srq_out_bits {
5706 u8 status[0x8];
5707 u8 reserved_0[0x18];
5708
5709 u8 syndrome[0x20];
5710
5711 u8 reserved_1[0x40];
5712};
5713
5714enum {
5715 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1,
5716};
5717
5718struct mlx5_ifc_arm_xrc_srq_in_bits {
5719 u8 opcode[0x10];
5720 u8 reserved_0[0x10];
5721
5722 u8 reserved_1[0x10];
5723 u8 op_mod[0x10];
5724
5725 u8 reserved_2[0x8];
5726 u8 xrc_srqn[0x18];
5727
5728 u8 reserved_3[0x10];
5729 u8 lwm[0x10];
5730};
5731
5732struct mlx5_ifc_arm_rq_out_bits {
5733 u8 status[0x8];
5734 u8 reserved_0[0x18];
5735
5736 u8 syndrome[0x20];
5737
5738 u8 reserved_1[0x40];
5739};
5740
5741enum {
5742 MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1,
5743};
5744
5745struct mlx5_ifc_arm_rq_in_bits {
5746 u8 opcode[0x10];
5747 u8 reserved_0[0x10];
5748
5749 u8 reserved_1[0x10];
5750 u8 op_mod[0x10];
5751
5752 u8 reserved_2[0x8];
5753 u8 srq_number[0x18];
5754
5755 u8 reserved_3[0x10];
5756 u8 lwm[0x10];
5757};
5758
5759struct mlx5_ifc_arm_dct_out_bits {
5760 u8 status[0x8];
5761 u8 reserved_0[0x18];
5762
5763 u8 syndrome[0x20];
5764
5765 u8 reserved_1[0x40];
5766};
5767
5768struct mlx5_ifc_arm_dct_in_bits {
5769 u8 opcode[0x10];
5770 u8 reserved_0[0x10];
5771
5772 u8 reserved_1[0x10];
5773 u8 op_mod[0x10];
5774
5775 u8 reserved_2[0x8];
5776 u8 dct_number[0x18];
5777
5778 u8 reserved_3[0x20];
5779};
5780
5781struct mlx5_ifc_alloc_xrcd_out_bits {
5782 u8 status[0x8];
5783 u8 reserved_0[0x18];
5784
5785 u8 syndrome[0x20];
5786
5787 u8 reserved_1[0x8];
5788 u8 xrcd[0x18];
5789
5790 u8 reserved_2[0x20];
5791};
5792
5793struct mlx5_ifc_alloc_xrcd_in_bits {
5794 u8 opcode[0x10];
5795 u8 reserved_0[0x10];
5796
5797 u8 reserved_1[0x10];
5798 u8 op_mod[0x10];
5799
5800 u8 reserved_2[0x40];
5801};
5802
5803struct mlx5_ifc_alloc_uar_out_bits {
5804 u8 status[0x8];
5805 u8 reserved_0[0x18];
5806
5807 u8 syndrome[0x20];
5808
5809 u8 reserved_1[0x8];
5810 u8 uar[0x18];
5811
5812 u8 reserved_2[0x20];
5813};
5814
5815struct mlx5_ifc_alloc_uar_in_bits {
5816 u8 opcode[0x10];
5817 u8 reserved_0[0x10];
5818
5819 u8 reserved_1[0x10];
5820 u8 op_mod[0x10];
5821
5822 u8 reserved_2[0x40];
5823};
5824
5825struct mlx5_ifc_alloc_transport_domain_out_bits {
5826 u8 status[0x8];
5827 u8 reserved_0[0x18];
5828
5829 u8 syndrome[0x20];
5830
5831 u8 reserved_1[0x8];
5832 u8 transport_domain[0x18];
5833
5834 u8 reserved_2[0x20];
5835};
5836
5837struct mlx5_ifc_alloc_transport_domain_in_bits {
5838 u8 opcode[0x10];
5839 u8 reserved_0[0x10];
5840
5841 u8 reserved_1[0x10];
5842 u8 op_mod[0x10];
5843
5844 u8 reserved_2[0x40];
5845};
5846
5847struct mlx5_ifc_alloc_q_counter_out_bits {
5848 u8 status[0x8];
5849 u8 reserved_0[0x18];
5850
5851 u8 syndrome[0x20];
5852
5853 u8 reserved_1[0x18];
5854 u8 counter_set_id[0x8];
5855
5856 u8 reserved_2[0x20];
5857};
5858
5859struct mlx5_ifc_alloc_q_counter_in_bits {
5860 u8 opcode[0x10];
5861 u8 reserved_0[0x10];
5862
5863 u8 reserved_1[0x10];
5864 u8 op_mod[0x10];
5865
5866 u8 reserved_2[0x40];
5867};
5868
5869struct mlx5_ifc_alloc_pd_out_bits {
5870 u8 status[0x8];
5871 u8 reserved_0[0x18];
5872
5873 u8 syndrome[0x20];
5874
5875 u8 reserved_1[0x8];
5876 u8 pd[0x18];
5877
5878 u8 reserved_2[0x20];
5879};
5880
5881struct mlx5_ifc_alloc_pd_in_bits {
5882 u8 opcode[0x10];
5883 u8 reserved_0[0x10];
5884
5885 u8 reserved_1[0x10];
5886 u8 op_mod[0x10];
5887
5888 u8 reserved_2[0x40];
5889};
5890
5891struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
5892 u8 status[0x8];
5893 u8 reserved_0[0x18];
5894
5895 u8 syndrome[0x20];
5896
5897 u8 reserved_1[0x40];
5898};
5899
5900struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
5901 u8 opcode[0x10];
5902 u8 reserved_0[0x10];
5903
5904 u8 reserved_1[0x10];
5905 u8 op_mod[0x10];
5906
5907 u8 reserved_2[0x20];
5908
5909 u8 reserved_3[0x10];
5910 u8 vxlan_udp_port[0x10];
5911};
5912
5913struct mlx5_ifc_access_register_out_bits {
5914 u8 status[0x8];
5915 u8 reserved_0[0x18];
5916
5917 u8 syndrome[0x20];
5918
5919 u8 reserved_1[0x40];
5920
5921 u8 register_data[0][0x20];
5922};
5923
5924enum {
5925 MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
5926 MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
5927};
5928
5929struct mlx5_ifc_access_register_in_bits {
5930 u8 opcode[0x10];
5931 u8 reserved_0[0x10];
5932
5933 u8 reserved_1[0x10];
5934 u8 op_mod[0x10];
5935
5936 u8 reserved_2[0x10];
5937 u8 register_id[0x10];
5938
5939 u8 argument[0x20];
5940
5941 u8 register_data[0][0x20];
5942};
5943
5944struct mlx5_ifc_sltp_reg_bits {
5945 u8 status[0x4];
5946 u8 version[0x4];
5947 u8 local_port[0x8];
5948 u8 pnat[0x2];
5949 u8 reserved_0[0x2];
5950 u8 lane[0x4];
5951 u8 reserved_1[0x8];
5952
5953 u8 reserved_2[0x20];
5954
5955 u8 reserved_3[0x7];
5956 u8 polarity[0x1];
5957 u8 ob_tap0[0x8];
5958 u8 ob_tap1[0x8];
5959 u8 ob_tap2[0x8];
5960
5961 u8 reserved_4[0xc];
5962 u8 ob_preemp_mode[0x4];
5963 u8 ob_reg[0x8];
5964 u8 ob_bias[0x8];
5965
5966 u8 reserved_5[0x20];
5967};
5968
5969struct mlx5_ifc_slrg_reg_bits {
5970 u8 status[0x4];
5971 u8 version[0x4];
5972 u8 local_port[0x8];
5973 u8 pnat[0x2];
5974 u8 reserved_0[0x2];
5975 u8 lane[0x4];
5976 u8 reserved_1[0x8];
5977
5978 u8 time_to_link_up[0x10];
5979 u8 reserved_2[0xc];
5980 u8 grade_lane_speed[0x4];
5981
5982 u8 grade_version[0x8];
5983 u8 grade[0x18];
5984
5985 u8 reserved_3[0x4];
5986 u8 height_grade_type[0x4];
5987 u8 height_grade[0x18];
5988
5989 u8 height_dz[0x10];
5990 u8 height_dv[0x10];
5991
5992 u8 reserved_4[0x10];
5993 u8 height_sigma[0x10];
5994
5995 u8 reserved_5[0x20];
5996
5997 u8 reserved_6[0x4];
5998 u8 phase_grade_type[0x4];
5999 u8 phase_grade[0x18];
6000
6001 u8 reserved_7[0x8];
6002 u8 phase_eo_pos[0x8];
6003 u8 reserved_8[0x8];
6004 u8 phase_eo_neg[0x8];
6005
6006 u8 ffe_set_tested[0x10];
6007 u8 test_errors_per_lane[0x10];
6008};
6009
6010struct mlx5_ifc_pvlc_reg_bits {
6011 u8 reserved_0[0x8];
6012 u8 local_port[0x8];
6013 u8 reserved_1[0x10];
6014
6015 u8 reserved_2[0x1c];
6016 u8 vl_hw_cap[0x4];
6017
6018 u8 reserved_3[0x1c];
6019 u8 vl_admin[0x4];
6020
6021 u8 reserved_4[0x1c];
6022 u8 vl_operational[0x4];
6023};
6024
6025struct mlx5_ifc_pude_reg_bits {
6026 u8 swid[0x8];
6027 u8 local_port[0x8];
6028 u8 reserved_0[0x4];
6029 u8 admin_status[0x4];
6030 u8 reserved_1[0x4];
6031 u8 oper_status[0x4];
6032
6033 u8 reserved_2[0x60];
6034};
6035
6036struct mlx5_ifc_ptys_reg_bits {
6037 u8 reserved_0[0x8];
6038 u8 local_port[0x8];
6039 u8 reserved_1[0xd];
6040 u8 proto_mask[0x3];
6041
6042 u8 reserved_2[0x40];
6043
6044 u8 eth_proto_capability[0x20];
6045
6046 u8 ib_link_width_capability[0x10];
6047 u8 ib_proto_capability[0x10];
6048
6049 u8 reserved_3[0x20];
6050
6051 u8 eth_proto_admin[0x20];
6052
6053 u8 ib_link_width_admin[0x10];
6054 u8 ib_proto_admin[0x10];
6055
6056 u8 reserved_4[0x20];
6057
6058 u8 eth_proto_oper[0x20];
6059
6060 u8 ib_link_width_oper[0x10];
6061 u8 ib_proto_oper[0x10];
6062
6063 u8 reserved_5[0x20];
6064
6065 u8 eth_proto_lp_advertise[0x20];
6066
6067 u8 reserved_6[0x60];
6068};
6069
6070struct mlx5_ifc_ptas_reg_bits {
6071 u8 reserved_0[0x20];
6072
6073 u8 algorithm_options[0x10];
6074 u8 reserved_1[0x4];
6075 u8 repetitions_mode[0x4];
6076 u8 num_of_repetitions[0x8];
6077
6078 u8 grade_version[0x8];
6079 u8 height_grade_type[0x4];
6080 u8 phase_grade_type[0x4];
6081 u8 height_grade_weight[0x8];
6082 u8 phase_grade_weight[0x8];
6083
6084 u8 gisim_measure_bits[0x10];
6085 u8 adaptive_tap_measure_bits[0x10];
6086
6087 u8 ber_bath_high_error_threshold[0x10];
6088 u8 ber_bath_mid_error_threshold[0x10];
6089
6090 u8 ber_bath_low_error_threshold[0x10];
6091 u8 one_ratio_high_threshold[0x10];
6092
6093 u8 one_ratio_high_mid_threshold[0x10];
6094 u8 one_ratio_low_mid_threshold[0x10];
6095
6096 u8 one_ratio_low_threshold[0x10];
6097 u8 ndeo_error_threshold[0x10];
6098
6099 u8 mixer_offset_step_size[0x10];
6100 u8 reserved_2[0x8];
6101 u8 mix90_phase_for_voltage_bath[0x8];
6102
6103 u8 mixer_offset_start[0x10];
6104 u8 mixer_offset_end[0x10];
6105
6106 u8 reserved_3[0x15];
6107 u8 ber_test_time[0xb];
6108};
6109
6110struct mlx5_ifc_pspa_reg_bits {
6111 u8 swid[0x8];
6112 u8 local_port[0x8];
6113 u8 sub_port[0x8];
6114 u8 reserved_0[0x8];
6115
6116 u8 reserved_1[0x20];
6117};
6118
6119struct mlx5_ifc_pqdr_reg_bits {
6120 u8 reserved_0[0x8];
6121 u8 local_port[0x8];
6122 u8 reserved_1[0x5];
6123 u8 prio[0x3];
6124 u8 reserved_2[0x6];
6125 u8 mode[0x2];
6126
6127 u8 reserved_3[0x20];
6128
6129 u8 reserved_4[0x10];
6130 u8 min_threshold[0x10];
6131
6132 u8 reserved_5[0x10];
6133 u8 max_threshold[0x10];
6134
6135 u8 reserved_6[0x10];
6136 u8 mark_probability_denominator[0x10];
6137
6138 u8 reserved_7[0x60];
6139};
6140
6141struct mlx5_ifc_ppsc_reg_bits {
6142 u8 reserved_0[0x8];
6143 u8 local_port[0x8];
6144 u8 reserved_1[0x10];
6145
6146 u8 reserved_2[0x60];
6147
6148 u8 reserved_3[0x1c];
6149 u8 wrps_admin[0x4];
6150
6151 u8 reserved_4[0x1c];
6152 u8 wrps_status[0x4];
6153
6154 u8 reserved_5[0x8];
6155 u8 up_threshold[0x8];
6156 u8 reserved_6[0x8];
6157 u8 down_threshold[0x8];
6158
6159 u8 reserved_7[0x20];
6160
6161 u8 reserved_8[0x1c];
6162 u8 srps_admin[0x4];
6163
6164 u8 reserved_9[0x1c];
6165 u8 srps_status[0x4];
6166
6167 u8 reserved_10[0x40];
6168};
6169
6170struct mlx5_ifc_pplr_reg_bits {
6171 u8 reserved_0[0x8];
6172 u8 local_port[0x8];
6173 u8 reserved_1[0x10];
6174
6175 u8 reserved_2[0x8];
6176 u8 lb_cap[0x8];
6177 u8 reserved_3[0x8];
6178 u8 lb_en[0x8];
6179};
6180
6181struct mlx5_ifc_pplm_reg_bits {
6182 u8 reserved_0[0x8];
6183 u8 local_port[0x8];
6184 u8 reserved_1[0x10];
6185
6186 u8 reserved_2[0x20];
6187
6188 u8 port_profile_mode[0x8];
6189 u8 static_port_profile[0x8];
6190 u8 active_port_profile[0x8];
6191 u8 reserved_3[0x8];
6192
6193 u8 retransmission_active[0x8];
6194 u8 fec_mode_active[0x18];
6195
6196 u8 reserved_4[0x20];
6197};
6198
6199struct mlx5_ifc_ppcnt_reg_bits {
6200 u8 swid[0x8];
6201 u8 local_port[0x8];
6202 u8 pnat[0x2];
6203 u8 reserved_0[0x8];
6204 u8 grp[0x6];
6205
6206 u8 clr[0x1];
6207 u8 reserved_1[0x1c];
6208 u8 prio_tc[0x3];
6209
6210 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
6211};
6212
6213struct mlx5_ifc_ppad_reg_bits {
6214 u8 reserved_0[0x3];
6215 u8 single_mac[0x1];
6216 u8 reserved_1[0x4];
6217 u8 local_port[0x8];
6218 u8 mac_47_32[0x10];
6219
6220 u8 mac_31_0[0x20];
6221
6222 u8 reserved_2[0x40];
6223};
6224
6225struct mlx5_ifc_pmtu_reg_bits {
6226 u8 reserved_0[0x8];
6227 u8 local_port[0x8];
6228 u8 reserved_1[0x10];
6229
6230 u8 max_mtu[0x10];
6231 u8 reserved_2[0x10];
6232
6233 u8 admin_mtu[0x10];
6234 u8 reserved_3[0x10];
6235
6236 u8 oper_mtu[0x10];
6237 u8 reserved_4[0x10];
6238};
6239
6240struct mlx5_ifc_pmpr_reg_bits {
6241 u8 reserved_0[0x8];
6242 u8 module[0x8];
6243 u8 reserved_1[0x10];
6244
6245 u8 reserved_2[0x18];
6246 u8 attenuation_5g[0x8];
6247
6248 u8 reserved_3[0x18];
6249 u8 attenuation_7g[0x8];
6250
6251 u8 reserved_4[0x18];
6252 u8 attenuation_12g[0x8];
6253};
6254
6255struct mlx5_ifc_pmpe_reg_bits {
6256 u8 reserved_0[0x8];
6257 u8 module[0x8];
6258 u8 reserved_1[0xc];
6259 u8 module_status[0x4];
6260
6261 u8 reserved_2[0x60];
6262};
6263
6264struct mlx5_ifc_pmpc_reg_bits {
6265 u8 module_state_updated[32][0x8];
6266};
6267
6268struct mlx5_ifc_pmlpn_reg_bits {
6269 u8 reserved_0[0x4];
6270 u8 mlpn_status[0x4];
6271 u8 local_port[0x8];
6272 u8 reserved_1[0x10];
6273
6274 u8 e[0x1];
6275 u8 reserved_2[0x1f];
6276};
6277
6278struct mlx5_ifc_pmlp_reg_bits {
6279 u8 rxtx[0x1];
6280 u8 reserved_0[0x7];
6281 u8 local_port[0x8];
6282 u8 reserved_1[0x8];
6283 u8 width[0x8];
6284
6285 u8 lane0_module_mapping[0x20];
6286
6287 u8 lane1_module_mapping[0x20];
6288
6289 u8 lane2_module_mapping[0x20];
6290
6291 u8 lane3_module_mapping[0x20];
6292
6293 u8 reserved_2[0x160];
6294};
6295
6296struct mlx5_ifc_pmaos_reg_bits {
6297 u8 reserved_0[0x8];
6298 u8 module[0x8];
6299 u8 reserved_1[0x4];
6300 u8 admin_status[0x4];
6301 u8 reserved_2[0x4];
6302 u8 oper_status[0x4];
6303
6304 u8 ase[0x1];
6305 u8 ee[0x1];
6306 u8 reserved_3[0x1c];
6307 u8 e[0x2];
6308
6309 u8 reserved_4[0x40];
6310};
6311
6312struct mlx5_ifc_plpc_reg_bits {
6313 u8 reserved_0[0x4];
6314 u8 profile_id[0xc];
6315 u8 reserved_1[0x4];
6316 u8 proto_mask[0x4];
6317 u8 reserved_2[0x8];
6318
6319 u8 reserved_3[0x10];
6320 u8 lane_speed[0x10];
6321
6322 u8 reserved_4[0x17];
6323 u8 lpbf[0x1];
6324 u8 fec_mode_policy[0x8];
6325
6326 u8 retransmission_capability[0x8];
6327 u8 fec_mode_capability[0x18];
6328
6329 u8 retransmission_support_admin[0x8];
6330 u8 fec_mode_support_admin[0x18];
6331
6332 u8 retransmission_request_admin[0x8];
6333 u8 fec_mode_request_admin[0x18];
6334
6335 u8 reserved_5[0x80];
6336};
6337
6338struct mlx5_ifc_plib_reg_bits {
6339 u8 reserved_0[0x8];
6340 u8 local_port[0x8];
6341 u8 reserved_1[0x8];
6342 u8 ib_port[0x8];
6343
6344 u8 reserved_2[0x60];
6345};
6346
6347struct mlx5_ifc_plbf_reg_bits {
6348 u8 reserved_0[0x8];
6349 u8 local_port[0x8];
6350 u8 reserved_1[0xd];
6351 u8 lbf_mode[0x3];
6352
6353 u8 reserved_2[0x20];
6354};
6355
6356struct mlx5_ifc_pipg_reg_bits {
6357 u8 reserved_0[0x8];
6358 u8 local_port[0x8];
6359 u8 reserved_1[0x10];
6360
6361 u8 dic[0x1];
6362 u8 reserved_2[0x19];
6363 u8 ipg[0x4];
6364 u8 reserved_3[0x2];
6365};
6366
6367struct mlx5_ifc_pifr_reg_bits {
6368 u8 reserved_0[0x8];
6369 u8 local_port[0x8];
6370 u8 reserved_1[0x10];
6371
6372 u8 reserved_2[0xe0];
6373
6374 u8 port_filter[8][0x20];
6375
6376 u8 port_filter_update_en[8][0x20];
6377};
6378
6379struct mlx5_ifc_pfcc_reg_bits {
6380 u8 reserved_0[0x8];
6381 u8 local_port[0x8];
6382 u8 reserved_1[0x10];
6383
6384 u8 ppan[0x4];
6385 u8 reserved_2[0x4];
6386 u8 prio_mask_tx[0x8];
6387 u8 reserved_3[0x8];
6388 u8 prio_mask_rx[0x8];
6389
6390 u8 pptx[0x1];
6391 u8 aptx[0x1];
6392 u8 reserved_4[0x6];
6393 u8 pfctx[0x8];
6394 u8 reserved_5[0x10];
6395
6396 u8 pprx[0x1];
6397 u8 aprx[0x1];
6398 u8 reserved_6[0x6];
6399 u8 pfcrx[0x8];
6400 u8 reserved_7[0x10];
6401
6402 u8 reserved_8[0x80];
6403};
6404
6405struct mlx5_ifc_pelc_reg_bits {
6406 u8 op[0x4];
6407 u8 reserved_0[0x4];
6408 u8 local_port[0x8];
6409 u8 reserved_1[0x10];
6410
6411 u8 op_admin[0x8];
6412 u8 op_capability[0x8];
6413 u8 op_request[0x8];
6414 u8 op_active[0x8];
6415
6416 u8 admin[0x40];
6417
6418 u8 capability[0x40];
6419
6420 u8 request[0x40];
6421
6422 u8 active[0x40];
6423
6424 u8 reserved_2[0x80];
6425};
6426
6427struct mlx5_ifc_peir_reg_bits {
6428 u8 reserved_0[0x8];
6429 u8 local_port[0x8];
6430 u8 reserved_1[0x10];
6431
6432 u8 reserved_2[0xc];
6433 u8 error_count[0x4];
6434 u8 reserved_3[0x10];
6435
6436 u8 reserved_4[0xc];
6437 u8 lane[0x4];
6438 u8 reserved_5[0x8];
6439 u8 error_type[0x8];
6440};
6441
6442struct mlx5_ifc_pcap_reg_bits {
6443 u8 reserved_0[0x8];
6444 u8 local_port[0x8];
6445 u8 reserved_1[0x10];
6446
6447 u8 port_capability_mask[4][0x20];
6448};
6449
6450struct mlx5_ifc_paos_reg_bits {
6451 u8 swid[0x8];
6452 u8 local_port[0x8];
6453 u8 reserved_0[0x4];
6454 u8 admin_status[0x4];
6455 u8 reserved_1[0x4];
6456 u8 oper_status[0x4];
6457
6458 u8 ase[0x1];
6459 u8 ee[0x1];
6460 u8 reserved_2[0x1c];
6461 u8 e[0x2];
6462
6463 u8 reserved_3[0x40];
6464};
6465
6466struct mlx5_ifc_pamp_reg_bits {
6467 u8 reserved_0[0x8];
6468 u8 opamp_group[0x8];
6469 u8 reserved_1[0xc];
6470 u8 opamp_group_type[0x4];
6471
6472 u8 start_index[0x10];
6473 u8 reserved_2[0x4];
6474 u8 num_of_indices[0xc];
6475
6476 u8 index_data[18][0x10];
6477};
6478
6479struct mlx5_ifc_lane_2_module_mapping_bits {
6480 u8 reserved_0[0x6];
6481 u8 rx_lane[0x2];
6482 u8 reserved_1[0x6];
6483 u8 tx_lane[0x2];
6484 u8 reserved_2[0x8];
6485 u8 module[0x8];
6486};
6487
6488struct mlx5_ifc_bufferx_reg_bits {
6489 u8 reserved_0[0x6];
6490 u8 lossy[0x1];
6491 u8 epsb[0x1];
6492 u8 reserved_1[0xc];
6493 u8 size[0xc];
6494
6495 u8 xoff_threshold[0x10];
6496 u8 xon_threshold[0x10];
6497};
6498
6499struct mlx5_ifc_set_node_in_bits {
6500 u8 node_description[64][0x8];
6501};
6502
6503struct mlx5_ifc_register_power_settings_bits {
6504 u8 reserved_0[0x18];
6505 u8 power_settings_level[0x8];
6506
6507 u8 reserved_1[0x60];
6508};
6509
6510struct mlx5_ifc_register_host_endianness_bits {
6511 u8 he[0x1];
6512 u8 reserved_0[0x1f];
6513
6514 u8 reserved_1[0x60];
6515};
6516
6517struct mlx5_ifc_umr_pointer_desc_argument_bits {
6518 u8 reserved_0[0x20];
6519
6520 u8 mkey[0x20];
6521
6522 u8 addressh_63_32[0x20];
6523
6524 u8 addressl_31_0[0x20];
6525};
6526
6527struct mlx5_ifc_ud_adrs_vector_bits {
6528 u8 dc_key[0x40];
6529
6530 u8 ext[0x1];
6531 u8 reserved_0[0x7];
6532 u8 destination_qp_dct[0x18];
6533
6534 u8 static_rate[0x4];
6535 u8 sl_eth_prio[0x4];
6536 u8 fl[0x1];
6537 u8 mlid[0x7];
6538 u8 rlid_udp_sport[0x10];
6539
6540 u8 reserved_1[0x20];
6541
6542 u8 rmac_47_16[0x20];
6543
6544 u8 rmac_15_0[0x10];
6545 u8 tclass[0x8];
6546 u8 hop_limit[0x8];
6547
6548 u8 reserved_2[0x1];
6549 u8 grh[0x1];
6550 u8 reserved_3[0x2];
6551 u8 src_addr_index[0x8];
6552 u8 flow_label[0x14];
6553
6554 u8 rgid_rip[16][0x8];
6555};
6556
6557struct mlx5_ifc_pages_req_event_bits {
6558 u8 reserved_0[0x10];
6559 u8 function_id[0x10];
6560
6561 u8 num_pages[0x20];
6562
6563 u8 reserved_1[0xa0];
6564};
6565
6566struct mlx5_ifc_eqe_bits {
6567 u8 reserved_0[0x8];
6568 u8 event_type[0x8];
6569 u8 reserved_1[0x8];
6570 u8 event_sub_type[0x8];
6571
6572 u8 reserved_2[0xe0];
6573
6574 union mlx5_ifc_event_auto_bits event_data;
6575
6576 u8 reserved_3[0x10];
6577 u8 signature[0x8];
6578 u8 reserved_4[0x7];
6579 u8 owner[0x1];
6580};
6581
6582enum {
6583 MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7,
6584};
6585
6586struct mlx5_ifc_cmd_queue_entry_bits {
6587 u8 type[0x8];
6588 u8 reserved_0[0x18];
6589
6590 u8 input_length[0x20];
6591
6592 u8 input_mailbox_pointer_63_32[0x20];
6593
6594 u8 input_mailbox_pointer_31_9[0x17];
6595 u8 reserved_1[0x9];
6596
6597 u8 command_input_inline_data[16][0x8];
6598
6599 u8 command_output_inline_data[16][0x8];
6600
6601 u8 output_mailbox_pointer_63_32[0x20];
6602
6603 u8 output_mailbox_pointer_31_9[0x17];
6604 u8 reserved_2[0x9];
6605
6606 u8 output_length[0x20];
6607
6608 u8 token[0x8];
6609 u8 signature[0x8];
6610 u8 reserved_3[0x8];
6611 u8 status[0x7];
6612 u8 ownership[0x1];
6613};
6614
6615struct mlx5_ifc_cmd_out_bits {
6616 u8 status[0x8];
6617 u8 reserved_0[0x18];
6618
6619 u8 syndrome[0x20];
6620
6621 u8 command_output[0x20];
6622};
6623
6624struct mlx5_ifc_cmd_in_bits {
6625 u8 opcode[0x10];
6626 u8 reserved_0[0x10];
6627
6628 u8 reserved_1[0x10];
6629 u8 op_mod[0x10];
6630
6631 u8 command[0][0x20];
6632};
6633
6634struct mlx5_ifc_cmd_if_box_bits {
6635 u8 mailbox_data[512][0x8];
6636
6637 u8 reserved_0[0x180];
6638
6639 u8 next_pointer_63_32[0x20];
6640
6641 u8 next_pointer_31_10[0x16];
6642 u8 reserved_1[0xa];
6643
6644 u8 block_number[0x20];
6645
6646 u8 reserved_2[0x8];
6647 u8 token[0x8];
6648 u8 ctrl_signature[0x8];
6649 u8 signature[0x8];
6650};
6651
6652struct mlx5_ifc_mtt_bits {
6653 u8 ptag_63_32[0x20];
6654
6655 u8 ptag_31_8[0x18];
6656 u8 reserved_0[0x6];
6657 u8 wr_en[0x1];
6658 u8 rd_en[0x1];
6659};
6660
6661enum {
6662 MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
6663 MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
6664 MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
6665};
6666
6667enum {
6668 MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0,
6669 MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1,
6670 MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2,
6671};
6672
6673enum {
6674 MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1,
6675 MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7,
6676 MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8,
6677 MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9,
6678 MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa,
6679 MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb,
6680 MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc,
6681 MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd,
6682 MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
6683 MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
6684 MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
6685};
6686
6687struct mlx5_ifc_initial_seg_bits {
6688 u8 fw_rev_minor[0x10];
6689 u8 fw_rev_major[0x10];
6690
6691 u8 cmd_interface_rev[0x10];
6692 u8 fw_rev_subminor[0x10];
6693
6694 u8 reserved_0[0x40];
6695
6696 u8 cmdq_phy_addr_63_32[0x20];
6697
6698 u8 cmdq_phy_addr_31_12[0x14];
6699 u8 reserved_1[0x2];
6700 u8 nic_interface[0x2];
6701 u8 log_cmdq_size[0x4];
6702 u8 log_cmdq_stride[0x4];
6703
6704 u8 command_doorbell_vector[0x20];
6705
6706 u8 reserved_2[0xf00];
6707
6708 u8 initializing[0x1];
6709 u8 reserved_3[0x4];
6710 u8 nic_interface_supported[0x3];
6711 u8 reserved_4[0x18];
6712
6713 struct mlx5_ifc_health_buffer_bits health_buffer;
6714
6715 u8 no_dram_nic_offset[0x20];
6716
6717 u8 reserved_5[0x6e40];
6718
6719 u8 reserved_6[0x1f];
6720 u8 clear_int[0x1];
6721
6722 u8 health_syndrome[0x8];
6723 u8 health_counter[0x18];
6724
6725 u8 reserved_7[0x17fc0];
6726};
6727
6728union mlx5_ifc_ports_control_registers_document_bits {
6729 struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
6730 struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
6731 struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
6732 struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
6733 struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
6734 struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
6735 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
6736 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
6737 struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
6738 struct mlx5_ifc_pamp_reg_bits pamp_reg;
6739 struct mlx5_ifc_paos_reg_bits paos_reg;
6740 struct mlx5_ifc_pcap_reg_bits pcap_reg;
6741 struct mlx5_ifc_peir_reg_bits peir_reg;
6742 struct mlx5_ifc_pelc_reg_bits pelc_reg;
6743 struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
6744 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
6745 struct mlx5_ifc_pifr_reg_bits pifr_reg;
6746 struct mlx5_ifc_pipg_reg_bits pipg_reg;
6747 struct mlx5_ifc_plbf_reg_bits plbf_reg;
6748 struct mlx5_ifc_plib_reg_bits plib_reg;
6749 struct mlx5_ifc_plpc_reg_bits plpc_reg;
6750 struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
6751 struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
6752 struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
6753 struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
6754 struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
6755 struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
6756 struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
6757 struct mlx5_ifc_ppad_reg_bits ppad_reg;
6758 struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
6759 struct mlx5_ifc_pplm_reg_bits pplm_reg;
6760 struct mlx5_ifc_pplr_reg_bits pplr_reg;
6761 struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
6762 struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
6763 struct mlx5_ifc_pspa_reg_bits pspa_reg;
6764 struct mlx5_ifc_ptas_reg_bits ptas_reg;
6765 struct mlx5_ifc_ptys_reg_bits ptys_reg;
6766 struct mlx5_ifc_pude_reg_bits pude_reg;
6767 struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
6768 struct mlx5_ifc_slrg_reg_bits slrg_reg;
6769 struct mlx5_ifc_sltp_reg_bits sltp_reg;
6770 u8 reserved_0[0x60e0];
6771};
6772
6773union mlx5_ifc_debug_enhancements_document_bits {
6774 struct mlx5_ifc_health_buffer_bits health_buffer;
6775 u8 reserved_0[0x200];
6776};
6777
6778union mlx5_ifc_uplink_pci_interface_document_bits {
6779 struct mlx5_ifc_initial_seg_bits initial_seg;
6780 u8 reserved_0[0x20060];
347}; 6781};
348 6782
349#endif /* MLX5_IFC_H */ 6783#endif /* MLX5_IFC_H */
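
The mlx5_ifc structures above follow the PRM convention in which each "u8 name[0xN]" member stands for an N-bit big-endian field, so member order alone fixes every field's bit offset. Drivers are expected to access such layouts through the MLX5_SET()/MLX5_GET() helpers in mlx5/device.h; the fragment below is only a hedged, open-coded sketch of the same idea, valid for fields narrower than 32 bits that do not straddle a dword boundary.

	/* Illustrative only: extract an N-bit field (N < 32, contained in a
	 * single 32-bit big-endian dword) at bit offset 'off'.
	 */
	static u32 ifc_get_bits(const __be32 *buf, unsigned int off,
				unsigned int bits)
	{
		u32 dword = be32_to_cpu(buf[off / 32]);

		return (dword >> (32 - off % 32 - bits)) & ((1U << bits) - 1);
	}

	/* e.g. the opcode of mlx5_ifc_cmd_in_bits occupies the first 0x10 bits: */
	static u16 cmd_in_opcode(const __be32 *cmd_in)
	{
		return (u16)ifc_get_bits(cmd_in, 0, 0x10);
	}
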
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 310b5f7fd6ae..f079fb1a31f7 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -134,13 +134,21 @@ enum {
134 134
135enum { 135enum {
136 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, 136 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
137 MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
137 MLX5_WQE_CTRL_SOLICITED = 1 << 1, 138 MLX5_WQE_CTRL_SOLICITED = 1 << 1,
138}; 139};
139 140
140enum { 141enum {
142 MLX5_SEND_WQE_DS = 16,
141 MLX5_SEND_WQE_BB = 64, 143 MLX5_SEND_WQE_BB = 64,
142}; 144};
143 145
146#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
147
148enum {
149 MLX5_SEND_WQE_MAX_WQEBBS = 16,
150};
151
144enum { 152enum {
145 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, 153 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
146 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, 154 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
200#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 208#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
201#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 209#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
202 210
211enum {
212 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
213 MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
214 MLX5_ETH_WQE_L3_CSUM = 1 << 6,
215 MLX5_ETH_WQE_L4_CSUM = 1 << 7,
216};
217
218struct mlx5_wqe_eth_seg {
219 u8 rsvd0[4];
220 u8 cs_flags;
221 u8 rsvd1;
222 __be16 mss;
223 __be32 rsvd2;
224 __be16 inline_hdr_sz;
225 u8 inline_hdr_start[2];
226};
227
203struct mlx5_wqe_xrc_seg { 228struct mlx5_wqe_xrc_seg {
204 __be32 xrc_srqn; 229 __be32 xrc_srqn;
205 u8 rsvd[12]; 230 u8 rsvd[12];
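
The new send-queue constants fit together arithmetically: a WQE is accounted in 16-byte data segments (MLX5_SEND_WQE_DS) but posted in 64-byte basic blocks (MLX5_SEND_WQE_BB), so MLX5_SEND_WQEBB_NUM_DS evaluates to 4 and one WQE may span at most MLX5_SEND_WQE_MAX_WQEBBS = 16 blocks. A hedged sketch of the conversion a transmit path would perform, plus typical use of the new cs_flags bits (the helper name is illustrative):

	/* Round a WQE's size in data segments up to whole basic blocks. */
	static inline u16 mlx5_wqe_ds_to_wqebbs(u16 ds_cnt)
	{
		return DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);	/* i.e. /4 */
	}

	/* With a struct mlx5_wqe_eth_seg *eseg in hand, request outer
	 * L3/L4 checksum offload for the packet being posted:
	 */
	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
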
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
new file mode 100644
index 000000000000..967e0fd06e89
--- /dev/null
+++ b/include/linux/mlx5/vport.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef __MLX5_VPORT_H__
34#define __MLX5_VPORT_H__
35
36#include <linux/mlx5/driver.h>
37
38u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
39void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
40int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
41 u8 port_num, u16 vf_num, u16 gid_index,
42 union ib_gid *gid);
43int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
44 u8 port_num, u16 vf_num, u16 pkey_index,
45 u16 *pkey);
46int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
47 u8 other_vport, u8 port_num,
48 u16 vf_num,
49 struct mlx5_hca_vport_context *rep);
50int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
51 u64 *sys_image_guid);
52int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
53 u64 *node_guid);
54
55#endif /* __MLX5_VPORT_H__ */
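
A minimal usage sketch for the new vport API, built only from the prototypes above; the function name and the chosen error code are illustrative:

	#include <linux/etherdevice.h>
	#include <linux/mlx5/vport.h>

	static int example_set_netdev_mac(struct mlx5_core_dev *mdev,
					  struct net_device *netdev)
	{
		mlx5_query_nic_vport_mac_address(mdev, netdev->dev_addr);
		if (!is_valid_ether_addr(netdev->dev_addr))
			return -EADDRNOTAVAIL;	/* firmware returned no usable MAC */
		return 0;
	}
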
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
new file mode 100644
index 000000000000..4efc3f56e6df
--- /dev/null
+++ b/include/linux/mm-arch-hooks.h
@@ -0,0 +1,25 @@
1/*
2 * Generic mm no-op hooks.
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef _LINUX_MM_ARCH_HOOKS_H
12#define _LINUX_MM_ARCH_HOOKS_H
13
14#include <asm/mm-arch-hooks.h>
15
16#ifndef arch_remap
17static inline void arch_remap(struct mm_struct *mm,
18 unsigned long old_start, unsigned long old_end,
19 unsigned long new_start, unsigned long new_end)
20{
21}
22#define arch_remap arch_remap
23#endif
24
25#endif /* _LINUX_MM_ARCH_HOOKS_H */
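
The #ifndef/#define pattern lets an architecture ship a real hook in its own <asm/mm-arch-hooks.h> while every other architecture silently gets the no-op above. A hedged sketch of what an override could look like; the vdso_base bookkeeping is a plausible example, not a statement about any particular architecture:

	/* In an architecture's <asm/mm-arch-hooks.h>: */
	static inline void arch_remap(struct mm_struct *mm,
				      unsigned long old_start, unsigned long old_end,
				      unsigned long new_start, unsigned long new_end)
	{
		/* keep a per-mm pointer valid across mremap() of its mapping */
		if (old_start == mm->context.vdso_base)
			mm->context.vdso_base = new_start;
	}
	#define arch_remap arch_remap
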
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0755b9fd03a7..99959a34f4f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -27,6 +27,7 @@ struct anon_vma_chain;
27struct file_ra_state; 27struct file_ra_state;
28struct user_struct; 28struct user_struct;
29struct writeback_control; 29struct writeback_control;
30struct bdi_writeback;
30 31
31#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 32#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
32extern unsigned long max_mapnr; 33extern unsigned long max_mapnr;
@@ -499,7 +500,7 @@ static inline int page_count(struct page *page)
499 500
500static inline bool __compound_tail_refcounted(struct page *page) 501static inline bool __compound_tail_refcounted(struct page *page)
501{ 502{
502 return !PageSlab(page) && !PageHeadHuge(page); 503 return PageAnon(page) && !PageSlab(page) && !PageHeadHuge(page);
503} 504}
504 505
505/* 506/*
@@ -1211,10 +1212,13 @@ int __set_page_dirty_nobuffers(struct page *page);
1211int __set_page_dirty_no_writeback(struct page *page); 1212int __set_page_dirty_no_writeback(struct page *page);
1212int redirty_page_for_writepage(struct writeback_control *wbc, 1213int redirty_page_for_writepage(struct writeback_control *wbc,
1213 struct page *page); 1214 struct page *page);
1214void account_page_dirtied(struct page *page, struct address_space *mapping); 1215void account_page_dirtied(struct page *page, struct address_space *mapping,
 1216 struct mem_cgroup *memcg);
1215void account_page_cleaned(struct page *page, struct address_space *mapping); 1217void account_page_cleaned(struct page *page, struct address_space *mapping,
 1218 struct mem_cgroup *memcg, struct bdi_writeback *wb);
1216int set_page_dirty(struct page *page); 1219int set_page_dirty(struct page *page);
1217int set_page_dirty_lock(struct page *page); 1220int set_page_dirty_lock(struct page *page);
1221void cancel_dirty_page(struct page *page);
1218int clear_page_dirty_for_io(struct page *page); 1222int clear_page_dirty_for_io(struct page *page);
1219 1223
1220int get_cmdline(struct task_struct *task, char *buffer, int buflen); 1224int get_cmdline(struct task_struct *task, char *buffer, int buflen);
@@ -2146,12 +2150,47 @@ enum mf_flags {
2146extern int memory_failure(unsigned long pfn, int trapno, int flags); 2150extern int memory_failure(unsigned long pfn, int trapno, int flags);
2147extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2151extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2148extern int unpoison_memory(unsigned long pfn); 2152extern int unpoison_memory(unsigned long pfn);
2153extern int get_hwpoison_page(struct page *page);
2149extern int sysctl_memory_failure_early_kill; 2154extern int sysctl_memory_failure_early_kill;
2150extern int sysctl_memory_failure_recovery; 2155extern int sysctl_memory_failure_recovery;
2151extern void shake_page(struct page *p, int access); 2156extern void shake_page(struct page *p, int access);
2152extern atomic_long_t num_poisoned_pages; 2157extern atomic_long_t num_poisoned_pages;
2153extern int soft_offline_page(struct page *page, int flags); 2158extern int soft_offline_page(struct page *page, int flags);
2154 2159
2160
2161/*
2162 * Error handlers for various types of pages.
2163 */
2164enum mf_result {
2165 MF_IGNORED, /* Error: cannot be handled */
2166 MF_FAILED, /* Error: handling failed */
2167 MF_DELAYED, /* Will be handled later */
2168 MF_RECOVERED, /* Successfully recovered */
2169};
2170
2171enum mf_action_page_type {
2172 MF_MSG_KERNEL,
2173 MF_MSG_KERNEL_HIGH_ORDER,
2174 MF_MSG_SLAB,
2175 MF_MSG_DIFFERENT_COMPOUND,
2176 MF_MSG_POISONED_HUGE,
2177 MF_MSG_HUGE,
2178 MF_MSG_FREE_HUGE,
2179 MF_MSG_UNMAP_FAILED,
2180 MF_MSG_DIRTY_SWAPCACHE,
2181 MF_MSG_CLEAN_SWAPCACHE,
2182 MF_MSG_DIRTY_MLOCKED_LRU,
2183 MF_MSG_CLEAN_MLOCKED_LRU,
2184 MF_MSG_DIRTY_UNEVICTABLE_LRU,
2185 MF_MSG_CLEAN_UNEVICTABLE_LRU,
2186 MF_MSG_DIRTY_LRU,
2187 MF_MSG_CLEAN_LRU,
2188 MF_MSG_TRUNCATED_LRU,
2189 MF_MSG_BUDDY,
2190 MF_MSG_BUDDY_2ND,
2191 MF_MSG_UNKNOWN,
2192};
2193
2155#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 2194#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2156extern void clear_huge_page(struct page *page, 2195extern void clear_huge_page(struct page *page,
2157 unsigned long addr, 2196 unsigned long addr,
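
The two enums give memory-failure handling a uniform vocabulary: mf_action_page_type names the kind of page that was hit and mf_result names how the recovery attempt ended. A hedged sketch of reporting built only on these enums; the lookup table and message format are illustrative, with pfn and result supplied by the caller:

	static const char * const mf_result_name[] = {
		[MF_IGNORED]	= "Ignored",
		[MF_FAILED]	= "Failed",
		[MF_DELAYED]	= "Delayed",
		[MF_RECOVERED]	= "Recovered",
	};

	/* e.g. after attempting to handle a clean swapcache page: */
	pr_err("MCE %#lx: clean swapcache page: %s\n",
	       pfn, mf_result_name[result]);
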
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8d37e26a1007..0038ac7466fd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -226,6 +226,24 @@ struct page_frag {
226#endif 226#endif
227}; 227};
228 228
229#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
230#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
231
232struct page_frag_cache {
233 void * va;
234#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
235 __u16 offset;
236 __u16 size;
237#else
238 __u32 offset;
239#endif
 240 /* we maintain a pagecount bias, so that we don't dirty the cache
 241 * line containing page->_count every time we allocate a fragment.
242 */
243 unsigned int pagecnt_bias;
244 bool pfmemalloc;
245};
246
229typedef unsigned long __nocast vm_flags_t; 247typedef unsigned long __nocast vm_flags_t;
230 248
231/* 249/*
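
PAGE_FRAG_CACHE_MAX_SIZE is 32 KB rounded up to a whole page, and where PAGE_SIZE is smaller than that, offset and size both fit in 16 bits. The design intent stated in the comment is that pagecnt_bias stands in for per-fragment refcounting, so page->_count's cache line is touched only on refill. A simplified, hedged sketch of the allocation step (refill and bias-reconciliation omitted):

	static void *frag_alloc(struct page_frag_cache *nc, unsigned int fragsz)
	{
		if (nc->offset < fragsz)
			return NULL;		/* would enter the refill path */

		nc->offset -= fragsz;		/* carve from the tail downwards */
		nc->pagecnt_bias--;		/* one more outstanding fragment */
		return nc->va + nc->offset;
	}
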
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 19f0175c0afa..4d3776d25925 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -97,6 +97,7 @@ struct mmc_ext_csd {
97 u8 raw_erased_mem_count; /* 181 */ 97 u8 raw_erased_mem_count; /* 181 */
98 u8 raw_ext_csd_structure; /* 194 */ 98 u8 raw_ext_csd_structure; /* 194 */
99 u8 raw_card_type; /* 196 */ 99 u8 raw_card_type; /* 196 */
100 u8 raw_driver_strength; /* 197 */
100 u8 out_of_int_time; /* 198 */ 101 u8 out_of_int_time; /* 198 */
101 u8 raw_pwr_cl_52_195; /* 200 */ 102 u8 raw_pwr_cl_52_195; /* 200 */
102 u8 raw_pwr_cl_26_195; /* 201 */ 103 u8 raw_pwr_cl_26_195; /* 201 */
@@ -305,6 +306,7 @@ struct mmc_card {
305 306
306 unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */ 307 unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
307 unsigned int mmc_avail_type; /* supported device type by both host and card */ 308 unsigned int mmc_avail_type; /* supported device type by both host and card */
309 unsigned int drive_strength; /* for UHS-I, HS200 or HS400 */
308 310
309 struct dentry *debugfs_root; 311 struct dentry *debugfs_root;
310 struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */ 312 struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index de722d4e9d61..258daf914c6d 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -121,6 +121,7 @@ struct mmc_data {
121 struct mmc_request *mrq; /* associated request */ 121 struct mmc_request *mrq; /* associated request */
122 122
123 unsigned int sg_len; /* size of scatter list */ 123 unsigned int sg_len; /* size of scatter list */
124 int sg_count; /* mapped sg entries */
124 struct scatterlist *sg; /* I/O scatter list */ 125 struct scatterlist *sg; /* I/O scatter list */
125 s32 host_cookie; /* host private data */ 126 s32 host_cookie; /* host private data */
126}; 127};
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 12111993a317..5be97676f1fa 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -226,12 +226,6 @@ struct dw_mci_dma_ops {
226#define DW_MCI_QUIRK_HIGHSPEED BIT(2) 226#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
227/* Unreliable card detection */ 227/* Unreliable card detection */
228#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) 228#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
229/* No write protect */
230#define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4)
231
232/* Slot level quirks */
233/* This slot has no write protect */
234#define DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT BIT(0)
235 229
236struct dma_pdata; 230struct dma_pdata;
237 231
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index b5bedaec6223..1369e54faeb7 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/leds.h> 13#include <linux/leds.h>
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/timer.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
16#include <linux/device.h> 17#include <linux/device.h>
17#include <linux/fault-inject.h> 18#include <linux/fault-inject.h>
@@ -131,7 +132,9 @@ struct mmc_host_ops {
131 132
132 /* Prepare HS400 target operating frequency depending host driver */ 133 /* Prepare HS400 target operating frequency depending host driver */
133 int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); 134 int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
134 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); 135 int (*select_drive_strength)(struct mmc_card *card,
136 unsigned int max_dtr, int host_drv,
137 int card_drv, int *drv_type);
135 void (*hw_reset)(struct mmc_host *host); 138 void (*hw_reset)(struct mmc_host *host);
136 void (*card_event)(struct mmc_host *host); 139 void (*card_event)(struct mmc_host *host);
137 140
@@ -285,6 +288,7 @@ struct mmc_host {
285 MMC_CAP2_HS400_1_2V) 288 MMC_CAP2_HS400_1_2V)
286#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) 289#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
287#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) 290#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
291#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
288 292
289 mmc_pm_flag_t pm_caps; /* supported pm features */ 293 mmc_pm_flag_t pm_caps; /* supported pm features */
290 294
@@ -321,10 +325,18 @@ struct mmc_host {
321#ifdef CONFIG_MMC_DEBUG 325#ifdef CONFIG_MMC_DEBUG
322 unsigned int removed:1; /* host is being removed */ 326 unsigned int removed:1; /* host is being removed */
323#endif 327#endif
328 unsigned int can_retune:1; /* re-tuning can be used */
329 unsigned int doing_retune:1; /* re-tuning in progress */
330 unsigned int retune_now:1; /* do re-tuning at next req */
324 331
325 int rescan_disable; /* disable card detection */ 332 int rescan_disable; /* disable card detection */
326 int rescan_entered; /* used with nonremovable devices */ 333 int rescan_entered; /* used with nonremovable devices */
327 334
335 int need_retune; /* re-tuning is needed */
336 int hold_retune; /* hold off re-tuning */
337 unsigned int retune_period; /* re-tuning period in secs */
338 struct timer_list retune_timer; /* for periodic re-tuning */
339
328 bool trigger_card_event; /* card_event necessary */ 340 bool trigger_card_event; /* card_event necessary */
329 341
330 struct mmc_card *card; /* device attached to this host */ 342 struct mmc_card *card; /* device attached to this host */
@@ -513,4 +525,18 @@ static inline bool mmc_card_hs400(struct mmc_card *card)
513 return card->host->ios.timing == MMC_TIMING_MMC_HS400; 525 return card->host->ios.timing == MMC_TIMING_MMC_HS400;
514} 526}
515 527
528void mmc_retune_timer_stop(struct mmc_host *host);
529
530static inline void mmc_retune_needed(struct mmc_host *host)
531{
532 if (host->can_retune)
533 host->need_retune = 1;
534}
535
536static inline void mmc_retune_recheck(struct mmc_host *host)
537{
538 if (host->hold_retune <= 1)
539 host->retune_now = 1;
540}
541
516#endif /* LINUX_MMC_HOST_H */ 542#endif /* LINUX_MMC_HOST_H */
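
The re-tuning machinery splits responsibility: the host driver only marks tuning as stale, and the core decides when to re-run it (periodically via retune_timer, or at the next request once retune_now is set). A hedged sketch of the driver half; the function name is illustrative:

	/* e.g. in a host driver's transfer-error path: */
	static void example_host_data_error(struct mmc_host *host, int err)
	{
		if (err == -EILSEQ)		/* CRC error: sampling point drifted */
			mmc_retune_needed(host);
	}
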
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 124f562118b8..15f2c4a0a62c 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -302,6 +302,7 @@ struct _mmc_csd {
302#define EXT_CSD_REV 192 /* RO */ 302#define EXT_CSD_REV 192 /* RO */
303#define EXT_CSD_STRUCTURE 194 /* RO */ 303#define EXT_CSD_STRUCTURE 194 /* RO */
304#define EXT_CSD_CARD_TYPE 196 /* RO */ 304#define EXT_CSD_CARD_TYPE 196 /* RO */
305#define EXT_CSD_DRIVER_STRENGTH 197 /* RO */
305#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */ 306#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
306#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */ 307#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
307#define EXT_CSD_PWR_CL_52_195 200 /* RO */ 308#define EXT_CSD_PWR_CL_52_195 200 /* RO */
@@ -390,6 +391,7 @@ struct _mmc_csd {
390#define EXT_CSD_TIMING_HS 1 /* High speed */ 391#define EXT_CSD_TIMING_HS 1 /* High speed */
391#define EXT_CSD_TIMING_HS200 2 /* HS200 */ 392#define EXT_CSD_TIMING_HS200 2 /* HS200 */
392#define EXT_CSD_TIMING_HS400 3 /* HS400 */ 393#define EXT_CSD_TIMING_HS400 3 /* HS400 */
394#define EXT_CSD_DRV_STR_SHIFT 4 /* Driver Strength shift */
393 395
394#define EXT_CSD_SEC_ER_EN BIT(0) 396#define EXT_CSD_SEC_ER_EN BIT(0)
395#define EXT_CSD_SEC_BD_BLK_EN BIT(2) 397#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
@@ -441,4 +443,6 @@ struct _mmc_csd {
441#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ 443#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
442#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ 444#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
443 445
446#define mmc_driver_type_mask(n) (1 << (n))
447
444#endif /* LINUX_MMC_MMC_H */ 448#endif /* LINUX_MMC_MMC_H */
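
EXT_CSD_DRV_STR_SHIFT and mmc_driver_type_mask() compose: the card advertises supported types in EXT_CSD byte 197, a candidate type is tested with the mask, and the chosen type is packed into the upper nibble of the HS_TIMING switch value. A hedged sketch:

	if (card->ext_csd.raw_driver_strength & mmc_driver_type_mask(drv_type)) {
		u8 val = EXT_CSD_TIMING_HS200 |
			 (drv_type << EXT_CSD_DRV_STR_SHIFT);
		/* ...issue MMC_SWITCH, MMC_SWITCH_MODE_WRITE_BYTE, on
		 * EXT_CSD_HS_TIMING with 'val'...
		 */
	}
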
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 8959604a13d3..fda15b6d4135 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -15,4 +15,6 @@ struct sdhci_pci_data {
15extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, 15extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
16 int slotno); 16 int slotno);
17 17
18extern int sdhci_pci_spt_drive_strength;
19
18#endif 20#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 95243d28a0ee..61cd67f4d788 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -324,25 +324,25 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
324 ___pte; \ 324 ___pte; \
325}) 325})
326 326
327#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \ 327#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
328({ \ 328({ \
329 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ 329 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
330 struct mm_struct *___mm = (__vma)->vm_mm; \ 330 struct mm_struct *___mm = (__vma)->vm_mm; \
331 pmd_t ___pmd; \ 331 pmd_t ___pmd; \
332 \ 332 \
333 ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \ 333 ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
334 mmu_notifier_invalidate_range(___mm, ___haddr, \ 334 mmu_notifier_invalidate_range(___mm, ___haddr, \
335 ___haddr + HPAGE_PMD_SIZE); \ 335 ___haddr + HPAGE_PMD_SIZE); \
336 \ 336 \
337 ___pmd; \ 337 ___pmd; \
338}) 338})
339 339
340#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \ 340#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
341({ \ 341({ \
342 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ 342 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
343 pmd_t ___pmd; \ 343 pmd_t ___pmd; \
344 \ 344 \
345 ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \ 345 ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
346 mmu_notifier_invalidate_range(__mm, ___haddr, \ 346 mmu_notifier_invalidate_range(__mm, ___haddr, \
347 ___haddr + HPAGE_PMD_SIZE); \ 347 ___haddr + HPAGE_PMD_SIZE); \
348 \ 348 \
@@ -428,8 +428,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
428#define ptep_clear_flush_young_notify ptep_clear_flush_young 428#define ptep_clear_flush_young_notify ptep_clear_flush_young
429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young 429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
430#define ptep_clear_flush_notify ptep_clear_flush 430#define ptep_clear_flush_notify ptep_clear_flush
431#define pmdp_clear_flush_notify pmdp_clear_flush 431#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
432#define pmdp_get_and_clear_notify pmdp_get_and_clear 432#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
433#define set_pte_at_notify set_pte_at 433#define set_pte_at_notify set_pte_at
434 434
435#endif /* CONFIG_MMU_NOTIFIER */ 435#endif /* CONFIG_MMU_NOTIFIER */
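
The rename tracks the huge-PMD primitives gaining a "huge" infix; each _notify wrapper pairs the primitive with the mmu_notifier invalidation that secondary MMUs (KVM, IOMMU) rely on. A hedged sketch of a THP teardown step using the new name, with vma/haddr/pmd from the caller:

	pmd_t entry;

	/* clear and TLB-flush the huge PMD, then notify secondary MMUs */
	entry = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
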
diff --git a/include/linux/module.h b/include/linux/module.h
index c883b86ea964..1e5436042eb0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -655,4 +655,16 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
655static inline void module_bug_cleanup(struct module *mod) {} 655static inline void module_bug_cleanup(struct module *mod) {}
656#endif /* CONFIG_GENERIC_BUG */ 656#endif /* CONFIG_GENERIC_BUG */
657 657
658#ifdef CONFIG_MODULE_SIG
659static inline bool module_sig_ok(struct module *module)
660{
661 return module->sig_ok;
662}
663#else /* !CONFIG_MODULE_SIG */
664static inline bool module_sig_ok(struct module *module)
665{
666 return true;
667}
668#endif /* CONFIG_MODULE_SIG */
669
658#endif /* _LINUX_MODULE_H */ 670#endif /* _LINUX_MODULE_H */
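
module_sig_ok() gives callers a single predicate that collapses to "always trusted" when CONFIG_MODULE_SIG is off, so policy code needs no #ifdefs of its own. A hedged sketch of a consumer check; the error code choice is illustrative:

	if (!module_sig_ok(mod))
		return -EKEYREJECTED;	/* refuse to trust an unsigned module */
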
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 5af1b81def49..641b7d6fd096 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -81,6 +81,8 @@ MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
81int mpi_fromstr(MPI val, const char *str); 81int mpi_fromstr(MPI val, const char *str);
82u32 mpi_get_keyid(MPI a, u32 *keyid); 82u32 mpi_get_keyid(MPI a, u32 *keyid);
83void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); 83void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
84int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
85 int *sign);
84void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); 86void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
85int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); 87int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign);
86 88
@@ -142,4 +144,17 @@ int mpi_rshift(MPI x, MPI a, unsigned n);
142/*-- mpi-inv.c --*/ 144/*-- mpi-inv.c --*/
143int mpi_invm(MPI x, MPI u, MPI v); 145int mpi_invm(MPI x, MPI u, MPI v);
144 146
147/* inline functions */
148
149/**
150 * mpi_get_size() - returns max size required to store the number
151 *
 152 * @a: A multi-precision integer for which we want to allocate a buffer
153 *
154 * Return: size required to store the number
155 */
156static inline unsigned int mpi_get_size(MPI a)
157{
158 return a->nlimbs * BYTES_PER_MPI_LIMB;
159}
145#endif /*G10_MPI_H */ 160#endif /*G10_MPI_H */
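
mpi_get_size() pairs naturally with the new mpi_read_buffer(): size the destination first, then serialize into it. A hedged sketch, assuming GFP_KERNEL context:

	unsigned int nbytes;
	int sign, ret;
	u8 *buf;

	buf = kmalloc(mpi_get_size(a), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	ret = mpi_read_buffer(a, buf, mpi_get_size(a), &nbytes, &sign);
	/* on success, buf[0..nbytes-1] holds the big-endian magnitude */
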
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 299d7d31fe53..9b57a9b1b081 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -296,183 +296,19 @@ struct cfi_private {
296 struct flchip chips[0]; /* per-chip data structure for each chip */ 296 struct flchip chips[0]; /* per-chip data structure for each chip */
297}; 297};
298 298
299/* 299uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
300 * Returns the command address according to the given geometry. 300 struct map_info *map, struct cfi_private *cfi);
301 */
302static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
303 struct map_info *map, struct cfi_private *cfi)
304{
305 unsigned bankwidth = map_bankwidth(map);
306 unsigned interleave = cfi_interleave(cfi);
307 unsigned type = cfi->device_type;
308 uint32_t addr;
309
310 addr = (cmd_ofs * type) * interleave;
311
312 /* Modify the unlock address if we are in compatibility mode.
313 * For 16bit devices on 8 bit busses
314 * and 32bit devices on 16 bit busses
315 * set the low bit of the alternating bit sequence of the address.
316 */
317 if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
318 addr |= (type >> 1)*interleave;
319
320 return addr;
321}
322
323/*
324 * Transforms the CFI command for the given geometry (bus width & interleave).
325 * It looks too long to be inline, but in the common case it should almost all
326 * get optimised away.
327 */
328static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
329{
330 map_word val = { {0} };
331 int wordwidth, words_per_bus, chip_mode, chips_per_word;
332 unsigned long onecmd;
333 int i;
334
335 /* We do it this way to give the compiler a fighting chance
336 of optimising away all the crap for 'bankwidth' larger than
337 an unsigned long, in the common case where that support is
338 disabled */
339 if (map_bankwidth_is_large(map)) {
340 wordwidth = sizeof(unsigned long);
341 words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
342 } else {
343 wordwidth = map_bankwidth(map);
344 words_per_bus = 1;
345 }
346
347 chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
348 chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
349
350 /* First, determine what the bit-pattern should be for a single
351 device, according to chip mode and endianness... */
352 switch (chip_mode) {
353 default: BUG();
354 case 1:
355 onecmd = cmd;
356 break;
357 case 2:
358 onecmd = cpu_to_cfi16(map, cmd);
359 break;
360 case 4:
361 onecmd = cpu_to_cfi32(map, cmd);
362 break;
363 }
364
365 /* Now replicate it across the size of an unsigned long, or
366 just to the bus width as appropriate */
367 switch (chips_per_word) {
368 default: BUG();
369#if BITS_PER_LONG >= 64
370 case 8:
371 onecmd |= (onecmd << (chip_mode * 32));
372#endif
373 case 4:
374 onecmd |= (onecmd << (chip_mode * 16));
375 case 2:
376 onecmd |= (onecmd << (chip_mode * 8));
377 case 1:
378 ;
379 }
380 301
381 /* And finally, for the multi-word case, replicate it 302map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
382 in all words in the structure */
383 for (i=0; i < words_per_bus; i++) {
384 val.x[i] = onecmd;
385 }
386
387 return val;
388}
389#define CMD(x) cfi_build_cmd((x), map, cfi) 303#define CMD(x) cfi_build_cmd((x), map, cfi)
390 304
391 305unsigned long cfi_merge_status(map_word val, struct map_info *map,
392static inline unsigned long cfi_merge_status(map_word val, struct map_info *map, 306 struct cfi_private *cfi);
393 struct cfi_private *cfi)
394{
395 int wordwidth, words_per_bus, chip_mode, chips_per_word;
396 unsigned long onestat, res = 0;
397 int i;
398
399 /* We do it this way to give the compiler a fighting chance
400 of optimising away all the crap for 'bankwidth' larger than
401 an unsigned long, in the common case where that support is
402 disabled */
403 if (map_bankwidth_is_large(map)) {
404 wordwidth = sizeof(unsigned long);
405 words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
406 } else {
407 wordwidth = map_bankwidth(map);
408 words_per_bus = 1;
409 }
410
411 chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
412 chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
413
414 onestat = val.x[0];
415 /* Or all status words together */
416 for (i=1; i < words_per_bus; i++) {
417 onestat |= val.x[i];
418 }
419
420 res = onestat;
421 switch(chips_per_word) {
422 default: BUG();
423#if BITS_PER_LONG >= 64
424 case 8:
425 res |= (onestat >> (chip_mode * 32));
426#endif
427 case 4:
428 res |= (onestat >> (chip_mode * 16));
429 case 2:
430 res |= (onestat >> (chip_mode * 8));
431 case 1:
432 ;
433 }
434
435 /* Last, determine what the bit-pattern should be for a single
436 device, according to chip mode and endianness... */
437 switch (chip_mode) {
438 case 1:
439 break;
440 case 2:
441 res = cfi16_to_cpu(map, res);
442 break;
443 case 4:
444 res = cfi32_to_cpu(map, res);
445 break;
446 default: BUG();
447 }
448 return res;
449}
450
451#define MERGESTATUS(x) cfi_merge_status((x), map, cfi) 307#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
452 308
453 309uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
454/*
455 * Sends a CFI command to a bank of flash for the given geometry.
456 *
457 * Returns the offset in flash where the command was written.
458 * If prev_val is non-null, it will be set to the value at the command address,
459 * before the command was written.
460 */
461static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
462 struct map_info *map, struct cfi_private *cfi, 310 struct map_info *map, struct cfi_private *cfi,
463 int type, map_word *prev_val) 311 int type, map_word *prev_val);
464{
465 map_word val;
466 uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
467 val = cfi_build_cmd(cmd, map, cfi);
468
469 if (prev_val)
470 *prev_val = map_read(map, addr);
471
472 map_write(map, val, addr);
473
474 return addr - base;
475}
476 312
477static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) 313static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
478{ 314{
@@ -506,15 +342,7 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
506 } 342 }
507} 343}
508 344
509static inline void cfi_udelay(int us) 345void cfi_udelay(int us);
510{
511 if (us >= 1000) {
512 msleep((us+999)/1000);
513 } else {
514 udelay(us);
515 cond_resched();
516 }
517}
518 346
519int __xipram cfi_qry_present(struct map_info *map, __u32 base, 347int __xipram cfi_qry_present(struct map_info *map, __u32 base,
520 struct cfi_private *cfi); 348 struct cfi_private *cfi);
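
Moving these helpers out of line changes nothing for callers: the classic JEDEC-style command sequences read exactly as before. A hedged sketch of the usual AMD unlock-plus-autoselect pattern built on cfi_send_gen_cmd(); 0x555/0x2AA are the standard unlock offsets, scaled internally for bus width and interleave:

	cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
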
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3d4ea7eb2b68..f25e2bdd188c 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -26,6 +26,8 @@
26 26
27struct mtd_info; 27struct mtd_info;
28struct nand_flash_dev; 28struct nand_flash_dev;
29struct device_node;
30
29/* Scan and identify a NAND device */ 31/* Scan and identify a NAND device */
30extern int nand_scan(struct mtd_info *mtd, int max_chips); 32extern int nand_scan(struct mtd_info *mtd, int max_chips);
31/* 33/*
@@ -542,6 +544,7 @@ struct nand_buffers {
542 * flash device 544 * flash device
543 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the 545 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
544 * flash device. 546 * flash device.
547 * @dn: [BOARDSPECIFIC] device node describing this instance
545 * @read_byte: [REPLACEABLE] read one byte from the chip 548 * @read_byte: [REPLACEABLE] read one byte from the chip
546 * @read_word: [REPLACEABLE] read one word from the chip 549 * @read_word: [REPLACEABLE] read one word from the chip
547 * @write_byte: [REPLACEABLE] write a single byte to the chip on the 550 * @write_byte: [REPLACEABLE] write a single byte to the chip on the
@@ -644,6 +647,8 @@ struct nand_chip {
644 void __iomem *IO_ADDR_R; 647 void __iomem *IO_ADDR_R;
645 void __iomem *IO_ADDR_W; 648 void __iomem *IO_ADDR_W;
646 649
650 struct device_node *dn;
651
647 uint8_t (*read_byte)(struct mtd_info *mtd); 652 uint8_t (*read_byte)(struct mtd_info *mtd);
648 u16 (*read_word)(struct mtd_info *mtd); 653 u16 (*read_word)(struct mtd_info *mtd);
649 void (*write_byte)(struct mtd_info *mtd, uint8_t byte); 654 void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
@@ -833,7 +838,6 @@ struct nand_manufacturers {
833extern struct nand_flash_dev nand_flash_ids[]; 838extern struct nand_flash_dev nand_flash_ids[];
834extern struct nand_manufacturers nand_manuf_ids[]; 839extern struct nand_manufacturers nand_manuf_ids[];
835 840
836extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
837extern int nand_default_bbt(struct mtd_info *mtd); 841extern int nand_default_bbt(struct mtd_info *mtd);
838extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); 842extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
839extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); 843extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index c8990779f0c3..d8c6334cd150 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -1,16 +1,15 @@
1#ifndef _LINUX_NAMEI_H 1#ifndef _LINUX_NAMEI_H
2#define _LINUX_NAMEI_H 2#define _LINUX_NAMEI_H
3 3
4#include <linux/dcache.h> 4#include <linux/kernel.h>
5#include <linux/errno.h>
6#include <linux/linkage.h>
7#include <linux/path.h> 5#include <linux/path.h>
8 6#include <linux/fcntl.h>
9struct vfsmount; 7#include <linux/errno.h>
10struct nameidata;
11 8
12enum { MAX_NESTED_LINKS = 8 }; 9enum { MAX_NESTED_LINKS = 8 };
13 10
11#define MAXSYMLINKS 40
12
14/* 13/*
15 * Type of the last component on LOOKUP_PARENT 14 * Type of the last component on LOOKUP_PARENT
16 */ 15 */
@@ -45,13 +44,29 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
45#define LOOKUP_ROOT 0x2000 44#define LOOKUP_ROOT 0x2000
46#define LOOKUP_EMPTY 0x4000 45#define LOOKUP_EMPTY 0x4000
47 46
48extern int user_path_at(int, const char __user *, unsigned, struct path *);
49extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); 47extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
50 48
 51#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
 52#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
 53#define user_path_dir(name, path) \
 54 user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
 49static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
 50 struct path *path)
 51{
 52 return user_path_at_empty(dfd, name, flags, path, NULL);
53}
54
55static inline int user_path(const char __user *name, struct path *path)
56{
57 return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL);
58}
59
60static inline int user_lpath(const char __user *name, struct path *path)
61{
62 return user_path_at_empty(AT_FDCWD, name, 0, path, NULL);
63}
64
65static inline int user_path_dir(const char __user *name, struct path *path)
66{
67 return user_path_at_empty(AT_FDCWD, name,
68 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL);
69}
55 70
56extern int kern_path(const char *, unsigned, struct path *); 71extern int kern_path(const char *, unsigned, struct path *);
57 72
@@ -70,9 +85,7 @@ extern int follow_up(struct path *);
70extern struct dentry *lock_rename(struct dentry *, struct dentry *); 85extern struct dentry *lock_rename(struct dentry *, struct dentry *);
71extern void unlock_rename(struct dentry *, struct dentry *); 86extern void unlock_rename(struct dentry *, struct dentry *);
72 87
73extern void nd_jump_link(struct nameidata *nd, struct path *path); 88extern void nd_jump_link(struct path *path);
74extern void nd_set_link(struct nameidata *nd, char *path);
75extern char *nd_get_link(struct nameidata *nd);
76 89
77static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) 90static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
78{ 91{
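
Callers see no behavioral change from the macro-to-inline conversion, but every path argument is now type-checked. A hedged usage sketch:

	struct path path;
	int err = user_path_at(AT_FDCWD, filename, LOOKUP_FOLLOW, &path);

	if (!err)
		path_put(&path);	/* drop the reference the lookup took */
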
diff --git a/include/linux/net.h b/include/linux/net.h
index 738ea48be889..04aa06852771 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -38,7 +38,6 @@ struct net;
38#define SOCK_NOSPACE 2 38#define SOCK_NOSPACE 2
39#define SOCK_PASSCRED 3 39#define SOCK_PASSCRED 3
40#define SOCK_PASSSEC 4 40#define SOCK_PASSSEC 4
41#define SOCK_EXTERNALLY_ALLOCATED 5
42 41
43#ifndef ARCH_HAS_SOCKET_TYPES 42#ifndef ARCH_HAS_SOCKET_TYPES
44/** 43/**
@@ -208,7 +207,7 @@ void sock_unregister(int family);
208int __sock_create(struct net *net, int family, int type, int proto, 207int __sock_create(struct net *net, int family, int type, int proto,
209 struct socket **res, int kern); 208 struct socket **res, int kern);
210int sock_create(int family, int type, int proto, struct socket **res); 209int sock_create(int family, int type, int proto, struct socket **res);
211int sock_create_kern(int family, int type, int proto, struct socket **res); 210int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
212int sock_create_lite(int family, int type, int proto, struct socket **res); 211int sock_create_lite(int family, int type, int proto, struct socket **res);
213void sock_release(struct socket *sock); 212void sock_release(struct socket *sock);
214int sock_sendmsg(struct socket *sock, struct msghdr *msg); 213int sock_sendmsg(struct socket *sock, struct msghdr *msg);
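
sock_create_kern() now takes the network namespace explicitly instead of implying init_net. A hedged sketch of an updated caller:

	struct socket *sock;
	int err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM,
				   IPPROTO_UDP, &sock);

	if (!err)
		sock_release(sock);
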
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 7d59dc6ab789..9672781c593d 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,7 +66,6 @@ enum {
66 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ 66 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ 67 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */ 68 NETIF_F_BUSY_POLL_BIT, /* Busy poll */
69 NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
70 69
71 /* 70 /*
72 * Add your fresh new feature above and remember to update 71 * Add your fresh new feature above and remember to update
@@ -125,7 +124,6 @@ enum {
125#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
126#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) 125#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
127#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) 126#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
128#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
129 127
130/* Features valid for ethtool to change */ 128/* Features valid for ethtool to change */
131/* = all defined minus driver/device-class-related */ 129/* = all defined minus driver/device-class-related */
@@ -161,8 +159,7 @@ enum {
161 */ 159 */
162#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ 160#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
163 NETIF_F_SG | NETIF_F_HIGHDMA | \ 161 NETIF_F_SG | NETIF_F_HIGHDMA | \
164 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \ 162 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
165 NETIF_F_HW_SWITCH_OFFLOAD)
166 163
167/* 164/*
168 * If one device doesn't support one of these features, then disable it 165 * If one device doesn't support one of these features, then disable it
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 05b9a694e213..e20979dfd6a9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1100,6 +1100,10 @@ struct net_device_ops {
1100 struct ifla_vf_info *ivf); 1100 struct ifla_vf_info *ivf);
1101 int (*ndo_set_vf_link_state)(struct net_device *dev, 1101 int (*ndo_set_vf_link_state)(struct net_device *dev,
1102 int vf, int link_state); 1102 int vf, int link_state);
1103 int (*ndo_get_vf_stats)(struct net_device *dev,
1104 int vf,
1105 struct ifla_vf_stats
1106 *vf_stats);
1103 int (*ndo_set_vf_port)(struct net_device *dev, 1107 int (*ndo_set_vf_port)(struct net_device *dev,
1104 int vf, 1108 int vf,
1105 struct nlattr *port[]); 1109 struct nlattr *port[]);
@@ -1564,7 +1568,7 @@ struct net_device {
1564 const struct net_device_ops *netdev_ops; 1568 const struct net_device_ops *netdev_ops;
1565 const struct ethtool_ops *ethtool_ops; 1569 const struct ethtool_ops *ethtool_ops;
1566#ifdef CONFIG_NET_SWITCHDEV 1570#ifdef CONFIG_NET_SWITCHDEV
1567 const struct swdev_ops *swdev_ops; 1571 const struct switchdev_ops *switchdev_ops;
1568#endif 1572#endif
1569 1573
1570 const struct header_ops *header_ops; 1574 const struct header_ops *header_ops;
@@ -1652,7 +1656,14 @@ struct net_device {
1652 rx_handler_func_t __rcu *rx_handler; 1656 rx_handler_func_t __rcu *rx_handler;
1653 void __rcu *rx_handler_data; 1657 void __rcu *rx_handler_data;
1654 1658
1659#ifdef CONFIG_NET_CLS_ACT
1660 struct tcf_proto __rcu *ingress_cl_list;
1661#endif
1655 struct netdev_queue __rcu *ingress_queue; 1662 struct netdev_queue __rcu *ingress_queue;
1663#ifdef CONFIG_NETFILTER_INGRESS
1664 struct list_head nf_hooks_ingress;
1665#endif
1666
1656 unsigned char broadcast[MAX_ADDR_LEN]; 1667 unsigned char broadcast[MAX_ADDR_LEN];
1657#ifdef CONFIG_RFS_ACCEL 1668#ifdef CONFIG_RFS_ACCEL
1658 struct cpu_rmap *rx_cpu_rmap; 1669 struct cpu_rmap *rx_cpu_rmap;
@@ -1990,6 +2001,7 @@ struct offload_callbacks {
1990 2001
1991struct packet_offload { 2002struct packet_offload {
1992 __be16 type; /* This is really htons(ether_type). */ 2003 __be16 type; /* This is really htons(ether_type). */
2004 u16 priority;
1993 struct offload_callbacks callbacks; 2005 struct offload_callbacks callbacks;
1994 struct list_head list; 2006 struct list_head list;
1995}; 2007};
@@ -2552,10 +2564,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
2552 2564
2553static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 2565static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2554{ 2566{
2555 if (WARN_ON(!dev_queue)) {
2556 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
2557 return;
2558 }
2559 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 2567 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2560} 2568}
2561 2569
@@ -2571,15 +2579,7 @@ static inline void netif_stop_queue(struct net_device *dev)
2571 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 2579 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2572} 2580}
2573 2581
2574static inline void netif_tx_stop_all_queues(struct net_device *dev) 2582void netif_tx_stop_all_queues(struct net_device *dev);
2575{
2576 unsigned int i;
2577
2578 for (i = 0; i < dev->num_tx_queues; i++) {
2579 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2580 netif_tx_stop_queue(txq);
2581 }
2582}
2583 2583
2584static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 2584static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2585{ 2585{
@@ -2840,6 +2840,9 @@ static inline int netif_set_xps_queue(struct net_device *dev,
2840} 2840}
2841#endif 2841#endif
2842 2842
2843u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2844 unsigned int num_tx_queues);
2845
2843/* 2846/*
2844 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used 2847 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2845 * as a distribution range limit for the returned value. 2848 * as a distribution range limit for the returned value.
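
packet_offload instances now carry a priority so that, for example, tunnel offloads can order themselves against the L3 handlers, and __skb_tx_hash() moves out of line for reuse. A hedged registration sketch; the priority value is illustrative, lower values are assumed to be walked first, and the two callbacks are hypothetical:

	static struct packet_offload example_offload __read_mostly = {
		.type = cpu_to_be16(ETH_P_IP),
		.priority = 10,
		.callbacks = {
			.gro_receive	= example_gro_receive,
			.gro_complete	= example_gro_complete,
		},
	};

	static int __init example_init(void)
	{
		dev_add_offload(&example_offload);
		return 0;
	}
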
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 63560d0a8dfe..00050dfd9f23 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -10,7 +10,8 @@
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/static_key.h> 12#include <linux/static_key.h>
13#include <uapi/linux/netfilter.h> 13#include <linux/netfilter_defs.h>
14
14#ifdef CONFIG_NETFILTER 15#ifdef CONFIG_NETFILTER
15static inline int NF_DROP_GETERR(int verdict) 16static inline int NF_DROP_GETERR(int verdict)
16{ 17{
@@ -38,9 +39,6 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
38 39
39int netfilter_init(void); 40int netfilter_init(void);
40 41
41/* Largest hook number + 1 */
42#define NF_MAX_HOOKS 8
43
44struct sk_buff; 42struct sk_buff;
45 43
46struct nf_hook_ops; 44struct nf_hook_ops;
@@ -54,10 +52,12 @@ struct nf_hook_state {
54 struct net_device *in; 52 struct net_device *in;
55 struct net_device *out; 53 struct net_device *out;
56 struct sock *sk; 54 struct sock *sk;
55 struct list_head *hook_list;
57 int (*okfn)(struct sock *, struct sk_buff *); 56 int (*okfn)(struct sock *, struct sk_buff *);
58}; 57};
59 58
60static inline void nf_hook_state_init(struct nf_hook_state *p, 59static inline void nf_hook_state_init(struct nf_hook_state *p,
60 struct list_head *hook_list,
61 unsigned int hook, 61 unsigned int hook,
62 int thresh, u_int8_t pf, 62 int thresh, u_int8_t pf,
63 struct net_device *indev, 63 struct net_device *indev,
@@ -71,6 +71,7 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
71 p->in = indev; 71 p->in = indev;
72 p->out = outdev; 72 p->out = outdev;
73 p->sk = sk; 73 p->sk = sk;
74 p->hook_list = hook_list;
74 p->okfn = okfn; 75 p->okfn = okfn;
75} 76}
76 77
@@ -79,16 +80,17 @@ typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
79 const struct nf_hook_state *state); 80 const struct nf_hook_state *state);
80 81
81struct nf_hook_ops { 82struct nf_hook_ops {
 82 struct list_head list; 83 struct list_head list;
 83 84
 84 /* User fills in from here down. */ 85 /* User fills in from here down. */
 85 nf_hookfn *hook; 86 nf_hookfn *hook;
 87 struct net_device *dev;
 86 struct module *owner; 88 struct module *owner;
 87 void *priv; 89 void *priv;
 88 u_int8_t pf; 90 u_int8_t pf;
 89 unsigned int hooknum; 91 unsigned int hooknum;
 90 /* Hooks are ordered in ascending priority. */ 92 /* Hooks are ordered in ascending priority. */
 91 int priority; 93 int priority;
92}; 94};
93 95
94struct nf_sockopt_ops { 96struct nf_sockopt_ops {
@@ -131,26 +133,33 @@ extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
131#ifdef HAVE_JUMP_LABEL 133#ifdef HAVE_JUMP_LABEL
132extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 134extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
133 135
134static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) 136static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
137 u_int8_t pf, unsigned int hook)
135{ 138{
136 if (__builtin_constant_p(pf) && 139 if (__builtin_constant_p(pf) &&
137 __builtin_constant_p(hook)) 140 __builtin_constant_p(hook))
138 return static_key_false(&nf_hooks_needed[pf][hook]); 141 return static_key_false(&nf_hooks_needed[pf][hook]);
139 142
140 return !list_empty(&nf_hooks[pf][hook]); 143 return !list_empty(nf_hook_list);
141} 144}
142#else 145#else
143static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) 146static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
147 u_int8_t pf, unsigned int hook)
144{ 148{
145 return !list_empty(&nf_hooks[pf][hook]); 149 return !list_empty(nf_hook_list);
146} 150}
147#endif 151#endif
148 152
153static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
154{
155 return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
156}
157
149int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); 158int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
150 159
151/** 160/**
152 * nf_hook_thresh - call a netfilter hook 161 * nf_hook_thresh - call a netfilter hook
153 * 162 *
154 * Returns 1 if the hook has allowed the packet to pass. The function 163 * Returns 1 if the hook has allowed the packet to pass. The function
155 * okfn must be invoked by the caller in this case. Any other return 164 * okfn must be invoked by the caller in this case. Any other return
156 * value indicates the packet has been consumed by the hook. 165 * value indicates the packet has been consumed by the hook.
@@ -166,8 +175,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
166 if (nf_hooks_active(pf, hook)) { 175 if (nf_hooks_active(pf, hook)) {
167 struct nf_hook_state state; 176 struct nf_hook_state state;
168 177
169 nf_hook_state_init(&state, hook, thresh, pf, 178 nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
170 indev, outdev, sk, okfn); 179 pf, indev, outdev, sk, okfn);
171 return nf_hook_slow(skb, &state); 180 return nf_hook_slow(skb, &state);
172 } 181 }
173 return 1; 182 return 1;
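
Carrying the hook list inside nf_hook_state is what lets per-device ingress hooks reuse nf_hook_slow() unchanged: traversal no longer assumes the global nf_hooks table. A hedged sketch of initializing state against the device-private list added to struct net_device above; NFPROTO_NETDEV and NF_NETDEV_INGRESS come from the same ingress series and are assumptions here, with skb and okfn from the caller:

	struct nf_hook_state state;

	nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
			   skb->dev, NULL, NULL, okfn);
	return nf_hook_slow(skb, &state);
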
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34b172301558..48bb01edcf30 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -108,8 +108,13 @@ struct ip_set_counter {
108 atomic64_t packets; 108 atomic64_t packets;
109}; 109};
110 110
111struct ip_set_comment_rcu {
112 struct rcu_head rcu;
113 char str[0];
114};
115
111struct ip_set_comment { 116struct ip_set_comment {
112 char *str; 117 struct ip_set_comment_rcu __rcu *c;
113}; 118};
114 119
115struct ip_set_skbinfo { 120struct ip_set_skbinfo {
@@ -122,13 +127,13 @@ struct ip_set_skbinfo {
122struct ip_set; 127struct ip_set;
123 128
124#define ext_timeout(e, s) \ 129#define ext_timeout(e, s) \
125(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]) 130((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
126#define ext_counter(e, s) \ 131#define ext_counter(e, s) \
127(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]) 132((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
128#define ext_comment(e, s) \ 133#define ext_comment(e, s) \
129(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]) 134((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
130#define ext_skbinfo(e, s) \ 135#define ext_skbinfo(e, s) \
131(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]) 136((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
132 137
133typedef int (*ipset_adtfn)(struct ip_set *set, void *value, 138typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
134 const struct ip_set_ext *ext, 139 const struct ip_set_ext *ext,
@@ -176,6 +181,9 @@ struct ip_set_type_variant {
176 /* List elements */ 181 /* List elements */
177 int (*list)(const struct ip_set *set, struct sk_buff *skb, 182 int (*list)(const struct ip_set *set, struct sk_buff *skb,
178 struct netlink_callback *cb); 183 struct netlink_callback *cb);
184 /* Keep listing private when resizing runs parallel */
185 void (*uref)(struct ip_set *set, struct netlink_callback *cb,
186 bool start);
179 187
180 /* Return true if "b" set is the same as "a" 188 /* Return true if "b" set is the same as "a"
181 * according to the create set parameters */ 189 * according to the create set parameters */
@@ -223,7 +231,7 @@ struct ip_set {
223 /* The name of the set */ 231 /* The name of the set */
224 char name[IPSET_MAXNAMELEN]; 232 char name[IPSET_MAXNAMELEN];
225 /* Lock protecting the set data */ 233 /* Lock protecting the set data */
226 rwlock_t lock; 234 spinlock_t lock;
227 /* References to the set */ 235 /* References to the set */
228 u32 ref; 236 u32 ref;
229 /* The core set type */ 237 /* The core set type */
@@ -341,12 +349,11 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
341 cpu_to_be64((u64)skbinfo->skbmark << 32 | 349 cpu_to_be64((u64)skbinfo->skbmark << 32 |
342 skbinfo->skbmarkmask))) || 350 skbinfo->skbmarkmask))) ||
343 (skbinfo->skbprio && 351 (skbinfo->skbprio &&
344 nla_put_net32(skb, IPSET_ATTR_SKBPRIO, 352 nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
345 cpu_to_be32(skbinfo->skbprio))) || 353 cpu_to_be32(skbinfo->skbprio))) ||
346 (skbinfo->skbqueue && 354 (skbinfo->skbqueue &&
347 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, 355 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
348 cpu_to_be16(skbinfo->skbqueue))); 356 cpu_to_be16(skbinfo->skbqueue)));
349
350} 357}
351 358
352static inline void 359static inline void
@@ -380,12 +387,12 @@ ip_set_init_counter(struct ip_set_counter *counter,
380 387
381/* Netlink CB args */ 388/* Netlink CB args */
382enum { 389enum {
383 IPSET_CB_NET = 0, 390 IPSET_CB_NET = 0, /* net namespace */
384 IPSET_CB_DUMP, 391 IPSET_CB_DUMP, /* dump single set/all sets */
385 IPSET_CB_INDEX, 392 IPSET_CB_INDEX, /* set index */
386 IPSET_CB_ARG0, 393 IPSET_CB_PRIVATE, /* set private data */
394 IPSET_CB_ARG0, /* type specific */
387 IPSET_CB_ARG1, 395 IPSET_CB_ARG1,
388 IPSET_CB_ARG2,
389}; 396};
390 397
391/* register and unregister set references */ 398/* register and unregister set references */
@@ -533,29 +540,9 @@ bitmap_bytes(u32 a, u32 b)
533#include <linux/netfilter/ipset/ip_set_timeout.h> 540#include <linux/netfilter/ipset/ip_set_timeout.h>
534#include <linux/netfilter/ipset/ip_set_comment.h> 541#include <linux/netfilter/ipset/ip_set_comment.h>
535 542
536static inline int 543int
537ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, 544ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
538 const void *e, bool active) 545 const void *e, bool active);
539{
540 if (SET_WITH_TIMEOUT(set)) {
541 unsigned long *timeout = ext_timeout(e, set);
542
543 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
544 htonl(active ? ip_set_timeout_get(timeout)
545 : *timeout)))
546 return -EMSGSIZE;
547 }
548 if (SET_WITH_COUNTER(set) &&
549 ip_set_put_counter(skb, ext_counter(e, set)))
550 return -EMSGSIZE;
551 if (SET_WITH_COMMENT(set) &&
552 ip_set_put_comment(skb, ext_comment(e, set)))
553 return -EMSGSIZE;
554 if (SET_WITH_SKBINFO(set) &&
555 ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
556 return -EMSGSIZE;
557 return 0;
558}
559 546
560#define IP_SET_INIT_KEXT(skb, opt, set) \ 547#define IP_SET_INIT_KEXT(skb, opt, set) \
561 { .bytes = (skb)->len, .packets = 1, \ 548 { .bytes = (skb)->len, .packets = 1, \
@@ -565,8 +552,6 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
565 { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \ 552 { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
566 .timeout = (set)->timeout } 553 .timeout = (set)->timeout }
567 554
568#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
569
570#define IPSET_CONCAT(a, b) a##b 555#define IPSET_CONCAT(a, b) a##b
571#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b) 556#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
572 557
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 21217ea008d7..8d0248525957 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -16,41 +16,57 @@ ip_set_comment_uget(struct nlattr *tb)
16 return nla_data(tb); 16 return nla_data(tb);
17} 17}
18 18
19/* Called from uadd only, protected by the set spinlock.
20 * The kadt functions don't use the comment extensions in any way.
21 */
19static inline void 22static inline void
20ip_set_init_comment(struct ip_set_comment *comment, 23ip_set_init_comment(struct ip_set_comment *comment,
21 const struct ip_set_ext *ext) 24 const struct ip_set_ext *ext)
22{ 25{
26 struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
23 size_t len = ext->comment ? strlen(ext->comment) : 0; 27 size_t len = ext->comment ? strlen(ext->comment) : 0;
24 28
25 if (unlikely(comment->str)) { 29 if (unlikely(c)) {
26 kfree(comment->str); 30 kfree_rcu(c, rcu);
27 comment->str = NULL; 31 rcu_assign_pointer(comment->c, NULL);
28 } 32 }
29 if (!len) 33 if (!len)
30 return; 34 return;
31 if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) 35 if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
32 len = IPSET_MAX_COMMENT_SIZE; 36 len = IPSET_MAX_COMMENT_SIZE;
33 comment->str = kzalloc(len + 1, GFP_ATOMIC); 37 c = kzalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
34 if (unlikely(!comment->str)) 38 if (unlikely(!c))
35 return; 39 return;
36 strlcpy(comment->str, ext->comment, len + 1); 40 strlcpy(c->str, ext->comment, len + 1);
41 rcu_assign_pointer(comment->c, c);
37} 42}
38 43
44/* Used only when dumping a set, protected by rcu_read_lock_bh() */
39static inline int 45static inline int
40ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment) 46ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
41{ 47{
42 if (!comment->str) 48 struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
49
50 if (!c)
43 return 0; 51 return 0;
44 return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str); 52 return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
45} 53}
46 54
55/* Called from uadd/udel, flush or the garbage collectors protected
56 * by the set spinlock.
57 * Called when the set is destroyed and when there can't be any user
58 * of the set data anymore.
59 */
47static inline void 60static inline void
48ip_set_comment_free(struct ip_set_comment *comment) 61ip_set_comment_free(struct ip_set_comment *comment)
49{ 62{
50 if (unlikely(!comment->str)) 63 struct ip_set_comment_rcu *c;
64
65 c = rcu_dereference_protected(comment->c, 1);
66 if (unlikely(!c))
51 return; 67 return;
52 kfree(comment->str); 68 kfree_rcu(c, rcu);
53 comment->str = NULL; 69 rcu_assign_pointer(comment->c, NULL);
54} 70}
55 71
56#endif 72#endif
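The ip_set_comment.h hunk above moves the comment string behind an RCU-managed pointer: uadd builds a fresh buffer, publishes it with rcu_assign_pointer(), and retires the old one with kfree_rcu(), while dump-side readers only ever see a complete buffer through rcu_dereference_bh(). Below is a minimal userspace analogy of that publish/read ordering, with C11 release/acquire atomics standing in for the RCU primitives and deferred reclamation deliberately omitted; all names here are illustrative, not kernel API.

/* Userspace analogy: release store ~ rcu_assign_pointer(),
 * acquire load ~ rcu_dereference(). A real kernel writer would
 * kfree_rcu() the old buffer instead of free()ing it immediately;
 * that is only safe here because this demo is single-threaded. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct comment { char str[64]; };

static _Atomic(struct comment *) current_comment;

static void publish_comment(const char *text)
{
        struct comment *old, *c = calloc(1, sizeof(*c));

        if (!c)
                return;
        strncpy(c->str, text, sizeof(c->str) - 1);
        old = atomic_exchange_explicit(&current_comment, c,
                                       memory_order_release);
        free(old);      /* stand-in for kfree_rcu(old, rcu) */
}

static void dump_comment(void)
{
        struct comment *c = atomic_load_explicit(&current_comment,
                                                 memory_order_acquire);

        printf("comment: %s\n", c ? c->str : "(none)");
}

int main(void)
{
        dump_comment();
        publish_comment("hash:ip set for the LAN");
        dump_comment();
        publish_comment("replaced");
        dump_comment();
        free(atomic_load(&current_comment));
        return 0;
}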
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 83c2f9e0886c..1d6a935c1ac5 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -40,38 +40,33 @@ ip_set_timeout_uget(struct nlattr *tb)
40} 40}
41 41
42static inline bool 42static inline bool
43ip_set_timeout_test(unsigned long timeout) 43ip_set_timeout_expired(unsigned long *t)
44{ 44{
45 return timeout == IPSET_ELEM_PERMANENT || 45 return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
46 time_is_after_jiffies(timeout);
47}
48
49static inline bool
50ip_set_timeout_expired(unsigned long *timeout)
51{
52 return *timeout != IPSET_ELEM_PERMANENT &&
53 time_is_before_jiffies(*timeout);
54} 46}
55 47
56static inline void 48static inline void
57ip_set_timeout_set(unsigned long *timeout, u32 t) 49ip_set_timeout_set(unsigned long *timeout, u32 value)
58{ 50{
59 if (!t) { 51 unsigned long t;
52
53 if (!value) {
60 *timeout = IPSET_ELEM_PERMANENT; 54 *timeout = IPSET_ELEM_PERMANENT;
61 return; 55 return;
62 } 56 }
63 57
64 *timeout = msecs_to_jiffies(t * 1000) + jiffies; 58 t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
65 if (*timeout == IPSET_ELEM_PERMANENT) 59 if (t == IPSET_ELEM_PERMANENT)
66 /* Bingo! :-) */ 60 /* Bingo! :-) */
67 (*timeout)--; 61 t--;
62 *timeout = t;
68} 63}
69 64
70static inline u32 65static inline u32
71ip_set_timeout_get(unsigned long *timeout) 66ip_set_timeout_get(unsigned long *timeout)
72{ 67{
73 return *timeout == IPSET_ELEM_PERMANENT ? 0 : 68 return *timeout == IPSET_ELEM_PERMANENT ? 0 :
74 jiffies_to_msecs(*timeout - jiffies)/1000; 69 jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
75} 70}
76 71
77#endif /* __KERNEL__ */ 72#endif /* __KERNEL__ */
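The timeout helpers above reserve IPSET_ELEM_PERMANENT as a sentinel deadline and nudge any real deadline that happens to land on it by one jiffy (the "Bingo" branch), so "never expires" and "expires at tick N" can share one unsigned long. A minimal userspace sketch of the same sentinel trick follows, with a fake millisecond counter standing in for jiffies and wraparound handling omitted; names and the sentinel value are illustrative.

/* Sentinel-deadline sketch: PERMANENT is a reserved value, and any
 * computed deadline that collides with it is nudged by one tick.
 * Unlike the kernel's time_is_before_jiffies(), this plain compare
 * does not handle counter wraparound. */
#include <stdio.h>

#define PERMANENT 0UL

static unsigned long fake_jiffies;

static void timeout_set(unsigned long *timeout, unsigned long secs)
{
        unsigned long t;

        if (!secs) {
                *timeout = PERMANENT;   /* 0 means "never expires" */
                return;
        }
        t = fake_jiffies + secs * 1000;
        if (t == PERMANENT)
                t--;                    /* dodge the sentinel */
        *timeout = t;
}

static int timeout_expired(const unsigned long *timeout)
{
        return *timeout != PERMANENT && *timeout <= fake_jiffies;
}

int main(void)
{
        unsigned long t;

        timeout_set(&t, 2);             /* expires in 2 "seconds" */
        fake_jiffies += 1000;
        printf("after 1s: expired=%d\n", timeout_expired(&t));
        fake_jiffies += 2000;
        printf("after 3s: expired=%d\n", timeout_expired(&t));
        timeout_set(&t, 0);
        printf("permanent: expired=%d\n", timeout_expired(&t));
        return 0;
}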
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index a3e215bb0241..286098a5667f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -62,6 +62,7 @@ struct xt_mtchk_param {
62 void *matchinfo; 62 void *matchinfo;
63 unsigned int hook_mask; 63 unsigned int hook_mask;
64 u_int8_t family; 64 u_int8_t family;
65 bool nft_compat;
65}; 66};
66 67
67/** 68/**
@@ -92,6 +93,7 @@ struct xt_tgchk_param {
92 void *targinfo; 93 void *targinfo;
93 unsigned int hook_mask; 94 unsigned int hook_mask;
94 u_int8_t family; 95 u_int8_t family;
96 bool nft_compat;
95}; 97};
96 98
97/* Target destructor parameters */ 99/* Target destructor parameters */
@@ -222,13 +224,10 @@ struct xt_table_info {
222 unsigned int stacksize; 224 unsigned int stacksize;
223 unsigned int __percpu *stackptr; 225 unsigned int __percpu *stackptr;
224 void ***jumpstack; 226 void ***jumpstack;
225 /* ipt_entry tables: one per CPU */ 227
226 /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ 228 unsigned char entries[0] __aligned(8);
227 void *entries[1];
228}; 229};
229 230
230#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
231 + nr_cpu_ids * sizeof(char *))
232int xt_register_target(struct xt_target *target); 231int xt_register_target(struct xt_target *target);
233void xt_unregister_target(struct xt_target *target); 232void xt_unregister_target(struct xt_target *target);
234int xt_register_targets(struct xt_target *target, unsigned int n); 233int xt_register_targets(struct xt_target *target, unsigned int n);
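The xt_table_info change above drops the per-CPU `void *entries[1]` tail and the XT_TABLE_INFO_SZ offsetof() sizing macro in favor of a single aligned byte array at the end of the structure, so header and payload come from one allocation. A minimal sketch of that header-plus-trailing-payload layout follows, using a C99 flexible array member where the kernel header uses a zero-length `__aligned(8)` array; names are illustrative.

/* One allocation carries the fixed header and a variable payload
 * that starts right after it, sized with offsetof() the same way
 * the old XT_TABLE_INFO_SZ macro did. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table_info {
        unsigned int size;              /* payload bytes that follow */
        unsigned int number;
        unsigned char entries[];        /* kernel: entries[0] __aligned(8) */
};

static struct table_info *table_info_alloc(unsigned int payload)
{
        struct table_info *ti;

        ti = malloc(offsetof(struct table_info, entries) + payload);
        if (ti) {
                ti->size = payload;
                ti->number = 0;
                memset(ti->entries, 0, payload);
        }
        return ti;
}

int main(void)
{
        struct table_info *ti = table_info_alloc(64);

        if (!ti)
                return 1;
        printf("payload starts at offset %zu, one allocation of %zu bytes\n",
               offsetof(struct table_info, entries),
               offsetof(struct table_info, entries) + (size_t)ti->size);
        free(ti);
        return 0;
}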
@@ -351,6 +350,57 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
351 return ret; 350 return ret;
352} 351}
353 352
353
354/* On SMP, ip(6)t_entry->counters.pcnt holds address of the
 355 * real (percpu) counter. On !SMP, it's just the packet count,
356 * so nothing needs to be done there.
357 *
358 * xt_percpu_counter_alloc returns the address of the percpu
359 * counter, or 0 on !SMP. We force an alignment of 16 bytes
360 * so that bytes/packets share a common cache line.
361 *
 362 * Hence the caller must use IS_ERR_VALUE to check for errors; this
363 * allows us to return 0 for single core systems without forcing
364 * callers to deal with SMP vs. NONSMP issues.
365 */
366static inline u64 xt_percpu_counter_alloc(void)
367{
368 if (nr_cpu_ids > 1) {
369 void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
370 sizeof(struct xt_counters));
371
372 if (res == NULL)
373 return (u64) -ENOMEM;
374
375 return (u64) (__force unsigned long) res;
376 }
377
378 return 0;
379}
380static inline void xt_percpu_counter_free(u64 pcnt)
381{
382 if (nr_cpu_ids > 1)
383 free_percpu((void __percpu *) (unsigned long) pcnt);
384}
385
386static inline struct xt_counters *
387xt_get_this_cpu_counter(struct xt_counters *cnt)
388{
389 if (nr_cpu_ids > 1)
390 return this_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt);
391
392 return cnt;
393}
394
395static inline struct xt_counters *
396xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
397{
398 if (nr_cpu_ids > 1)
399 return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu);
400
401 return cnt;
402}
403
354struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *); 404struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
355void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *); 405void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
356 406
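The comment block above spells out the calling convention: xt_percpu_counter_alloc() packs three outcomes into one u64 (0 on uniprocessor, a percpu address cast to u64, or a negative errno), and callers separate them with IS_ERR_VALUE. Below is a self-contained userspace sketch of that tagged-u64 convention, with calloc() standing in for __alloc_percpu() and a hand-rolled IS_ERR_VALUE; everything here is illustrative.

/* Tagged-u64 return convention: 0 = nothing allocated (UP case),
 * a pointer cast to u64 = success, a value in the top-4095 range
 * = negative errno, exactly how IS_ERR_VALUE separates them. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095UL
#define IS_ERR_VALUE(x) ((uint64_t)(x) >= (uint64_t)-MAX_ERRNO)

struct counters { uint64_t packets, bytes; };

static uint64_t counter_alloc(int ncpus)    /* ncpus ~ nr_cpu_ids */
{
        if (ncpus > 1) {
                void *res = calloc(ncpus, sizeof(struct counters));

                if (!res)
                        return (uint64_t)-ENOMEM;
                return (uint64_t)(uintptr_t)res;
        }
        return 0;   /* UP: counters live inline, nothing to allocate */
}

static void counter_free(uint64_t pcnt, int ncpus)
{
        if (ncpus > 1)
                free((void *)(uintptr_t)pcnt);
}

int main(void)
{
        int ncpus = 4;
        uint64_t pcnt = counter_alloc(ncpus);

        if (IS_ERR_VALUE(pcnt)) {
                fprintf(stderr, "alloc failed: %d\n", (int)-(int64_t)pcnt);
                return 1;
        }
        printf("pcnt=%#llx (0 would mean UP)\n", (unsigned long long)pcnt);
        counter_free(pcnt, ncpus);
        return 0;
}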
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f2fdb5a52070..6d80fc686323 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -20,13 +20,6 @@ enum nf_br_hook_priorities {
20#define BRNF_BRIDGED_DNAT 0x02 20#define BRNF_BRIDGED_DNAT 0x02
21#define BRNF_NF_BRIDGE_PREROUTING 0x08 21#define BRNF_NF_BRIDGE_PREROUTING 0x08
22 22
23static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
24{
25 if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
26 return PPPOE_SES_HLEN;
27 return 0;
28}
29
30int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); 23int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
31 24
32static inline void br_drop_fake_rtable(struct sk_buff *skb) 25static inline void br_drop_fake_rtable(struct sk_buff *skb)
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index f1bd3962e6b6..8ca6d6464ea3 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -6,7 +6,7 @@
6 * 6 *
7 * ebtables.c,v 2.0, April, 2002 7 * ebtables.c,v 2.0, April, 2002
8 * 8 *
9 * This code is stongly inspired on the iptables code which is 9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 */ 11 */
12#ifndef __LINUX_BRIDGE_EFF_H 12#ifndef __LINUX_BRIDGE_EFF_H
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
new file mode 100644
index 000000000000..d3a7f8597e82
--- /dev/null
+++ b/include/linux/netfilter_defs.h
@@ -0,0 +1,9 @@
1#ifndef __LINUX_NETFILTER_CORE_H_
2#define __LINUX_NETFILTER_CORE_H_
3
4#include <uapi/linux/netfilter.h>
5
6/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
7#define NF_MAX_HOOKS 8
8
9#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
new file mode 100644
index 000000000000..cb0727fe2b3d
--- /dev/null
+++ b/include/linux/netfilter_ingress.h
@@ -0,0 +1,41 @@
1#ifndef _NETFILTER_INGRESS_H_
2#define _NETFILTER_INGRESS_H_
3
4#include <linux/netfilter.h>
5#include <linux/netdevice.h>
6
7#ifdef CONFIG_NETFILTER_INGRESS
8static inline int nf_hook_ingress_active(struct sk_buff *skb)
9{
10 return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
11 NFPROTO_NETDEV, NF_NETDEV_INGRESS);
12}
13
14static inline int nf_hook_ingress(struct sk_buff *skb)
15{
16 struct nf_hook_state state;
17
18 nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
19 NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
20 skb->dev, NULL, NULL);
21 return nf_hook_slow(skb, &state);
22}
23
24static inline void nf_hook_ingress_init(struct net_device *dev)
25{
26 INIT_LIST_HEAD(&dev->nf_hooks_ingress);
27}
28#else /* CONFIG_NETFILTER_INGRESS */
29static inline int nf_hook_ingress_active(struct sk_buff *skb)
30{
31 return 0;
32}
33
34static inline int nf_hook_ingress(struct sk_buff *skb)
35{
36 return 0;
37}
38
39static inline void nf_hook_ingress_init(struct net_device *dev) {}
40#endif /* CONFIG_NETFILTER_INGRESS */
41#endif /* _NETFILTER_INGRESS_H_ */
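The new netfilter_ingress.h follows the usual kernel stub pattern: when CONFIG_NETFILTER_INGRESS is off, the same function names collapse to no-op static inlines, so call sites compile unconditionally and the compiler folds the dead branches away. A minimal sketch of that pattern follows, toggled with a hypothetical -DHAVE_INGRESS flag; names are illustrative.

/* Same names in both configurations; only the bodies differ, so the
 * caller below needs no #ifdefs. Build with and without
 * -DHAVE_INGRESS to see both behaviors. */
#include <stdio.h>

#ifdef HAVE_INGRESS
static inline int hook_ingress_active(void) { return 1; }
static inline int hook_ingress(const char *pkt)
{
        printf("ingress hook sees: %s\n", pkt);
        return 0;
}
#else
static inline int hook_ingress_active(void) { return 0; }
static inline int hook_ingress(const char *pkt) { (void)pkt; return 0; }
#endif

int main(void)
{
        /* caller is identical in both configurations */
        if (hook_ingress_active())
                hook_ingress("hello");
        else
                printf("ingress hooks compiled out\n");
        return 0;
}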
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 64dad1cc1a4b..8b7d28f3aada 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -25,6 +25,9 @@ void ipv6_netfilter_fini(void);
25struct nf_ipv6_ops { 25struct nf_ipv6_ops {
26 int (*chk_addr)(struct net *net, const struct in6_addr *addr, 26 int (*chk_addr)(struct net *net, const struct in6_addr *addr,
27 const struct net_device *dev, int strict); 27 const struct net_device *dev, int strict);
28 void (*route_input)(struct sk_buff *skb);
29 int (*fragment)(struct sock *sk, struct sk_buff *skb,
30 int (*output)(struct sock *, struct sk_buff *));
28}; 31};
29 32
30extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; 33extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6835c1279df7..9120edb650a0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -28,6 +28,8 @@ struct netlink_skb_parms {
28 __u32 dst_group; 28 __u32 dst_group;
29 __u32 flags; 29 __u32 flags;
30 struct sock *sk; 30 struct sock *sk;
31 bool nsid_is_set;
32 int nsid;
31}; 33};
32 34
33#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) 35#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 3d46fb4708e0..f94da0e65dea 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -67,6 +67,7 @@ extern int nmi_watchdog_enabled;
67extern int soft_watchdog_enabled; 67extern int soft_watchdog_enabled;
68extern int watchdog_user_enabled; 68extern int watchdog_user_enabled;
69extern int watchdog_thresh; 69extern int watchdog_thresh;
70extern unsigned long *watchdog_cpumask_bits;
70extern int sysctl_softlockup_all_cpu_backtrace; 71extern int sysctl_softlockup_all_cpu_backtrace;
71struct ctl_table; 72struct ctl_table;
72extern int proc_watchdog(struct ctl_table *, int , 73extern int proc_watchdog(struct ctl_table *, int ,
@@ -77,6 +78,8 @@ extern int proc_soft_watchdog(struct ctl_table *, int ,
77 void __user *, size_t *, loff_t *); 78 void __user *, size_t *, loff_t *);
78extern int proc_watchdog_thresh(struct ctl_table *, int , 79extern int proc_watchdog_thresh(struct ctl_table *, int ,
79 void __user *, size_t *, loff_t *); 80 void __user *, size_t *, loff_t *);
81extern int proc_watchdog_cpumask(struct ctl_table *, int,
82 void __user *, size_t *, loff_t *);
80#endif 83#endif
81 84
82#ifdef CONFIG_HAVE_ACPI_APEI_NMI 85#ifdef CONFIG_HAVE_ACPI_APEI_NMI
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 8dbd05e70f09..c0d94ed8ce9a 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -74,7 +74,7 @@ struct nvme_dev {
74 struct blk_mq_tag_set tagset; 74 struct blk_mq_tag_set tagset;
75 struct blk_mq_tag_set admin_tagset; 75 struct blk_mq_tag_set admin_tagset;
76 u32 __iomem *dbs; 76 u32 __iomem *dbs;
77 struct pci_dev *pci_dev; 77 struct device *dev;
78 struct dma_pool *prp_page_pool; 78 struct dma_pool *prp_page_pool;
79 struct dma_pool *prp_small_pool; 79 struct dma_pool *prp_small_pool;
80 int instance; 80 int instance;
@@ -92,6 +92,7 @@ struct nvme_dev {
92 work_func_t reset_workfn; 92 work_func_t reset_workfn;
93 struct work_struct reset_work; 93 struct work_struct reset_work;
94 struct work_struct probe_work; 94 struct work_struct probe_work;
95 struct work_struct scan_work;
95 char name[12]; 96 char name[12];
96 char serial[20]; 97 char serial[20];
97 char model[40]; 98 char model[40];
@@ -146,25 +147,15 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
146 return (sector >> (ns->lba_shift - 9)); 147 return (sector >> (ns->lba_shift - 9));
147} 148}
148 149
149/** 150int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
150 * nvme_free_iod - frees an nvme_iod 151 void *buf, unsigned bufflen);
151 * @dev: The device that the I/O was submitted to 152int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
152 * @iod: The memory to free 153 void *buffer, void __user *ubuffer, unsigned bufflen,
153 */ 154 u32 *result, unsigned timeout);
154void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); 155int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
155 156int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
156int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t); 157 struct nvme_id_ns **id);
157struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, 158int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
158 unsigned long addr, unsigned length);
159void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
160 struct nvme_iod *iod);
161int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
162 struct nvme_command *, u32 *);
163int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
164int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
165 u32 *result);
166int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
167 dma_addr_t dma_addr);
168int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, 159int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
169 dma_addr_t dma_addr, u32 *result); 160 dma_addr_t dma_addr, u32 *result);
170int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, 161int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
diff --git a/include/linux/nx842.h b/include/linux/nx842.h
deleted file mode 100644
index a4d324c6406a..000000000000
--- a/include/linux/nx842.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __NX842_H__
2#define __NX842_H__
3
4int nx842_get_workmem_size(void);
5int nx842_get_workmem_size_aligned(void);
6int nx842_compress(const unsigned char *in, unsigned int in_len,
7 unsigned char *out, unsigned int *out_len, void *wrkmem);
8int nx842_decompress(const unsigned char *in, unsigned int in_len,
9 unsigned char *out, unsigned int *out_len, void *wrkmem);
10
11#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index ddeaae6d2083..b871ff9d81d7 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -121,6 +121,8 @@ extern struct device_node *of_stdout;
121extern raw_spinlock_t devtree_lock; 121extern raw_spinlock_t devtree_lock;
122 122
123#ifdef CONFIG_OF 123#ifdef CONFIG_OF
124void of_core_init(void);
125
124static inline bool is_of_node(struct fwnode_handle *fwnode) 126static inline bool is_of_node(struct fwnode_handle *fwnode)
125{ 127{
126 return fwnode && fwnode->type == FWNODE_OF; 128 return fwnode && fwnode->type == FWNODE_OF;
@@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
376 378
377#else /* CONFIG_OF */ 379#else /* CONFIG_OF */
378 380
381static inline void of_core_init(void)
382{
383}
384
379static inline bool is_of_node(struct fwnode_handle *fwnode) 385static inline bool is_of_node(struct fwnode_handle *fwnode)
380{ 386{
381 return false; 387 return false;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 587ee507965d..fd627a58068f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
64extern int early_init_dt_scan_memory(unsigned long node, const char *uname, 64extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
65 int depth, void *data); 65 int depth, void *data);
66extern void early_init_fdt_scan_reserved_mem(void); 66extern void early_init_fdt_scan_reserved_mem(void);
67extern void early_init_fdt_reserve_self(void);
67extern void early_init_dt_add_memory_arch(u64 base, u64 size); 68extern void early_init_dt_add_memory_arch(u64 base, u64 size);
68extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, 69extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
69 bool no_map); 70 bool no_map);
@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
91extern void of_fdt_limit_memory(int limit); 92extern void of_fdt_limit_memory(int limit);
92#else /* CONFIG_OF_FLATTREE */ 93#else /* CONFIG_OF_FLATTREE */
93static inline void early_init_fdt_scan_reserved_mem(void) {} 94static inline void early_init_fdt_scan_reserved_mem(void) {}
95static inline void early_init_fdt_reserve_self(void) {}
94static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } 96static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
95static inline void unflatten_device_tree(void) {} 97static inline void unflatten_device_tree(void) {}
96static inline void unflatten_and_copy_device_tree(void) {} 98static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 44b2f6f7bbd8..7deecb7bca5e 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -32,6 +32,8 @@ enum oom_scan_t {
32/* Thread is the potential origin of an oom condition; kill first on oom */ 32/* Thread is the potential origin of an oom condition; kill first on oom */
33#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1) 33#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
34 34
35extern struct mutex oom_lock;
36
35static inline void set_current_oom_origin(void) 37static inline void set_current_oom_origin(void)
36{ 38{
37 current->signal->oom_flags |= OOM_FLAG_ORIGIN; 39 current->signal->oom_flags |= OOM_FLAG_ORIGIN;
@@ -47,9 +49,7 @@ static inline bool oom_task_origin(const struct task_struct *p)
47 return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN); 49 return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
48} 50}
49 51
50extern void mark_tsk_oom_victim(struct task_struct *tsk); 52extern void mark_oom_victim(struct task_struct *tsk);
51
52extern void unmark_oom_victim(void);
53 53
54extern unsigned long oom_badness(struct task_struct *p, 54extern unsigned long oom_badness(struct task_struct *p,
55 struct mem_cgroup *memcg, const nodemask_t *nodemask, 55 struct mem_cgroup *memcg, const nodemask_t *nodemask,
@@ -62,9 +62,6 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
62 struct mem_cgroup *memcg, nodemask_t *nodemask, 62 struct mem_cgroup *memcg, nodemask_t *nodemask,
63 const char *message); 63 const char *message);
64 64
65extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
66extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
67
68extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, 65extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
69 int order, const nodemask_t *nodemask, 66 int order, const nodemask_t *nodemask,
70 struct mem_cgroup *memcg); 67 struct mem_cgroup *memcg);
@@ -75,6 +72,9 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
75 72
76extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, 73extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
77 int order, nodemask_t *mask, bool force_kill); 74 int order, nodemask_t *mask, bool force_kill);
75
76extern void exit_oom_victim(void);
77
78extern int register_oom_notifier(struct notifier_block *nb); 78extern int register_oom_notifier(struct notifier_block *nb);
79extern int unregister_oom_notifier(struct notifier_block *nb); 79extern int unregister_oom_notifier(struct notifier_block *nb);
80 80
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 3a6490e81b28..703ea5c30a33 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -32,4 +32,9 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
32extern bool osq_lock(struct optimistic_spin_queue *lock); 32extern bool osq_lock(struct optimistic_spin_queue *lock);
33extern void osq_unlock(struct optimistic_spin_queue *lock); 33extern void osq_unlock(struct optimistic_spin_queue *lock);
34 34
35static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
36{
37 return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
38}
39
35#endif 40#endif
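osq_is_locked() above answers "is anyone queued?" with a single atomic read of the tail word compared against OSQ_UNLOCKED_VAL. A minimal userspace sketch of the same probe using C11 atomics follows; the sentinel value and names are illustrative, not the kernel's.

/* Lock-state probe: queue emptiness is encoded in one atomic word,
 * so "is locked" is just a relaxed load against the unlocked
 * sentinel, with no locking needed to ask the question. */
#include <stdatomic.h>
#include <stdio.h>

#define UNLOCKED_VAL 0

struct spin_queue { atomic_int tail; };

static int queue_is_locked(struct spin_queue *q)
{
        return atomic_load_explicit(&q->tail, memory_order_relaxed)
               != UNLOCKED_VAL;
}

int main(void)
{
        struct spin_queue q = { .tail = UNLOCKED_VAL };

        printf("locked=%d\n", queue_is_locked(&q));
        atomic_store(&q.tail, 1);   /* pretend CPU 0 queued itself */
        printf("locked=%d\n", queue_is_locked(&q));
        return 0;
}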
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f7065c..fb0814ca65c7 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -651,7 +651,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
651int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 651int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
652 pgoff_t index, gfp_t gfp_mask); 652 pgoff_t index, gfp_t gfp_mask);
653extern void delete_from_page_cache(struct page *page); 653extern void delete_from_page_cache(struct page *page);
654extern void __delete_from_page_cache(struct page *page, void *shadow); 654extern void __delete_from_page_cache(struct page *page, void *shadow,
655 struct mem_cgroup *memcg);
655int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); 656int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
656 657
657/* 658/*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 353db8dc4c6e..8a0321a8fb59 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -355,6 +355,7 @@ struct pci_dev {
355 unsigned int broken_intx_masking:1; 355 unsigned int broken_intx_masking:1;
356 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ 356 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
357 unsigned int irq_managed:1; 357 unsigned int irq_managed:1;
358 unsigned int has_secondary_link:1;
358 pci_dev_flags_t dev_flags; 359 pci_dev_flags_t dev_flags;
359 atomic_t enable_cnt; /* pci_enable_device has been called */ 360 atomic_t enable_cnt; /* pci_enable_device has been called */
360 361
@@ -577,9 +578,15 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
577int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, 578int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
578 int reg, int len, u32 val); 579 int reg, int len, u32 val);
579 580
581#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
582typedef u64 pci_bus_addr_t;
583#else
584typedef u32 pci_bus_addr_t;
585#endif
586
580struct pci_bus_region { 587struct pci_bus_region {
581 dma_addr_t start; 588 pci_bus_addr_t start;
582 dma_addr_t end; 589 pci_bus_addr_t end;
583}; 590};
584 591
585struct pci_dynids { 592struct pci_dynids {
@@ -773,8 +780,6 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
773void pcibios_scan_specific_bus(int busn); 780void pcibios_scan_specific_bus(int busn);
774struct pci_bus *pci_find_bus(int domain, int busnr); 781struct pci_bus *pci_find_bus(int domain, int busnr);
775void pci_bus_add_devices(const struct pci_bus *bus); 782void pci_bus_add_devices(const struct pci_bus *bus);
776struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
777 struct pci_ops *ops, void *sysdata);
778struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); 783struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
779struct pci_bus *pci_create_root_bus(struct device *parent, int bus, 784struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
780 struct pci_ops *ops, void *sysdata, 785 struct pci_ops *ops, void *sysdata,
@@ -974,7 +979,6 @@ void pci_intx(struct pci_dev *dev, int enable);
974bool pci_intx_mask_supported(struct pci_dev *dev); 979bool pci_intx_mask_supported(struct pci_dev *dev);
975bool pci_check_and_mask_intx(struct pci_dev *dev); 980bool pci_check_and_mask_intx(struct pci_dev *dev);
976bool pci_check_and_unmask_intx(struct pci_dev *dev); 981bool pci_check_and_unmask_intx(struct pci_dev *dev);
977void pci_msi_off(struct pci_dev *dev);
978int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size); 982int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
979int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask); 983int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
980int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); 984int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -1006,6 +1010,7 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1006int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); 1010int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1007int pci_select_bars(struct pci_dev *dev, unsigned long flags); 1011int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1008bool pci_device_is_present(struct pci_dev *pdev); 1012bool pci_device_is_present(struct pci_dev *pdev);
1013void pci_ignore_hotplug(struct pci_dev *dev);
1009 1014
1010/* ROM control related routines */ 1015/* ROM control related routines */
1011int pci_enable_rom(struct pci_dev *pdev); 1016int pci_enable_rom(struct pci_dev *pdev);
@@ -1043,11 +1048,6 @@ bool pci_dev_run_wake(struct pci_dev *dev);
1043bool pci_check_pme_status(struct pci_dev *dev); 1048bool pci_check_pme_status(struct pci_dev *dev);
1044void pci_pme_wakeup_bus(struct pci_bus *bus); 1049void pci_pme_wakeup_bus(struct pci_bus *bus);
1045 1050
1046static inline void pci_ignore_hotplug(struct pci_dev *dev)
1047{
1048 dev->ignore_hotplug = 1;
1049}
1050
1051static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 1051static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1052 bool enable) 1052 bool enable)
1053{ 1053{
@@ -1128,7 +1128,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1128 1128
1129int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); 1129int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1130 1130
1131static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) 1131static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1132{ 1132{
1133 struct pci_bus_region region; 1133 struct pci_bus_region region;
1134 1134
@@ -1197,15 +1197,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1197#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) 1197#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1198#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) 1198#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1199 1199
1200enum pci_dma_burst_strategy {
1201 PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
1202 strategy_parameter is N/A */
1203 PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
1204 byte boundaries */
1205 PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
1206 strategy_parameter byte boundaries */
1207};
1208
1209struct msix_entry { 1200struct msix_entry {
1210 u32 vector; /* kernel uses to write allocated vector */ 1201 u32 vector; /* kernel uses to write allocated vector */
1211 u16 entry; /* driver uses to specify entry, OS writes */ 1202 u16 entry; /* driver uses to specify entry, OS writes */
@@ -1430,8 +1421,6 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
1430{ return -EIO; } 1421{ return -EIO; }
1431static inline void pci_release_regions(struct pci_dev *dev) { } 1422static inline void pci_release_regions(struct pci_dev *dev) { }
1432 1423
1433#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
1434
1435static inline void pci_block_cfg_access(struct pci_dev *dev) { } 1424static inline void pci_block_cfg_access(struct pci_dev *dev) { }
1436static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) 1425static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
1437{ return 0; } 1426{ return 0; }
@@ -1905,4 +1894,15 @@ static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
1905{ 1894{
1906 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; 1895 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
1907} 1896}
1897
1898/**
1899 * pci_ari_enabled - query ARI forwarding status
1900 * @bus: the PCI bus
1901 *
1902 * Returns true if ARI forwarding is enabled.
1903 */
1904static inline bool pci_ari_enabled(struct pci_bus *bus)
1905{
1906 return bus->self && bus->self->ari_enabled;
1907}
1908#endif /* LINUX_PCI_H */ 1908#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2f7b9a40f627..fcff8f865341 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -579,6 +579,7 @@
579#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800 579#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800
580#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b 580#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b
581#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c 581#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c
582#define PCI_DEVICE_ID_AMD_KERNCZ_SMBUS 0x790b
582 583
583#define PCI_VENDOR_ID_TRIDENT 0x1023 584#define PCI_VENDOR_ID_TRIDENT 0x1023
584#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000 585#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
@@ -2329,6 +2330,8 @@
2329#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea 2330#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
2330#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb 2331#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
2331 2332
2333#define PCI_VENDOR_ID_CAVIUM 0x177d
2334
2332#define PCI_VENDOR_ID_BELKIN 0x1799 2335#define PCI_VENDOR_ID_BELKIN 0x1799
2333#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f 2336#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
2334 2337
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61992cf2e977..1b82d44b0a02 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -92,8 +92,6 @@ struct hw_perf_event_extra {
92 int idx; /* index in shared_regs->regs[] */ 92 int idx; /* index in shared_regs->regs[] */
93}; 93};
94 94
95struct event_constraint;
96
97/** 95/**
98 * struct hw_perf_event - performance event hardware details: 96 * struct hw_perf_event - performance event hardware details:
99 */ 97 */
@@ -112,8 +110,6 @@ struct hw_perf_event {
112 110
113 struct hw_perf_event_extra extra_reg; 111 struct hw_perf_event_extra extra_reg;
114 struct hw_perf_event_extra branch_reg; 112 struct hw_perf_event_extra branch_reg;
115
116 struct event_constraint *constraint;
117 }; 113 };
118 struct { /* software */ 114 struct { /* software */
119 struct hrtimer hrtimer; 115 struct hrtimer hrtimer;
@@ -124,7 +120,7 @@ struct hw_perf_event {
124 }; 120 };
125 struct { /* intel_cqm */ 121 struct { /* intel_cqm */
126 int cqm_state; 122 int cqm_state;
127 int cqm_rmid; 123 u32 cqm_rmid;
128 struct list_head cqm_events_entry; 124 struct list_head cqm_events_entry;
129 struct list_head cqm_groups_entry; 125 struct list_head cqm_groups_entry;
130 struct list_head cqm_group_entry; 126 struct list_head cqm_group_entry;
@@ -566,8 +562,12 @@ struct perf_cpu_context {
566 struct perf_event_context *task_ctx; 562 struct perf_event_context *task_ctx;
567 int active_oncpu; 563 int active_oncpu;
568 int exclusive; 564 int exclusive;
565
566 raw_spinlock_t hrtimer_lock;
569 struct hrtimer hrtimer; 567 struct hrtimer hrtimer;
570 ktime_t hrtimer_interval; 568 ktime_t hrtimer_interval;
569 unsigned int hrtimer_active;
570
571 struct pmu *unique_pmu; 571 struct pmu *unique_pmu;
572 struct perf_cgroup *cgrp; 572 struct perf_cgroup *cgrp;
573}; 573};
@@ -734,6 +734,22 @@ extern int perf_event_overflow(struct perf_event *event,
734 struct perf_sample_data *data, 734 struct perf_sample_data *data,
735 struct pt_regs *regs); 735 struct pt_regs *regs);
736 736
737extern void perf_event_output(struct perf_event *event,
738 struct perf_sample_data *data,
739 struct pt_regs *regs);
740
741extern void
742perf_event_header__init_id(struct perf_event_header *header,
743 struct perf_sample_data *data,
744 struct perf_event *event);
745extern void
746perf_event__output_id_sample(struct perf_event *event,
747 struct perf_output_handle *handle,
748 struct perf_sample_data *sample);
749
750extern void
751perf_log_lost_samples(struct perf_event *event, u64 lost);
752
737static inline bool is_sampling_event(struct perf_event *event) 753static inline bool is_sampling_event(struct perf_event *event)
738{ 754{
739 return event->attr.sample_period != 0; 755 return event->attr.sample_period != 0;
@@ -798,11 +814,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
798 814
799extern struct static_key_deferred perf_sched_events; 815extern struct static_key_deferred perf_sched_events;
800 816
817static __always_inline bool
818perf_sw_migrate_enabled(void)
819{
820 if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
821 return true;
822 return false;
823}
824
825static inline void perf_event_task_migrate(struct task_struct *task)
826{
827 if (perf_sw_migrate_enabled())
828 task->sched_migrated = 1;
829}
830
801static inline void perf_event_task_sched_in(struct task_struct *prev, 831static inline void perf_event_task_sched_in(struct task_struct *prev,
802 struct task_struct *task) 832 struct task_struct *task)
803{ 833{
804 if (static_key_false(&perf_sched_events.key)) 834 if (static_key_false(&perf_sched_events.key))
805 __perf_event_task_sched_in(prev, task); 835 __perf_event_task_sched_in(prev, task);
836
837 if (perf_sw_migrate_enabled() && task->sched_migrated) {
838 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
839
840 perf_fetch_caller_regs(regs);
841 ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
842 task->sched_migrated = 0;
843 }
806} 844}
807 845
808static inline void perf_event_task_sched_out(struct task_struct *prev, 846static inline void perf_event_task_sched_out(struct task_struct *prev,
@@ -925,6 +963,8 @@ perf_aux_output_skip(struct perf_output_handle *handle,
925static inline void * 963static inline void *
926perf_get_aux(struct perf_output_handle *handle) { return NULL; } 964perf_get_aux(struct perf_output_handle *handle) { return NULL; }
927static inline void 965static inline void
966perf_event_task_migrate(struct task_struct *task) { }
967static inline void
928perf_event_task_sched_in(struct task_struct *prev, 968perf_event_task_sched_in(struct task_struct *prev,
929 struct task_struct *task) { } 969 struct task_struct *task) { }
930static inline void 970static inline void
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 685809835b5c..a26c3f84b8dd 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -181,6 +181,9 @@ struct mii_bus {
181 /* PHY addresses to be ignored when probing */ 181 /* PHY addresses to be ignored when probing */
182 u32 phy_mask; 182 u32 phy_mask;
183 183
184 /* PHY addresses to ignore the TA/read failure */
185 u32 phy_ignore_ta_mask;
186
184 /* 187 /*
185 * Pointer to an array of interrupts, each PHY's 188 * Pointer to an array of interrupts, each PHY's
186 * interrupt at the index matching its address 189 * interrupt at the index matching its address
@@ -675,6 +678,17 @@ static inline bool phy_is_internal(struct phy_device *phydev)
675} 678}
676 679
677/** 680/**
681 * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
682 * is RGMII (all variants)
683 * @phydev: the phy_device struct
684 */
685static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
686{
687 return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
688 phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
689}
690
691/**
678 * phy_write_mmd - Convenience function for writing a register 692 * phy_write_mmd - Convenience function for writing a register
679 * on an MMD on a given PHY. 693 * on an MMD on a given PHY.
680 * @phydev: The phy_device struct 694 * @phydev: The phy_device struct
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 18eccefea06e..d7e5d608faa7 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -142,7 +142,7 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
142 s = pinctrl_lookup_state(p, name); 142 s = pinctrl_lookup_state(p, name);
143 if (IS_ERR(s)) { 143 if (IS_ERR(s)) {
144 pinctrl_put(p); 144 pinctrl_put(p);
145 return ERR_PTR(PTR_ERR(s)); 145 return ERR_CAST(s);
146 } 146 }
147 147
148 ret = pinctrl_select_state(p, s); 148 ret = pinctrl_select_state(p, s);
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 66e4697516de..9ba59fcba549 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -127,7 +127,7 @@ struct pinctrl_ops {
127 */ 127 */
128struct pinctrl_desc { 128struct pinctrl_desc {
129 const char *name; 129 const char *name;
130 struct pinctrl_pin_desc const *pins; 130 const struct pinctrl_pin_desc *pins;
131 unsigned int npins; 131 unsigned int npins;
132 const struct pinctrl_ops *pctlops; 132 const struct pinctrl_ops *pctlops;
133 const struct pinmux_ops *pmxops; 133 const struct pinmux_ops *pmxops;
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 511bda9ed4bf..ace60d775b20 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -56,6 +56,9 @@ struct pinctrl_dev;
56 * depending on whether the GPIO is configured as input or output, 56 * depending on whether the GPIO is configured as input or output,
57 * a direction selector function may be implemented as a backing 57 * a direction selector function may be implemented as a backing
58 * to the GPIO controllers that need pin muxing. 58 * to the GPIO controllers that need pin muxing.
59 * @strict: do not allow simultaneous use of the same pin for GPIO and another
60 * function. Check both gpio_owner and mux_owner strictly before approving
61 * the pin request.
59 */ 62 */
60struct pinmux_ops { 63struct pinmux_ops {
61 int (*request) (struct pinctrl_dev *pctldev, unsigned offset); 64 int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
@@ -66,7 +69,7 @@ struct pinmux_ops {
66 int (*get_function_groups) (struct pinctrl_dev *pctldev, 69 int (*get_function_groups) (struct pinctrl_dev *pctldev,
67 unsigned selector, 70 unsigned selector,
68 const char * const **groups, 71 const char * const **groups,
69 unsigned * const num_groups); 72 unsigned *num_groups);
70 int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector, 73 int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
71 unsigned group_selector); 74 unsigned group_selector);
72 int (*gpio_request_enable) (struct pinctrl_dev *pctldev, 75 int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
@@ -79,6 +82,7 @@ struct pinmux_ops {
79 struct pinctrl_gpio_range *range, 82 struct pinctrl_gpio_range *range,
80 unsigned offset, 83 unsigned offset,
81 bool input); 84 bool input);
85 bool strict;
82}; 86};
83 87
84#endif /* CONFIG_PINMUX */ 88#endif /* CONFIG_PINMUX */
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 5d50b25a73d7..cb2618147c34 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -208,9 +208,17 @@ struct omap_gpio_platform_data {
208 int (*get_context_loss_count)(struct device *dev); 208 int (*get_context_loss_count)(struct device *dev);
209}; 209};
210 210
211#if IS_BUILTIN(CONFIG_GPIO_OMAP)
211extern void omap2_gpio_prepare_for_idle(int off_mode); 212extern void omap2_gpio_prepare_for_idle(int off_mode);
212extern void omap2_gpio_resume_after_idle(void); 213extern void omap2_gpio_resume_after_idle(void);
213extern void omap_set_gpio_debounce(int gpio, int enable); 214#else
214extern void omap_set_gpio_debounce_time(int gpio, int enable); 215static inline void omap2_gpio_prepare_for_idle(int off_mode)
216{
217}
218
219static inline void omap2_gpio_resume_after_idle(void)
220{
221}
222#endif
215 223
216#endif 224#endif
diff --git a/include/linux/platform_data/irq-renesas-irqc.h b/include/linux/platform_data/irq-renesas-irqc.h
deleted file mode 100644
index 3ae17b3e00ed..000000000000
--- a/include/linux/platform_data/irq-renesas-irqc.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Renesas IRQC Driver
3 *
4 * Copyright (C) 2013 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef __IRQ_RENESAS_IRQC_H__
21#define __IRQ_RENESAS_IRQC_H__
22
23struct renesas_irqc_config {
24 unsigned int irq_base;
25};
26
27#endif /* __IRQ_RENESAS_IRQC_H__ */
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h
index 9248e3a7e333..5e3ff653900c 100644
--- a/include/linux/platform_data/keyboard-spear.h
+++ b/include/linux/platform_data/keyboard-spear.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010 ST Microelectronics 2 * Copyright (C) 2010 ST Microelectronics
3 * Rajeev Kumar<rajeev-dlh.kumar@st.com> 3 * Rajeev Kumar <rajeevkumar.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
new file mode 100644
index 000000000000..ac91707dabcb
--- /dev/null
+++ b/include/linux/platform_data/nfcmrvl.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2015, Marvell International Ltd.
3 *
4 * This software file (the "File") is distributed by Marvell International
5 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
6 * (the "License"). You may use, redistribute and/or modify this File in
7 * accordance with the terms and conditions of the License, a copy of which
8 * is available on the worldwide web at
9 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
10 *
11 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
12 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
13 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
14 * this warranty disclaimer.
15 */
16
17#ifndef _NFCMRVL_PTF_H_
18#define _NFCMRVL_PTF_H_
19
20struct nfcmrvl_platform_data {
21 /*
22 * Generic
23 */
24
25 /* GPIO that is wired to RESET_N signal */
26 unsigned int reset_n_io;
27 /* Tell if transport is muxed in HCI one */
28 unsigned int hci_muxed;
29
30 /*
31 * UART specific
32 */
33
34 /* Tell if UART needs flow control at init */
35 unsigned int flow_control;
36 /* Tell if firmware supports break control for power management */
37 unsigned int break_control;
38};
39
40#endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index 0a6de4ca4930..aed170588b74 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -27,6 +27,7 @@ enum ntc_thermistor_type {
27 TYPE_NCPXXWB473, 27 TYPE_NCPXXWB473,
28 TYPE_NCPXXWL333, 28 TYPE_NCPXXWL333,
29 TYPE_B57330V2103, 29 TYPE_B57330V2103,
30 TYPE_NCPXXWF104,
30}; 31};
31 32
32struct ntc_thermistor_platform_data { 33struct ntc_thermistor_platform_data {
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st-nci.h
index b023373d9874..d9d400a297bd 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st-nci.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver include for the ST21NFCB NFC chip. 2 * Driver include for ST NCI NFC chip family.
3 * 3 *
4 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. 4 * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -16,14 +16,14 @@
16 * along with this program; if not, see <http://www.gnu.org/licenses/>. 16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#ifndef _ST21NFCB_NCI_H_ 19#ifndef _ST_NCI_H_
20#define _ST21NFCB_NCI_H_ 20#define _ST_NCI_H_
21 21
22#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" 22#define ST_NCI_DRIVER_NAME "st_nci"
23 23
24struct st21nfcb_nfc_platform_data { 24struct st_nci_nfc_platform_data {
25 unsigned int gpio_reset; 25 unsigned int gpio_reset;
26 unsigned int irq_polarity; 26 unsigned int irq_polarity;
27}; 27};
28 28
29#endif /* _ST21NFCB_NCI_H_ */ 29#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
new file mode 100644
index 000000000000..d9d400a297bd
--- /dev/null
+++ b/include/linux/platform_data/st_nci.h
@@ -0,0 +1,29 @@
1/*
2 * Driver include for ST NCI NFC chip family.
3 *
4 * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _ST_NCI_H_
20#define _ST_NCI_H_
21
22#define ST_NCI_DRIVER_NAME "st_nci"
23
24struct st_nci_nfc_platform_data {
25 unsigned int gpio_reset;
26 unsigned int irq_polarity;
27};
28
29#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/video-msm_fb.h b/include/linux/platform_data/video-msm_fb.h
deleted file mode 100644
index 31449be3eadb..000000000000
--- a/include/linux/platform_data/video-msm_fb.h
+++ /dev/null
@@ -1,146 +0,0 @@
1/*
2 * Internal shared definitions for various MSM framebuffer parts.
3 *
4 * Copyright (C) 2007 Google Incorporated
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef _MSM_FB_H_
17#define _MSM_FB_H_
18
19#include <linux/device.h>
20
21struct mddi_info;
22
23struct msm_fb_data {
24 int xres; /* x resolution in pixels */
25 int yres; /* y resolution in pixels */
 26 int width; /* display width in mm */
27 int height; /* display height in mm */
28 unsigned output_format;
29};
30
31struct msmfb_callback {
32 void (*func)(struct msmfb_callback *);
33};
34
35enum {
36 MSM_MDDI_PMDH_INTERFACE,
37 MSM_MDDI_EMDH_INTERFACE,
38 MSM_EBI2_INTERFACE,
39};
40
41#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
42
43struct msm_panel_data {
44 /* turns off the fb memory */
45 int (*suspend)(struct msm_panel_data *);
46 /* turns on the fb memory */
47 int (*resume)(struct msm_panel_data *);
48 /* turns off the panel */
49 int (*blank)(struct msm_panel_data *);
50 /* turns on the panel */
51 int (*unblank)(struct msm_panel_data *);
52 void (*wait_vsync)(struct msm_panel_data *);
53 void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
54 void (*clear_vsync)(struct msm_panel_data *);
55 /* from the enum above */
56 unsigned interface_type;
57 /* data to be passed to the fb driver */
58 struct msm_fb_data *fb_data;
59
60 /* capabilities supported by the panel */
61 uint32_t caps;
62};
63
64struct msm_mddi_client_data {
65 void (*suspend)(struct msm_mddi_client_data *);
66 void (*resume)(struct msm_mddi_client_data *);
67 void (*activate_link)(struct msm_mddi_client_data *);
68 void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
69 uint32_t reg);
70 uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
71 void (*auto_hibernate)(struct msm_mddi_client_data *, int);
72 /* custom data that needs to be passed from the board file to a
73 * particular client */
74 void *private_client_data;
75 struct resource *fb_resource;
76 /* from the list above */
77 unsigned interface_type;
78};
79
80struct msm_mddi_platform_data {
81 unsigned int clk_rate;
82 void (*power_client)(struct msm_mddi_client_data *, int on);
83
84 /* fixup the mfr name, product id */
85 void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
86
87 struct resource *fb_resource; /*optional*/
88 /* number of clients in the list that follows */
89 int num_clients;
90 /* array of client information of clients */
91 struct {
92 unsigned product_id; /* mfr id in top 16 bits, product id
93 * in lower 16 bits
94 */
95 char *name; /* the device name will be the platform
96 * device name registered for the client,
97 * it should match the name of the associated
98 * driver
99 */
100 unsigned id; /* id for mddi client device node, will also
101 * be used as device id of panel devices, if
102 * the client device will have multiple panels
103 * space must be left here for them
104 */
105 void *client_data; /* required private client data */
106 unsigned int clk_rate; /* optional: if the client requires a
107 * different mddi clk rate
108 */
109 } client_platform_data[];
110};
111
112struct mdp_blit_req;
113struct fb_info;
114struct mdp_device {
115 struct device dev;
116 void (*dma)(struct mdp_device *mpd, uint32_t addr,
117 uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
118 uint32_t y, struct msmfb_callback *callback, int interface);
119 void (*dma_wait)(struct mdp_device *mdp);
120 int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
121 struct mdp_blit_req *req);
122 void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
123};
124
125struct class_interface;
126int register_mdp_client(struct class_interface *class_intf);
127
128/**** private client data structs go below this line ***/
129
130struct msm_mddi_bridge_platform_data {
131 /* from board file */
132 int (*init)(struct msm_mddi_bridge_platform_data *,
133 struct msm_mddi_client_data *);
134 int (*uninit)(struct msm_mddi_bridge_platform_data *,
135 struct msm_mddi_client_data *);
136 /* passed to panel for use by the fb driver */
137 int (*blank)(struct msm_mddi_bridge_platform_data *,
138 struct msm_mddi_client_data *);
139 int (*unblank)(struct msm_mddi_bridge_platform_data *,
140 struct msm_mddi_client_data *);
141 struct msm_fb_data fb_data;
142};
143
144
145
146#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 2d29c64f8fb1..35d599e7250d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,6 +342,18 @@ struct dev_pm_ops {
342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
343#endif 343#endif
344 344
345#ifdef CONFIG_PM_SLEEP
346#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
347 .suspend_noirq = suspend_fn, \
348 .resume_noirq = resume_fn, \
349 .freeze_noirq = suspend_fn, \
350 .thaw_noirq = resume_fn, \
351 .poweroff_noirq = suspend_fn, \
352 .restore_noirq = resume_fn,
353#else
354#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
355#endif
356
345#ifdef CONFIG_PM 357#ifdef CONFIG_PM
346#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 358#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
347 .runtime_suspend = suspend_fn, \ 359 .runtime_suspend = suspend_fn, \
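SET_NOIRQ_SYSTEM_SLEEP_PM_OPS above is the conditional-initializer idiom: under CONFIG_PM_SLEEP the macro expands to designated initializers that route one suspend/resume pair into all six _noirq slots, and otherwise to nothing, leaving the slots NULL. A minimal sketch of the idiom follows, with a hypothetical HAVE_SLEEP flag and a trimmed ops structure; names are illustrative.

/* Conditional-initializer sketch: one ops table serves both builds.
 * With -DHAVE_SLEEP the macro wires the callbacks in; without it,
 * the slots stay NULL (my_suspend/my_resume are then unused, which
 * only triggers a harmless warning). */
#include <stdio.h>

struct dev_ops {
        const char *name;
        int (*suspend_noirq)(void);
        int (*resume_noirq)(void);
        int (*freeze_noirq)(void);
        int (*thaw_noirq)(void);
};

#ifdef HAVE_SLEEP
#define SET_NOIRQ_OPS(s, r)     \
        .suspend_noirq = (s),   \
        .resume_noirq = (r),    \
        .freeze_noirq = (s),    \
        .thaw_noirq = (r),
#else
#define SET_NOIRQ_OPS(s, r)
#endif

static int my_suspend(void) { puts("suspend_noirq"); return 0; }
static int my_resume(void)  { puts("resume_noirq");  return 0; }

static const struct dev_ops ops = {
        .name = "demo",
        SET_NOIRQ_OPS(my_suspend, my_resume)
};

int main(void)
{
        printf("%s: ", ops.name);
        if (ops.suspend_noirq)
                ops.suspend_noirq();
        else
                puts("sleep support compiled out");
        return 0;
}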
@@ -529,6 +541,7 @@ enum rpm_request {
529}; 541};
530 542
531struct wakeup_source; 543struct wakeup_source;
544struct wake_irq;
532struct pm_domain_data; 545struct pm_domain_data;
533 546
534struct pm_subsys_data { 547struct pm_subsys_data {
@@ -568,6 +581,7 @@ struct dev_pm_info {
568 unsigned long timer_expires; 581 unsigned long timer_expires;
569 struct work_struct work; 582 struct work_struct work;
570 wait_queue_head_t wait_queue; 583 wait_queue_head_t wait_queue;
584 struct wake_irq *wakeirq;
571 atomic_t usage_count; 585 atomic_t usage_count;
572 atomic_t child_count; 586 atomic_t child_count;
573 unsigned int disable_depth:3; 587 unsigned int disable_depth:3;
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 0b0039634410..25266c600021 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -20,6 +20,16 @@ struct pm_clk_notifier_block {
20 20
21struct clk; 21struct clk;
22 22
23#ifdef CONFIG_PM
24extern int pm_clk_runtime_suspend(struct device *dev);
25extern int pm_clk_runtime_resume(struct device *dev);
26#define USE_PM_CLK_RUNTIME_OPS \
27 .runtime_suspend = pm_clk_runtime_suspend, \
28 .runtime_resume = pm_clk_runtime_resume,
29#else
30#define USE_PM_CLK_RUNTIME_OPS
31#endif
32
23#ifdef CONFIG_PM_CLK 33#ifdef CONFIG_PM_CLK
24static inline bool pm_clk_no_clocks(struct device *dev) 34static inline bool pm_clk_no_clocks(struct device *dev)
25{ 35{
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
new file mode 100644
index 000000000000..cd5b62db9084
--- /dev/null
+++ b/include/linux/pm_wakeirq.h
@@ -0,0 +1,51 @@
1/*
2 * pm_wakeirq.h - Device wakeirq helper functions
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _LINUX_PM_WAKEIRQ_H
15#define _LINUX_PM_WAKEIRQ_H
16
17#ifdef CONFIG_PM
18
19extern int dev_pm_set_wake_irq(struct device *dev, int irq);
20extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
21 int irq);
22extern void dev_pm_clear_wake_irq(struct device *dev);
23extern void dev_pm_enable_wake_irq(struct device *dev);
24extern void dev_pm_disable_wake_irq(struct device *dev);
25
26#else /* !CONFIG_PM */
27
28static inline int dev_pm_set_wake_irq(struct device *dev, int irq)
29{
30 return 0;
31}
32
33static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
34{
35 return 0;
36}
37
38static inline void dev_pm_clear_wake_irq(struct device *dev)
39{
40}
41
42static inline void dev_pm_enable_wake_irq(struct device *dev)
43{
44}
45
46static inline void dev_pm_disable_wake_irq(struct device *dev)
47{
48}
49
50#endif /* CONFIG_PM */
51#endif /* _LINUX_PM_WAKEIRQ_H */
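Typical usage of the new helpers is in a driver probe path: mark the device wakeup-capable, then hand the PM core a dedicated interrupt to arm across suspend. A sketch under those assumptions (the device and IRQ index are hypothetical):

/* Hypothetical probe wiring up a dedicated wake IRQ. */
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int baz_probe(struct platform_device *pdev)
{
	int irq, err;

	irq = platform_get_irq(pdev, 1);	/* separate wakeup interrupt */
	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);
	err = dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
	if (err)
		return err;

	/* From here the PM core arms/disarms the wake IRQ around runtime
	 * suspend; dev_pm_clear_wake_irq() undoes the setup on remove. */
	return 0;
}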
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a0f70808d7f4..a3447932df1f 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -28,9 +28,17 @@
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30 30
31struct wake_irq;
32
31/** 33/**
32 * struct wakeup_source - Representation of wakeup sources 34 * struct wakeup_source - Representation of wakeup sources
33 * 35 *
36 * @name: Name of the wakeup source
37 * @entry: Wakeup source list entry
38 * @lock: Wakeup source lock
39 * @wakeirq: Optional device specific wakeirq
40 * @timer: Wakeup timer list
41 * @timer_expires: Wakeup timer expiration
34 * @total_time: Total time this wakeup source has been active. 42 * @total_time: Total time this wakeup source has been active.
35 * @max_time: Maximum time this wakeup source has been continuously active. 43 * @max_time: Maximum time this wakeup source has been continuously active.
 36 * @last_time: Monotonic clock when the wakeup source was touched last time. 44 * @last_time: Monotonic clock when the wakeup source was touched last time.
@@ -47,6 +55,7 @@ struct wakeup_source {
47 const char *name; 55 const char *name;
48 struct list_head entry; 56 struct list_head entry;
49 spinlock_t lock; 57 spinlock_t lock;
58 struct wake_irq *wakeirq;
50 struct timer_list timer; 59 struct timer_list timer;
51 unsigned long timer_expires; 60 unsigned long timer_expires;
52 ktime_t total_time; 61 ktime_t total_time;
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index cf112b4075c8..522757ac9cd4 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -215,6 +215,10 @@ struct max17042_platform_data {
215 * the datasheet although it can be changed by board designers. 215 * the datasheet although it can be changed by board designers.
216 */ 216 */
217 unsigned int r_sns; 217 unsigned int r_sns;
218 int vmin; /* in millivolts */
219 int vmax; /* in millivolts */
220 int temp_min; /* in tenths of degree Celsius */
221 int temp_max; /* in tenths of degree Celsius */
218}; 222};
219 223
220#endif /* __MAX17042_BATTERY_H_ */ 224#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 75a1dd8dc56e..ef9f1592185d 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -206,6 +206,11 @@ struct power_supply_desc {
206 int (*set_property)(struct power_supply *psy, 206 int (*set_property)(struct power_supply *psy,
207 enum power_supply_property psp, 207 enum power_supply_property psp,
208 const union power_supply_propval *val); 208 const union power_supply_propval *val);
209 /*
 210 * property_is_writeable() will be called during registration
 211 * of the power supply. If this happens during device probe then it
 212 * must not access the device's internal data (probe has not finished).
213 */
209 int (*property_is_writeable)(struct power_supply *psy, 214 int (*property_is_writeable)(struct power_supply *psy,
210 enum power_supply_property psp); 215 enum power_supply_property psp);
211 void (*external_power_changed)(struct power_supply *psy); 216 void (*external_power_changed)(struct power_supply *psy);
@@ -237,6 +242,7 @@ struct power_supply {
237 /* private */ 242 /* private */
238 struct device dev; 243 struct device dev;
239 struct work_struct changed_work; 244 struct work_struct changed_work;
245 struct delayed_work deferred_register_work;
240 spinlock_t changed_lock; 246 spinlock_t changed_lock;
241 bool changed; 247 bool changed;
242 atomic_t use_cnt; 248 atomic_t use_cnt;
@@ -286,10 +292,15 @@ extern void power_supply_put(struct power_supply *psy);
286#ifdef CONFIG_OF 292#ifdef CONFIG_OF
287extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, 293extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
288 const char *property); 294 const char *property);
295extern struct power_supply *devm_power_supply_get_by_phandle(
296 struct device *dev, const char *property);
289#else /* !CONFIG_OF */ 297#else /* !CONFIG_OF */
290static inline struct power_supply * 298static inline struct power_supply *
291power_supply_get_by_phandle(struct device_node *np, const char *property) 299power_supply_get_by_phandle(struct device_node *np, const char *property)
292{ return NULL; } 300{ return NULL; }
301static inline struct power_supply *
302devm_power_supply_get_by_phandle(struct device *dev, const char *property)
303{ return NULL; }
293#endif /* CONFIG_OF */ 304#endif /* CONFIG_OF */
294extern void power_supply_changed(struct power_supply *psy); 305extern void power_supply_changed(struct power_supply *psy);
295extern int power_supply_am_i_supplied(struct power_supply *psy); 306extern int power_supply_am_i_supplied(struct power_supply *psy);
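The devm variant pairs naturally with probe: the reference obtained through the phandle is dropped automatically on driver detach. A sketch; the "battery" property name and the deferral policy for a not-yet-registered supply are assumptions:

/* Hypothetical charger probe resolving its battery via a DT phandle. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>

static int chg_probe(struct platform_device *pdev)
{
	struct power_supply *batt;

	batt = devm_power_supply_get_by_phandle(&pdev->dev, "battery");
	if (IS_ERR(batt))
		return PTR_ERR(batt);
	if (!batt)
		return -EPROBE_DEFER;	/* supply not registered yet */

	return 0;
}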
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index de83b4eb1642..0f1534acaf60 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,13 +10,117 @@
10#include <linux/list.h> 10#include <linux/list.h>
11 11
12/* 12/*
13 * We use the MSB mostly because its available; see <linux/preempt_mask.h> for 13 * We put the hardirq and softirq counter into the preemption
14 * the other bits -- can't include that header due to inclusion hell. 14 * counter. The bitmask has the following meaning:
15 *
16 * - bits 0-7 are the preemption count (max preemption depth: 256)
17 * - bits 8-15 are the softirq count (max # of softirqs: 256)
18 *
19 * The hardirq count could in theory be the same as the number of
20 * interrupts in the system, but we run all interrupt handlers with
21 * interrupts disabled, so we cannot have nesting interrupts. Though
22 * there are a few palaeontologic drivers which reenable interrupts in
23 * the handler, so we need more than one bit here.
24 *
25 * PREEMPT_MASK: 0x000000ff
26 * SOFTIRQ_MASK: 0x0000ff00
27 * HARDIRQ_MASK: 0x000f0000
28 * NMI_MASK: 0x00100000
29 * PREEMPT_ACTIVE: 0x00200000
30 * PREEMPT_NEED_RESCHED: 0x80000000
15 */ 31 */
32#define PREEMPT_BITS 8
33#define SOFTIRQ_BITS 8
34#define HARDIRQ_BITS 4
35#define NMI_BITS 1
36
37#define PREEMPT_SHIFT 0
38#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
39#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
40#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
41
42#define __IRQ_MASK(x) ((1UL << (x))-1)
43
44#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
45#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
46#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
47#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
48
49#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
50#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
51#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
52#define NMI_OFFSET (1UL << NMI_SHIFT)
53
54#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
55
56#define PREEMPT_ACTIVE_BITS 1
57#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
58#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
59
 60/* We use the MSB mostly because it's available */
16#define PREEMPT_NEED_RESCHED 0x80000000 61#define PREEMPT_NEED_RESCHED 0x80000000
17 62
63/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
18#include <asm/preempt.h> 64#include <asm/preempt.h>
19 65
66#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
67#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
68#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
69 | NMI_MASK))
70
71/*
72 * Are we doing bottom half or hardware interrupt processing?
73 * Are we in a softirq context? Interrupt context?
74 * in_softirq - Are we currently processing softirq or have bh disabled?
75 * in_serving_softirq - Are we currently processing softirq?
76 */
77#define in_irq() (hardirq_count())
78#define in_softirq() (softirq_count())
79#define in_interrupt() (irq_count())
80#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
81
82/*
83 * Are we in NMI context?
84 */
85#define in_nmi() (preempt_count() & NMI_MASK)
86
87#if defined(CONFIG_PREEMPT_COUNT)
88# define PREEMPT_DISABLE_OFFSET 1
89#else
90# define PREEMPT_DISABLE_OFFSET 0
91#endif
92
93/*
94 * The preempt_count offset needed for things like:
95 *
96 * spin_lock_bh()
97 *
98 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
99 * softirqs, such that unlock sequences of:
100 *
101 * spin_unlock();
102 * local_bh_enable();
103 *
104 * Work as expected.
105 */
106#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
107
108/*
109 * Are we running in atomic context? WARNING: this macro cannot
110 * always detect atomic context; in particular, it cannot know about
111 * held spinlocks in non-preemptible kernels. Thus it should not be
112 * used in the general case to determine whether sleeping is possible.
113 * Do not use in_atomic() in driver code.
114 */
115#define in_atomic() (preempt_count() != 0)
116
117/*
118 * Check whether we were atomic before we did preempt_disable():
119 * (used by the scheduler)
120 */
121#define in_atomic_preempt_off() \
122 ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
123
20#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 124#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
21extern void preempt_count_add(int val); 125extern void preempt_count_add(int val);
22extern void preempt_count_sub(int val); 126extern void preempt_count_sub(int val);
@@ -33,6 +137,18 @@ extern void preempt_count_sub(int val);
33#define preempt_count_inc() preempt_count_add(1) 137#define preempt_count_inc() preempt_count_add(1)
34#define preempt_count_dec() preempt_count_sub(1) 138#define preempt_count_dec() preempt_count_sub(1)
35 139
140#define preempt_active_enter() \
141do { \
142 preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
143 barrier(); \
144} while (0)
145
146#define preempt_active_exit() \
147do { \
148 barrier(); \
149 preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
150} while (0)
151
36#ifdef CONFIG_PREEMPT_COUNT 152#ifdef CONFIG_PREEMPT_COUNT
37 153
38#define preempt_disable() \ 154#define preempt_disable() \
@@ -49,6 +165,8 @@ do { \
49 165
50#define preempt_enable_no_resched() sched_preempt_enable_no_resched() 166#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
51 167
168#define preemptible() (preempt_count() == 0 && !irqs_disabled())
169
52#ifdef CONFIG_PREEMPT 170#ifdef CONFIG_PREEMPT
53#define preempt_enable() \ 171#define preempt_enable() \
54do { \ 172do { \
@@ -57,52 +175,46 @@ do { \
57 __preempt_schedule(); \ 175 __preempt_schedule(); \
58} while (0) 176} while (0)
59 177
178#define preempt_enable_notrace() \
179do { \
180 barrier(); \
181 if (unlikely(__preempt_count_dec_and_test())) \
182 __preempt_schedule_notrace(); \
183} while (0)
184
60#define preempt_check_resched() \ 185#define preempt_check_resched() \
61do { \ 186do { \
62 if (should_resched()) \ 187 if (should_resched()) \
63 __preempt_schedule(); \ 188 __preempt_schedule(); \
64} while (0) 189} while (0)
65 190
66#else 191#else /* !CONFIG_PREEMPT */
67#define preempt_enable() \ 192#define preempt_enable() \
68do { \ 193do { \
69 barrier(); \ 194 barrier(); \
70 preempt_count_dec(); \ 195 preempt_count_dec(); \
71} while (0) 196} while (0)
72#define preempt_check_resched() do { } while (0)
73#endif
74
75#define preempt_disable_notrace() \
76do { \
77 __preempt_count_inc(); \
78 barrier(); \
79} while (0)
80 197
81#define preempt_enable_no_resched_notrace() \ 198#define preempt_enable_notrace() \
82do { \ 199do { \
83 barrier(); \ 200 barrier(); \
84 __preempt_count_dec(); \ 201 __preempt_count_dec(); \
85} while (0) 202} while (0)
86 203
87#ifdef CONFIG_PREEMPT 204#define preempt_check_resched() do { } while (0)
88 205#endif /* CONFIG_PREEMPT */
89#ifndef CONFIG_CONTEXT_TRACKING
90#define __preempt_schedule_context() __preempt_schedule()
91#endif
92 206
93#define preempt_enable_notrace() \ 207#define preempt_disable_notrace() \
94do { \ 208do { \
209 __preempt_count_inc(); \
95 barrier(); \ 210 barrier(); \
96 if (unlikely(__preempt_count_dec_and_test())) \
97 __preempt_schedule_context(); \
98} while (0) 211} while (0)
99#else 212
100#define preempt_enable_notrace() \ 213#define preempt_enable_no_resched_notrace() \
101do { \ 214do { \
102 barrier(); \ 215 barrier(); \
103 __preempt_count_dec(); \ 216 __preempt_count_dec(); \
104} while (0) 217} while (0)
105#endif
106 218
107#else /* !CONFIG_PREEMPT_COUNT */ 219#else /* !CONFIG_PREEMPT_COUNT */
108 220
@@ -121,6 +233,7 @@ do { \
121#define preempt_disable_notrace() barrier() 233#define preempt_disable_notrace() barrier()
122#define preempt_enable_no_resched_notrace() barrier() 234#define preempt_enable_no_resched_notrace() barrier()
123#define preempt_enable_notrace() barrier() 235#define preempt_enable_notrace() barrier()
236#define preemptible() 0
124 237
125#endif /* CONFIG_PREEMPT_COUNT */ 238#endif /* CONFIG_PREEMPT_COUNT */
126 239
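The mask table above can be verified by hand: __IRQ_MASK(8) = 0xff, so SOFTIRQ_MASK = 0xff << 8 = 0x0000ff00; HARDIRQ_SHIFT = 16 gives HARDIRQ_MASK = 0xf << 16 = 0x000f0000; and NMI_SHIFT = 20 gives NMI_MASK = 0x00100000. A standalone sketch (ordinary userspace C, not kernel code) showing how one value decomposes into the three fields:

/* Illustrative decomposition of a preempt_count value. */
#include <stdio.h>

#define PREEMPT_MASK	0x000000ffUL
#define SOFTIRQ_MASK	0x0000ff00UL
#define HARDIRQ_MASK	0x000f0000UL

int main(void)
{
	/* One hardirq level, one local_bh_disable() (which adds
	 * SOFTIRQ_DISABLE_OFFSET == 2) and preempt_disable() twice: */
	unsigned long count = 0x00010202UL;

	printf("preempt depth : %lu\n", count & PREEMPT_MASK);		/* 2 */
	printf("softirq field : %lu\n", (count & SOFTIRQ_MASK) >> 8);	/* 2 */
	printf("hardirq depth : %lu\n", (count & HARDIRQ_MASK) >> 16);	/* 1 */
	return 0;
}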
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
deleted file mode 100644
index dbeec4d4a3be..000000000000
--- a/include/linux/preempt_mask.h
+++ /dev/null
@@ -1,117 +0,0 @@
1#ifndef LINUX_PREEMPT_MASK_H
2#define LINUX_PREEMPT_MASK_H
3
4#include <linux/preempt.h>
5
6/*
7 * We put the hardirq and softirq counter into the preemption
8 * counter. The bitmask has the following meaning:
9 *
10 * - bits 0-7 are the preemption count (max preemption depth: 256)
11 * - bits 8-15 are the softirq count (max # of softirqs: 256)
12 *
13 * The hardirq count could in theory be the same as the number of
14 * interrupts in the system, but we run all interrupt handlers with
15 * interrupts disabled, so we cannot have nesting interrupts. Though
16 * there are a few palaeontologic drivers which reenable interrupts in
17 * the handler, so we need more than one bit here.
18 *
19 * PREEMPT_MASK: 0x000000ff
20 * SOFTIRQ_MASK: 0x0000ff00
21 * HARDIRQ_MASK: 0x000f0000
22 * NMI_MASK: 0x00100000
23 * PREEMPT_ACTIVE: 0x00200000
24 */
25#define PREEMPT_BITS 8
26#define SOFTIRQ_BITS 8
27#define HARDIRQ_BITS 4
28#define NMI_BITS 1
29
30#define PREEMPT_SHIFT 0
31#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
32#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
33#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
34
35#define __IRQ_MASK(x) ((1UL << (x))-1)
36
37#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
38#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
39#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
40#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
41
42#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
43#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
44#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
45#define NMI_OFFSET (1UL << NMI_SHIFT)
46
47#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
48
49#define PREEMPT_ACTIVE_BITS 1
50#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
51#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
52
53#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
54#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
55#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
56 | NMI_MASK))
57
58/*
59 * Are we doing bottom half or hardware interrupt processing?
60 * Are we in a softirq context? Interrupt context?
61 * in_softirq - Are we currently processing softirq or have bh disabled?
62 * in_serving_softirq - Are we currently processing softirq?
63 */
64#define in_irq() (hardirq_count())
65#define in_softirq() (softirq_count())
66#define in_interrupt() (irq_count())
67#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
68
69/*
70 * Are we in NMI context?
71 */
72#define in_nmi() (preempt_count() & NMI_MASK)
73
74#if defined(CONFIG_PREEMPT_COUNT)
75# define PREEMPT_CHECK_OFFSET 1
76#else
77# define PREEMPT_CHECK_OFFSET 0
78#endif
79
80/*
81 * The preempt_count offset needed for things like:
82 *
83 * spin_lock_bh()
84 *
85 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
86 * softirqs, such that unlock sequences of:
87 *
88 * spin_unlock();
89 * local_bh_enable();
90 *
91 * Work as expected.
92 */
93#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
94
95/*
96 * Are we running in atomic context? WARNING: this macro cannot
97 * always detect atomic context; in particular, it cannot know about
98 * held spinlocks in non-preemptible kernels. Thus it should not be
99 * used in the general case to determine whether sleeping is possible.
100 * Do not use in_atomic() in driver code.
101 */
102#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
103
104/*
105 * Check whether we were atomic before we did preempt_disable():
106 * (used by the scheduler, *after* releasing the kernel lock)
107 */
108#define in_atomic_preempt_off() \
109 ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
110
111#ifdef CONFIG_PREEMPT_COUNT
112# define preemptible() (preempt_count() == 0 && !irqs_disabled())
113#else
114# define preemptible() 0
115#endif
116
117#endif /* LINUX_PREEMPT_MASK_H */
diff --git a/include/linux/property.h b/include/linux/property.h
index de8bdf417a35..76ebde9c11d4 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -164,4 +164,6 @@ struct property_set {
164 164
165void device_add_property_set(struct device *dev, struct property_set *pset); 165void device_add_property_set(struct device *dev, struct property_set *pset);
166 166
167bool device_dma_is_coherent(struct device *dev);
168
167#endif /* _LINUX_PROPERTY_H_ */ 169#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index e90628cac8fa..36262d08a9da 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -182,6 +182,8 @@ struct pwm_chip {
182int pwm_set_chip_data(struct pwm_device *pwm, void *data); 182int pwm_set_chip_data(struct pwm_device *pwm, void *data);
183void *pwm_get_chip_data(struct pwm_device *pwm); 183void *pwm_get_chip_data(struct pwm_device *pwm);
184 184
185int pwmchip_add_with_polarity(struct pwm_chip *chip,
186 enum pwm_polarity polarity);
185int pwmchip_add(struct pwm_chip *chip); 187int pwmchip_add(struct pwm_chip *chip);
186int pwmchip_remove(struct pwm_chip *chip); 188int pwmchip_remove(struct pwm_chip *chip);
187struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, 189struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
@@ -217,6 +219,11 @@ static inline int pwmchip_add(struct pwm_chip *chip)
217 return -EINVAL; 219 return -EINVAL;
218} 220}
219 221
 222static inline int pwmchip_add_with_polarity(struct pwm_chip *chip, enum pwm_polarity polarity)
223{
224 return -EINVAL;
225}
226
220static inline int pwmchip_remove(struct pwm_chip *chip) 227static inline int pwmchip_remove(struct pwm_chip *chip)
221{ 228{
222 return -EINVAL; 229 return -EINVAL;
@@ -290,10 +297,15 @@ struct pwm_lookup {
290 297
291#if IS_ENABLED(CONFIG_PWM) 298#if IS_ENABLED(CONFIG_PWM)
292void pwm_add_table(struct pwm_lookup *table, size_t num); 299void pwm_add_table(struct pwm_lookup *table, size_t num);
300void pwm_remove_table(struct pwm_lookup *table, size_t num);
293#else 301#else
294static inline void pwm_add_table(struct pwm_lookup *table, size_t num) 302static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
295{ 303{
296} 304}
305
306static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
307{
308}
297#endif 309#endif
298 310
299#ifdef CONFIG_PWM_SYSFS 311#ifdef CONFIG_PWM_SYSFS
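pwm_remove_table() finally lets board code that registers a lookup table also tear it down, e.g. on module unload. A sketch with hypothetical provider and consumer names:

/* Hypothetical board code registering and removing a PWM lookup table. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pwm.h>

static struct pwm_lookup board_pwm_lookup[] = {
	PWM_LOOKUP("pwm-chip.0", 0, "leds-pwm", "backlight",
		   5000000, PWM_POLARITY_NORMAL),
};

static int __init board_init(void)
{
	pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
	return 0;
}

static void __exit board_exit(void)
{
	pwm_remove_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
}

module_init(board_init);
module_exit(board_exit);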
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index dab545bb66b3..0485bab061fd 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -194,8 +194,9 @@ enum pxa_ssp_type {
194 PXA168_SSP, 194 PXA168_SSP,
195 PXA910_SSP, 195 PXA910_SSP,
196 CE4100_SSP, 196 CE4100_SSP,
197 LPSS_SSP,
198 QUARK_X1000_SSP, 197 QUARK_X1000_SSP,
198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
199 LPSS_BYT_SSP,
199}; 200};
200 201
201struct ssp_device { 202struct ssp_device {
diff --git a/include/linux/random.h b/include/linux/random.h
index b05856e16b75..e651874df2c9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -6,14 +6,23 @@
6#ifndef _LINUX_RANDOM_H 6#ifndef _LINUX_RANDOM_H
7#define _LINUX_RANDOM_H 7#define _LINUX_RANDOM_H
8 8
9#include <linux/list.h>
9#include <uapi/linux/random.h> 10#include <uapi/linux/random.h>
10 11
12struct random_ready_callback {
13 struct list_head list;
14 void (*func)(struct random_ready_callback *rdy);
15 struct module *owner;
16};
17
11extern void add_device_randomness(const void *, unsigned int); 18extern void add_device_randomness(const void *, unsigned int);
12extern void add_input_randomness(unsigned int type, unsigned int code, 19extern void add_input_randomness(unsigned int type, unsigned int code,
13 unsigned int value); 20 unsigned int value);
14extern void add_interrupt_randomness(int irq, int irq_flags); 21extern void add_interrupt_randomness(int irq, int irq_flags);
15 22
16extern void get_random_bytes(void *buf, int nbytes); 23extern void get_random_bytes(void *buf, int nbytes);
24extern int add_random_ready_callback(struct random_ready_callback *rdy);
25extern void del_random_ready_callback(struct random_ready_callback *rdy);
17extern void get_random_bytes_arch(void *buf, int nbytes); 26extern void get_random_bytes_arch(void *buf, int nbytes);
18void generate_random_uuid(unsigned char uuid_out[16]); 27void generate_random_uuid(unsigned char uuid_out[16]);
19extern int random_int_secret_init(void); 28extern int random_int_secret_init(void);
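The new callback lets code that needs unpredictable bytes defer keying until the nonblocking pool is seeded, rather than consuming a possibly weak early state. A sketch; treating -EALREADY as "pool already seeded, key immediately" is an assumption based on the intended use:

/* Hypothetical module deferring key generation until the RNG is ready. */
#include <linux/module.h>
#include <linux/random.h>

static void my_rng_ready(struct random_ready_callback *rdy)
{
	u8 key[16];

	get_random_bytes(key, sizeof(key));
	/* ... install the freshly generated key ... */
}

static struct random_ready_callback my_ready = {
	.func  = my_rng_ready,
	.owner = THIS_MODULE,
};

static int __init my_init(void)
{
	int err = add_random_ready_callback(&my_ready);

	if (err == -EALREADY) {		/* pool already seeded */
		my_rng_ready(&my_ready);
		err = 0;
	}
	return err;
}

static void __exit my_exit(void)
{
	del_random_ready_callback(&my_ready);
}

module_init(my_init);
module_exit(my_exit);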
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index a18b16f1dc0e..17c6b1f84a77 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -29,8 +29,8 @@
29 */ 29 */
30static inline void INIT_LIST_HEAD_RCU(struct list_head *list) 30static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
31{ 31{
32 ACCESS_ONCE(list->next) = list; 32 WRITE_ONCE(list->next, list);
33 ACCESS_ONCE(list->prev) = list; 33 WRITE_ONCE(list->prev, list);
34} 34}
35 35
36/* 36/*
@@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
288#define list_first_or_null_rcu(ptr, type, member) \ 288#define list_first_or_null_rcu(ptr, type, member) \
289({ \ 289({ \
290 struct list_head *__ptr = (ptr); \ 290 struct list_head *__ptr = (ptr); \
291 struct list_head *__next = ACCESS_ONCE(__ptr->next); \ 291 struct list_head *__next = READ_ONCE(__ptr->next); \
292 likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ 292 likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
293}) 293})
294 294
@@ -549,8 +549,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
549 */ 549 */
550#define hlist_for_each_entry_from_rcu(pos, member) \ 550#define hlist_for_each_entry_from_rcu(pos, member) \
551 for (; pos; \ 551 for (; pos; \
552 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 552 pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
553 typeof(*(pos)), member)) 553 &(pos)->member)), typeof(*(pos)), member))
554 554
555#endif /* __KERNEL__ */ 555#endif /* __KERNEL__ */
556#endif 556#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 573a5afd5ed8..33a056bb886f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,6 +44,8 @@
44#include <linux/debugobjects.h> 44#include <linux/debugobjects.h>
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/ktime.h>
48
47#include <asm/barrier.h> 49#include <asm/barrier.h>
48 50
49extern int rcu_expedited; /* for sysctl */ 51extern int rcu_expedited; /* for sysctl */
@@ -292,10 +294,6 @@ void rcu_sched_qs(void);
292void rcu_bh_qs(void); 294void rcu_bh_qs(void);
293void rcu_check_callbacks(int user); 295void rcu_check_callbacks(int user);
294struct notifier_block; 296struct notifier_block;
295void rcu_idle_enter(void);
296void rcu_idle_exit(void);
297void rcu_irq_enter(void);
298void rcu_irq_exit(void);
299int rcu_cpu_notify(struct notifier_block *self, 297int rcu_cpu_notify(struct notifier_block *self,
300 unsigned long action, void *hcpu); 298 unsigned long action, void *hcpu);
301 299
@@ -364,8 +362,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
364#define rcu_note_voluntary_context_switch(t) \ 362#define rcu_note_voluntary_context_switch(t) \
365 do { \ 363 do { \
366 rcu_all_qs(); \ 364 rcu_all_qs(); \
367 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ 365 if (READ_ONCE((t)->rcu_tasks_holdout)) \
368 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ 366 WRITE_ONCE((t)->rcu_tasks_holdout, false); \
369 } while (0) 367 } while (0)
370#else /* #ifdef CONFIG_TASKS_RCU */ 368#else /* #ifdef CONFIG_TASKS_RCU */
371#define TASKS_RCU(x) do { } while (0) 369#define TASKS_RCU(x) do { } while (0)
@@ -609,7 +607,7 @@ static inline void rcu_preempt_sleep_check(void)
609 607
610#define __rcu_access_pointer(p, space) \ 608#define __rcu_access_pointer(p, space) \
611({ \ 609({ \
612 typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ 610 typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
613 rcu_dereference_sparse(p, space); \ 611 rcu_dereference_sparse(p, space); \
614 ((typeof(*p) __force __kernel *)(_________p1)); \ 612 ((typeof(*p) __force __kernel *)(_________p1)); \
615}) 613})
@@ -628,21 +626,6 @@ static inline void rcu_preempt_sleep_check(void)
628 ((typeof(*p) __force __kernel *)(p)); \ 626 ((typeof(*p) __force __kernel *)(p)); \
629}) 627})
630 628
631#define __rcu_access_index(p, space) \
632({ \
633 typeof(p) _________p1 = ACCESS_ONCE(p); \
634 rcu_dereference_sparse(p, space); \
635 (_________p1); \
636})
637#define __rcu_dereference_index_check(p, c) \
638({ \
639 /* Dependency order vs. p above. */ \
640 typeof(p) _________p1 = lockless_dereference(p); \
641 rcu_lockdep_assert(c, \
642 "suspicious rcu_dereference_index_check() usage"); \
643 (_________p1); \
644})
645
646/** 629/**
647 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable 630 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
648 * @v: The value to statically initialize with. 631 * @v: The value to statically initialize with.
@@ -659,7 +642,7 @@ static inline void rcu_preempt_sleep_check(void)
659 */ 642 */
660#define lockless_dereference(p) \ 643#define lockless_dereference(p) \
661({ \ 644({ \
662 typeof(p) _________p1 = ACCESS_ONCE(p); \ 645 typeof(p) _________p1 = READ_ONCE(p); \
663 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ 646 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
664 (_________p1); \ 647 (_________p1); \
665}) 648})
@@ -702,7 +685,7 @@ static inline void rcu_preempt_sleep_check(void)
702 * @p: The pointer to read 685 * @p: The pointer to read
703 * 686 *
704 * Return the value of the specified RCU-protected pointer, but omit the 687 * Return the value of the specified RCU-protected pointer, but omit the
705 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful 688 * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
706 * when the value of this pointer is accessed, but the pointer is not 689 * when the value of this pointer is accessed, but the pointer is not
707 * dereferenced, for example, when testing an RCU-protected pointer against 690 * dereferenced, for example, when testing an RCU-protected pointer against
708 * NULL. Although rcu_access_pointer() may also be used in cases where 691 * NULL. Although rcu_access_pointer() may also be used in cases where
@@ -787,47 +770,12 @@ static inline void rcu_preempt_sleep_check(void)
787#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) 770#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
788 771
789/** 772/**
790 * rcu_access_index() - fetch RCU index with no dereferencing
791 * @p: The index to read
792 *
793 * Return the value of the specified RCU-protected index, but omit the
794 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
795 * when the value of this index is accessed, but the index is not
796 * dereferenced, for example, when testing an RCU-protected index against
797 * -1. Although rcu_access_index() may also be used in cases where
798 * update-side locks prevent the value of the index from changing, you
799 * should instead use rcu_dereference_index_protected() for this use case.
800 */
801#define rcu_access_index(p) __rcu_access_index((p), __rcu)
802
803/**
804 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
805 * @p: The pointer to read, prior to dereferencing
806 * @c: The conditions under which the dereference will take place
807 *
808 * Similar to rcu_dereference_check(), but omits the sparse checking.
809 * This allows rcu_dereference_index_check() to be used on integers,
810 * which can then be used as array indices. Attempting to use
811 * rcu_dereference_check() on an integer will give compiler warnings
812 * because the sparse address-space mechanism relies on dereferencing
813 * the RCU-protected pointer. Dereferencing integers is not something
814 * that even gcc will put up with.
815 *
816 * Note that this function does not implicitly check for RCU read-side
817 * critical sections. If this function gains lots of uses, it might
818 * make sense to provide versions for each flavor of RCU, but it does
819 * not make sense as of early 2010.
820 */
821#define rcu_dereference_index_check(p, c) \
822 __rcu_dereference_index_check((p), (c))
823
824/**
825 * rcu_dereference_protected() - fetch RCU pointer when updates prevented 773 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
826 * @p: The pointer to read, prior to dereferencing 774 * @p: The pointer to read, prior to dereferencing
827 * @c: The conditions under which the dereference will take place 775 * @c: The conditions under which the dereference will take place
828 * 776 *
829 * Return the value of the specified RCU-protected pointer, but omit 777 * Return the value of the specified RCU-protected pointer, but omit
830 * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This 778 * both the smp_read_barrier_depends() and the READ_ONCE(). This
831 * is useful in cases where update-side locks prevent the value of the 779 * is useful in cases where update-side locks prevent the value of the
832 * pointer from changing. Please note that this primitive does -not- 780 * pointer from changing. Please note that this primitive does -not-
833 * prevent the compiler from repeating this reference or combining it 781 * prevent the compiler from repeating this reference or combining it
@@ -1153,13 +1101,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1153#define kfree_rcu(ptr, rcu_head) \ 1101#define kfree_rcu(ptr, rcu_head) \
1154 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) 1102 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
1155 1103
1156#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) 1104#ifdef CONFIG_TINY_RCU
1157static inline int rcu_needs_cpu(unsigned long *delta_jiffies) 1105static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1158{ 1106{
1159 *delta_jiffies = ULONG_MAX; 1107 *nextevt = KTIME_MAX;
1160 return 0; 1108 return 0;
1161} 1109}
1162#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */ 1110#endif /* #ifdef CONFIG_TINY_RCU */
1163 1111
1164#if defined(CONFIG_RCU_NOCB_CPU_ALL) 1112#if defined(CONFIG_RCU_NOCB_CPU_ALL)
1165static inline bool rcu_is_nocb_cpu(int cpu) { return true; } 1113static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
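The comment fixups track the ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversion; rcu_access_pointer() remains the right tool when only the pointer value itself is inspected. A short sketch contrasting it with a full dereference (all names hypothetical):

/* Sketch: rcu_access_pointer() vs. rcu_dereference(). */
#include <linux/rcupdate.h>

struct foo { int val; };
static struct foo __rcu *gp;

static bool foo_present(void)
{
	/* Only tests the pointer against NULL; no read-side
	 * critical section is required. */
	return rcu_access_pointer(gp) != NULL;
}

static int foo_read_val(void)
{
	struct foo *p;
	int v = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* ordered load, safe to dereference */
	if (p)
		v = p->val;
	rcu_read_unlock();
	return v;
}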
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 937edaeb150d..3df6c1ec4e25 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -159,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void)
159{ 159{
160} 160}
161 161
162static inline void rcu_idle_enter(void)
163{
164}
165
166static inline void rcu_idle_exit(void)
167{
168}
169
170static inline void rcu_irq_enter(void)
171{
172}
173
174static inline void rcu_irq_exit(void)
175{
176}
177
162static inline void exit_rcu(void) 178static inline void exit_rcu(void)
163{ 179{
164} 180}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d2e583a6aaca..456879143f89 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -31,9 +31,7 @@
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33void rcu_note_context_switch(void); 33void rcu_note_context_switch(void);
34#ifndef CONFIG_RCU_NOCB_CPU_ALL 34int rcu_needs_cpu(u64 basem, u64 *nextevt);
35int rcu_needs_cpu(unsigned long *delta_jiffies);
36#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
37void rcu_cpu_stall_reset(void); 35void rcu_cpu_stall_reset(void);
38 36
39/* 37/*
@@ -93,6 +91,11 @@ void rcu_force_quiescent_state(void);
93void rcu_bh_force_quiescent_state(void); 91void rcu_bh_force_quiescent_state(void);
94void rcu_sched_force_quiescent_state(void); 92void rcu_sched_force_quiescent_state(void);
95 93
94void rcu_idle_enter(void);
95void rcu_idle_exit(void);
96void rcu_irq_enter(void);
97void rcu_irq_exit(void);
98
96void exit_rcu(void); 99void exit_rcu(void);
97 100
98void rcu_scheduler_starting(void); 101void rcu_scheduler_starting(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 116655d92269..59c55ea0f0b5 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -433,6 +433,8 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
433 unsigned int mask, unsigned int val, 433 unsigned int mask, unsigned int val,
434 bool *change); 434 bool *change);
435int regmap_get_val_bytes(struct regmap *map); 435int regmap_get_val_bytes(struct regmap *map);
436int regmap_get_max_register(struct regmap *map);
437int regmap_get_reg_stride(struct regmap *map);
436int regmap_async_complete(struct regmap *map); 438int regmap_async_complete(struct regmap *map);
437bool regmap_can_raw_write(struct regmap *map); 439bool regmap_can_raw_write(struct regmap *map);
438 440
@@ -676,6 +678,18 @@ static inline int regmap_get_val_bytes(struct regmap *map)
676 return -EINVAL; 678 return -EINVAL;
677} 679}
678 680
681static inline int regmap_get_max_register(struct regmap *map)
682{
683 WARN_ONCE(1, "regmap API is disabled");
684 return -EINVAL;
685}
686
687static inline int regmap_get_reg_stride(struct regmap *map)
688{
689 WARN_ONCE(1, "regmap API is disabled");
690 return -EINVAL;
691}
692
679static inline int regcache_sync(struct regmap *map) 693static inline int regcache_sync(struct regmap *map)
680{ 694{
681 WARN_ONCE(1, "regmap API is disabled"); 695 WARN_ONCE(1, "regmap API is disabled");
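regmap_get_max_register() and regmap_get_reg_stride() expose enough geometry to walk a map generically. A sketch of a debug dump built on them (the helper name is hypothetical):

/* Hypothetical debug helper walking a register map with the new getters. */
#include <linux/device.h>
#include <linux/regmap.h>

static void dump_regs(struct device *dev, struct regmap *map)
{
	int max = regmap_get_max_register(map);
	int stride = regmap_get_reg_stride(map);
	unsigned int reg, val;

	if (max < 0 || stride <= 0)
		return;		/* map not configured or API disabled */

	for (reg = 0; reg <= max; reg += stride)
		if (!regmap_read(map, reg, &val))
			dev_dbg(dev, "%#x: %#x\n", reg, val);
}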
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fffa688ac3a7..4db9fbe4889d 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -91,6 +91,7 @@ struct regulator_linear_range {
91 * @set_current_limit: Configure a limit for a current-limited regulator. 91 * @set_current_limit: Configure a limit for a current-limited regulator.
92 * The driver should select the current closest to max_uA. 92 * The driver should select the current closest to max_uA.
93 * @get_current_limit: Get the configured limit for a current-limited regulator. 93 * @get_current_limit: Get the configured limit for a current-limited regulator.
94 * @set_input_current_limit: Configure an input limit.
94 * 95 *
95 * @set_mode: Set the configured operating mode for the regulator. 96 * @set_mode: Set the configured operating mode for the regulator.
96 * @get_mode: Get the configured operating mode for the regulator. 97 * @get_mode: Get the configured operating mode for the regulator.
@@ -111,6 +112,7 @@ struct regulator_linear_range {
111 * to stabilise after being set to a new value, in microseconds. 112 * to stabilise after being set to a new value, in microseconds.
112 * The function provides the from and to voltage selector, the 113 * The function provides the from and to voltage selector, the
113 * function should return the worst case. 114 * function should return the worst case.
115 * @set_soft_start: Enable soft start for the regulator.
114 * 116 *
115 * @set_suspend_voltage: Set the voltage for the regulator when the system 117 * @set_suspend_voltage: Set the voltage for the regulator when the system
116 * is suspended. 118 * is suspended.
@@ -121,6 +123,9 @@ struct regulator_linear_range {
121 * @set_suspend_mode: Set the operating mode for the regulator when the 123 * @set_suspend_mode: Set the operating mode for the regulator when the
122 * system is suspended. 124 * system is suspended.
123 * 125 *
126 * @set_pull_down: Configure the regulator to pull down when the regulator
127 * is disabled.
128 *
124 * This struct describes regulator operations which can be implemented by 129 * This struct describes regulator operations which can be implemented by
125 * regulator chip drivers. 130 * regulator chip drivers.
126 */ 131 */
@@ -142,6 +147,8 @@ struct regulator_ops {
142 int min_uA, int max_uA); 147 int min_uA, int max_uA);
143 int (*get_current_limit) (struct regulator_dev *); 148 int (*get_current_limit) (struct regulator_dev *);
144 149
150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
151
145 /* enable/disable regulator */ 152 /* enable/disable regulator */
146 int (*enable) (struct regulator_dev *); 153 int (*enable) (struct regulator_dev *);
147 int (*disable) (struct regulator_dev *); 154 int (*disable) (struct regulator_dev *);
@@ -158,6 +165,8 @@ struct regulator_ops {
158 unsigned int old_selector, 165 unsigned int old_selector,
159 unsigned int new_selector); 166 unsigned int new_selector);
160 167
168 int (*set_soft_start) (struct regulator_dev *);
169
161 /* report regulator status ... most other accessors report 170 /* report regulator status ... most other accessors report
162 * control inputs, this reports results of combining inputs 171 * control inputs, this reports results of combining inputs
163 * from Linux (and other sources) with the actual load. 172 * from Linux (and other sources) with the actual load.
@@ -187,6 +196,8 @@ struct regulator_ops {
187 196
188 /* set regulator suspend operating mode (defined in consumer.h) */ 197 /* set regulator suspend operating mode (defined in consumer.h) */
189 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); 198 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
199
200 int (*set_pull_down) (struct regulator_dev *);
190}; 201};
191 202
192/* 203/*
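A driver opts into the new hooks by filling them in its regulator_ops; the core is then expected to call them when the matching constraints (see the machine.h additions below) request soft start or pull-down. A sketch with hypothetical register details:

/* Hypothetical regulator driver implementing the two new ops. */
#include <linux/regulator/driver.h>

static int my_reg_set_soft_start(struct regulator_dev *rdev)
{
	/* e.g. set a SOFT_START bit in a control register */
	return 0;
}

static int my_reg_set_pull_down(struct regulator_dev *rdev)
{
	/* e.g. enable the output pull-down used while disabled */
	return 0;
}

static const struct regulator_ops my_reg_ops = {
	.set_soft_start	= my_reg_set_soft_start,
	.set_pull_down	= my_reg_set_pull_down,
};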
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b07562e082c4..b11be1260129 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -75,6 +75,8 @@ struct regulator_state {
75 * 75 *
76 * @min_uA: Smallest current consumers may set. 76 * @min_uA: Smallest current consumers may set.
77 * @max_uA: Largest current consumers may set. 77 * @max_uA: Largest current consumers may set.
78 * @ilim_uA: Maximum input current.
79 * @system_load: Load that isn't captured by any consumer requests.
78 * 80 *
79 * @valid_modes_mask: Mask of modes which may be configured by consumers. 81 * @valid_modes_mask: Mask of modes which may be configured by consumers.
80 * @valid_ops_mask: Operations which may be performed by consumers. 82 * @valid_ops_mask: Operations which may be performed by consumers.
@@ -86,6 +88,8 @@ struct regulator_state {
86 * applied. 88 * applied.
87 * @apply_uV: Apply the voltage constraint when initialising. 89 * @apply_uV: Apply the voltage constraint when initialising.
88 * @ramp_disable: Disable ramp delay when initialising or when setting voltage. 90 * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
91 * @soft_start: Enable soft start so that voltage ramps slowly.
92 * @pull_down: Enable pull down when regulator is disabled.
89 * 93 *
90 * @input_uV: Input voltage for regulator when supplied by another regulator. 94 * @input_uV: Input voltage for regulator when supplied by another regulator.
91 * 95 *
@@ -111,6 +115,9 @@ struct regulation_constraints {
111 /* current output range (inclusive) - for current control */ 115 /* current output range (inclusive) - for current control */
112 int min_uA; 116 int min_uA;
113 int max_uA; 117 int max_uA;
118 int ilim_uA;
119
120 int system_load;
114 121
115 /* valid regulator operating modes for this machine */ 122 /* valid regulator operating modes for this machine */
116 unsigned int valid_modes_mask; 123 unsigned int valid_modes_mask;
@@ -138,6 +145,8 @@ struct regulation_constraints {
138 unsigned boot_on:1; /* bootloader/firmware enabled regulator */ 145 unsigned boot_on:1; /* bootloader/firmware enabled regulator */
139 unsigned apply_uV:1; /* apply uV constraint if min == max */ 146 unsigned apply_uV:1; /* apply uV constraint if min == max */
140 unsigned ramp_disable:1; /* disable ramp delay */ 147 unsigned ramp_disable:1; /* disable ramp delay */
148 unsigned soft_start:1; /* ramp voltage slowly */
149 unsigned pull_down:1; /* pull down resistor when regulator off */
141}; 150};
142 151
143/** 152/**
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f8acc052e353..f6a8a16a0d4d 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -58,6 +58,9 @@
58 * control signal from EN input pin. If it is false then 58 * control signal from EN input pin. If it is false then
59 * voltage output will be enabled/disabled through EN bit of 59 * voltage output will be enabled/disabled through EN bit of
60 * device register. 60 * device register.
 61 * @enable_gpio: Enable GPIO. If the EN pin is controlled through a GPIO
 62 * from the host then the GPIO number can be provided here. If it is
 63 * not GPIO-controlled then it should be -1.
61 * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic. 64 * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
62 * @dvs_def_state: Default state of dvs. 1 if it is high else 0. 65 * @dvs_def_state: Default state of dvs. 1 if it is high else 0.
63 */ 66 */
@@ -65,6 +68,7 @@ struct max8973_regulator_platform_data {
65 struct regulator_init_data *reg_init_data; 68 struct regulator_init_data *reg_init_data;
66 unsigned long control_flags; 69 unsigned long control_flags;
67 bool enable_ext_control; 70 bool enable_ext_control;
71 int enable_gpio;
68 int dvs_gpio; 72 int dvs_gpio;
69 unsigned dvs_def_state:1; 73 unsigned dvs_def_state:1;
70}; 74};
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 6bda06f21930..cde976e86b48 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -298,7 +298,7 @@ struct rio_id_table {
298 * struct rio_net - RIO network info 298 * struct rio_net - RIO network info
299 * @node: Node in global list of RIO networks 299 * @node: Node in global list of RIO networks
300 * @devices: List of devices in this network 300 * @devices: List of devices in this network
301 * @switches: List of switches in this netowrk 301 * @switches: List of switches in this network
302 * @mports: List of master ports accessing this network 302 * @mports: List of master ports accessing this network
303 * @hport: Default port for accessing this network 303 * @hport: Default port for accessing this network
304 * @id: RIO network ID 304 * @id: RIO network ID
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 7b8e260c4a27..39adaa9529eb 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -79,17 +79,9 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
79 79
80struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); 80struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
81 81
82#ifdef CONFIG_NET_CLS_ACT 82#ifdef CONFIG_NET_INGRESS
83void net_inc_ingress_queue(void); 83void net_inc_ingress_queue(void);
84void net_dec_ingress_queue(void); 84void net_dec_ingress_queue(void);
85#else
86static inline void net_inc_ingress_queue(void)
87{
88}
89
90static inline void net_dec_ingress_queue(void)
91{
92}
93#endif 85#endif
94 86
95extern void rtnetlink_init(void); 87extern void rtnetlink_init(void);
@@ -122,5 +114,9 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
122 114
123extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 115extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
124 struct net_device *dev, u16 mode, 116 struct net_device *dev, u16 mode,
125 u32 flags, u32 mask, int nlflags); 117 u32 flags, u32 mask, int nlflags,
118 u32 filter_mask,
119 int (*vlan_fill)(struct sk_buff *skb,
120 struct net_device *dev,
121 u32 filter_mask));
126#endif /* __LINUX_RTNETLINK_H */ 122#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ed8f9e70df9b..50a8486c524b 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -2,13 +2,39 @@
2#define _LINUX_SCATTERLIST_H 2#define _LINUX_SCATTERLIST_H
3 3
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/types.h>
5#include <linux/bug.h> 6#include <linux/bug.h>
6#include <linux/mm.h> 7#include <linux/mm.h>
7
8#include <asm/types.h>
9#include <asm/scatterlist.h>
10#include <asm/io.h> 8#include <asm/io.h>
11 9
10struct scatterlist {
11#ifdef CONFIG_DEBUG_SG
12 unsigned long sg_magic;
13#endif
14 unsigned long page_link;
15 unsigned int offset;
16 unsigned int length;
17 dma_addr_t dma_address;
18#ifdef CONFIG_NEED_SG_DMA_LENGTH
19 unsigned int dma_length;
20#endif
21};
22
23/*
24 * These macros should be used after a dma_map_sg call has been done
25 * to get bus addresses of each of the SG entries and their lengths.
26 * You should only work with the number of sg entries dma_map_sg
27 * returns, or alternatively stop on the first sg_dma_len(sg) which
28 * is 0.
29 */
30#define sg_dma_address(sg) ((sg)->dma_address)
31
32#ifdef CONFIG_NEED_SG_DMA_LENGTH
33#define sg_dma_len(sg) ((sg)->dma_length)
34#else
35#define sg_dma_len(sg) ((sg)->length)
36#endif
37
12struct sg_table { 38struct sg_table {
13 struct scatterlist *sgl; /* the list */ 39 struct scatterlist *sgl; /* the list */
14 unsigned int nents; /* number of mapped entries */ 40 unsigned int nents; /* number of mapped entries */
@@ -18,10 +44,9 @@ struct sg_table {
18/* 44/*
19 * Notes on SG table design. 45 * Notes on SG table design.
20 * 46 *
21 * Architectures must provide an unsigned long page_link field in the 47 * We use the unsigned long page_link field in the scatterlist struct to place
22 * scatterlist struct. We use that to place the page pointer AND encode 48 * the page pointer AND encode information about the sg table as well. The two
23 * information about the sg table as well. The two lower bits are reserved 49 * lower bits are reserved for this information.
24 * for this information.
25 * 50 *
26 * If bit 0 is set, then the page_link contains a pointer to the next sg 51 * If bit 0 is set, then the page_link contains a pointer to the next sg
27 * table list. Otherwise the next entry is at sg + 1. 52 * table list. Otherwise the next entry is at sg + 1.
@@ -221,6 +246,7 @@ static inline void *sg_virt(struct scatterlist *sg)
221} 246}
222 247
223int sg_nents(struct scatterlist *sg); 248int sg_nents(struct scatterlist *sg);
249int sg_nents_for_len(struct scatterlist *sg, u64 len);
224struct scatterlist *sg_next(struct scatterlist *); 250struct scatterlist *sg_next(struct scatterlist *);
225struct scatterlist *sg_last(struct scatterlist *s, unsigned int); 251struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
226void sg_init_table(struct scatterlist *, unsigned int); 252void sg_init_table(struct scatterlist *, unsigned int);
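The sg_dma_address()/sg_dma_len() note above translates into the usual mapping loop; only the count returned by dma_map_sg() may be iterated, since an IOMMU can coalesce entries. A sketch (the direction and submission step are placeholders):

/* Sketch: walking the DMA view of a scatterlist after mapping it. */
#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

static int map_and_submit(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)
		pr_debug("seg %d: addr %pad len %u\n",
			 i, &sg_dma_address(sg), sg_dma_len(sg));

	/* ... hand the segments to the hardware, then later: */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}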
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 26a2e6122734..6633e83e608a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,7 +25,7 @@ struct sched_param {
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/nodemask.h> 26#include <linux/nodemask.h>
27#include <linux/mm_types.h> 27#include <linux/mm_types.h>
28#include <linux/preempt_mask.h> 28#include <linux/preempt.h>
29 29
30#include <asm/page.h> 30#include <asm/page.h>
31#include <asm/ptrace.h> 31#include <asm/ptrace.h>
@@ -132,6 +132,7 @@ struct fs_struct;
132struct perf_event_context; 132struct perf_event_context;
133struct blk_plug; 133struct blk_plug;
134struct filename; 134struct filename;
135struct nameidata;
135 136
136#define VMACACHE_BITS 2 137#define VMACACHE_BITS 2
137#define VMACACHE_SIZE (1U << VMACACHE_BITS) 138#define VMACACHE_SIZE (1U << VMACACHE_BITS)
@@ -173,7 +174,12 @@ extern unsigned long nr_iowait_cpu(int cpu);
173extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); 174extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
174 175
175extern void calc_global_load(unsigned long ticks); 176extern void calc_global_load(unsigned long ticks);
177
178#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
176extern void update_cpu_load_nohz(void); 179extern void update_cpu_load_nohz(void);
180#else
181static inline void update_cpu_load_nohz(void) { }
182#endif
177 183
178extern unsigned long get_parent_ip(unsigned long addr); 184extern unsigned long get_parent_ip(unsigned long addr);
179 185
@@ -213,9 +219,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
213#define TASK_WAKEKILL 128 219#define TASK_WAKEKILL 128
214#define TASK_WAKING 256 220#define TASK_WAKING 256
215#define TASK_PARKED 512 221#define TASK_PARKED 512
216#define TASK_STATE_MAX 1024 222#define TASK_NOLOAD 1024
223#define TASK_STATE_MAX 2048
217 224
218#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP" 225#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
219 226
220extern char ___assert_task_state[1 - 2*!!( 227extern char ___assert_task_state[1 - 2*!!(
221 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 228 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -225,6 +232,8 @@ extern char ___assert_task_state[1 - 2*!!(
225#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) 232#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
226#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) 233#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
227 234
235#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
236
228/* Convenience macros for the sake of wake_up */ 237/* Convenience macros for the sake of wake_up */
229#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) 238#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
230#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) 239#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
@@ -240,7 +249,8 @@ extern char ___assert_task_state[1 - 2*!!(
240 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) 249 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
241#define task_contributes_to_load(task) \ 250#define task_contributes_to_load(task) \
242 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ 251 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
243 (task->flags & PF_FROZEN) == 0) 252 (task->flags & PF_FROZEN) == 0 && \
253 (task->state & TASK_NOLOAD) == 0)
244 254
245#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 255#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
246 256
@@ -252,7 +262,7 @@ extern char ___assert_task_state[1 - 2*!!(
252#define set_task_state(tsk, state_value) \ 262#define set_task_state(tsk, state_value) \
253 do { \ 263 do { \
254 (tsk)->task_state_change = _THIS_IP_; \ 264 (tsk)->task_state_change = _THIS_IP_; \
255 set_mb((tsk)->state, (state_value)); \ 265 smp_store_mb((tsk)->state, (state_value)); \
256 } while (0) 266 } while (0)
257 267
258/* 268/*
@@ -274,7 +284,7 @@ extern char ___assert_task_state[1 - 2*!!(
274#define set_current_state(state_value) \ 284#define set_current_state(state_value) \
275 do { \ 285 do { \
276 current->task_state_change = _THIS_IP_; \ 286 current->task_state_change = _THIS_IP_; \
277 set_mb(current->state, (state_value)); \ 287 smp_store_mb(current->state, (state_value)); \
278 } while (0) 288 } while (0)
279 289
280#else 290#else
@@ -282,7 +292,7 @@ extern char ___assert_task_state[1 - 2*!!(
282#define __set_task_state(tsk, state_value) \ 292#define __set_task_state(tsk, state_value) \
283 do { (tsk)->state = (state_value); } while (0) 293 do { (tsk)->state = (state_value); } while (0)
284#define set_task_state(tsk, state_value) \ 294#define set_task_state(tsk, state_value) \
285 set_mb((tsk)->state, (state_value)) 295 smp_store_mb((tsk)->state, (state_value))
286 296
287/* 297/*
288 * set_current_state() includes a barrier so that the write of current->state 298 * set_current_state() includes a barrier so that the write of current->state
@@ -298,7 +308,7 @@ extern char ___assert_task_state[1 - 2*!!(
298#define __set_current_state(state_value) \ 308#define __set_current_state(state_value) \
299 do { current->state = (state_value); } while (0) 309 do { current->state = (state_value); } while (0)
300#define set_current_state(state_value) \ 310#define set_current_state(state_value) \
301 set_mb(current->state, (state_value)) 311 smp_store_mb(current->state, (state_value))
302 312
303#endif 313#endif
304 314
@@ -335,14 +345,10 @@ extern int runqueue_is_locked(int cpu);
335#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 345#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
336extern void nohz_balance_enter_idle(int cpu); 346extern void nohz_balance_enter_idle(int cpu);
337extern void set_cpu_sd_state_idle(void); 347extern void set_cpu_sd_state_idle(void);
338extern int get_nohz_timer_target(int pinned); 348extern int get_nohz_timer_target(void);
339#else 349#else
340static inline void nohz_balance_enter_idle(int cpu) { } 350static inline void nohz_balance_enter_idle(int cpu) { }
341static inline void set_cpu_sd_state_idle(void) { } 351static inline void set_cpu_sd_state_idle(void) { }
342static inline int get_nohz_timer_target(int pinned)
343{
344 return smp_processor_id();
345}
346#endif 352#endif
347 353
348/* 354/*
@@ -567,6 +573,23 @@ struct task_cputime {
567 .sum_exec_runtime = 0, \ 573 .sum_exec_runtime = 0, \
568 } 574 }
569 575
576/*
577 * This is the atomic variant of task_cputime, which can be used for
578 * storing and updating task_cputime statistics without locking.
579 */
580struct task_cputime_atomic {
581 atomic64_t utime;
582 atomic64_t stime;
583 atomic64_t sum_exec_runtime;
584};
585
586#define INIT_CPUTIME_ATOMIC \
587 (struct task_cputime_atomic) { \
588 .utime = ATOMIC64_INIT(0), \
589 .stime = ATOMIC64_INIT(0), \
590 .sum_exec_runtime = ATOMIC64_INIT(0), \
591 }
592
570#ifdef CONFIG_PREEMPT_COUNT 593#ifdef CONFIG_PREEMPT_COUNT
571#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) 594#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
572#else 595#else
@@ -584,18 +607,16 @@ struct task_cputime {
584 607
585/** 608/**
586 * struct thread_group_cputimer - thread group interval timer counts 609 * struct thread_group_cputimer - thread group interval timer counts
587 * @cputime: thread group interval timers. 610 * @cputime_atomic: atomic thread group interval timers.
588 * @running: non-zero when there are timers running and 611 * @running: non-zero when there are timers running and
589 * @cputime receives updates. 612 * @cputime receives updates.
590 * @lock: lock for fields in this struct.
591 * 613 *
592 * This structure contains the version of task_cputime, above, that is 614 * This structure contains the version of task_cputime, above, that is
593 * used for thread group CPU timer calculations. 615 * used for thread group CPU timer calculations.
594 */ 616 */
595struct thread_group_cputimer { 617struct thread_group_cputimer {
596 struct task_cputime cputime; 618 struct task_cputime_atomic cputime_atomic;
597 int running; 619 int running;
598 raw_spinlock_t lock;
599}; 620};
600 621
601#include <linux/rwsem.h> 622#include <linux/rwsem.h>
@@ -900,6 +921,50 @@ enum cpu_idle_type {
900#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) 921#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
901 922
902/* 923/*
924 * Wake-queues are lists of tasks with a pending wakeup, whose
925 * callers have already marked the task as woken internally,
926 * and can thus carry on. A common use case is being able to
 927 * do the wakeups once the corresponding user lock has been
928 * released.
929 *
930 * We hold reference to each task in the list across the wakeup,
931 * thus guaranteeing that the memory is still valid by the time
932 * the actual wakeups are performed in wake_up_q().
933 *
934 * One per task suffices, because there's never a need for a task to be
935 * in two wake queues simultaneously; it is forbidden to abandon a task
936 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
937 * already in a wake queue, the wakeup will happen soon and the second
938 * waker can just skip it.
939 *
940 * The WAKE_Q macro declares and initializes the list head.
941 * wake_up_q() does NOT reinitialize the list; it's expected to be
942 * called near the end of a function, where the fact that the queue is
943 * not used again will be easy to see by inspection.
944 *
945 * Note that this can cause spurious wakeups. schedule() callers
946 * must ensure the call is done inside a loop, confirming that the
947 * wakeup condition has in fact occurred.
948 */
949struct wake_q_node {
950 struct wake_q_node *next;
951};
952
953struct wake_q_head {
954 struct wake_q_node *first;
955 struct wake_q_node **lastp;
956};
957
958#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
959
960#define WAKE_Q(name) \
961 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
962
963extern void wake_q_add(struct wake_q_head *head,
964 struct task_struct *task);
965extern void wake_up_q(struct wake_q_head *head);
966
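
A hedged usage sketch of the wake-queue API above; the lock and the waiter-selection helper are hypothetical:

        WAKE_Q(wake_q);
        struct task_struct *task;

        spin_lock(&lock);
        while ((task = pick_next_waiter()) != NULL) /* hypothetical helper */
                wake_q_add(&wake_q, task);
        spin_unlock(&lock);

        wake_up_q(&wake_q);     /* wakeups run after the lock is released */
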
967/*
903 * sched-domains (multiprocessor balancing) declarations: 968 * sched-domains (multiprocessor balancing) declarations:
904 */ 969 */
905#ifdef CONFIG_SMP 970#ifdef CONFIG_SMP
@@ -1334,8 +1399,6 @@ struct task_struct {
1334 int rcu_read_lock_nesting; 1399 int rcu_read_lock_nesting;
1335 union rcu_special rcu_read_unlock_special; 1400 union rcu_special rcu_read_unlock_special;
1336 struct list_head rcu_node_entry; 1401 struct list_head rcu_node_entry;
1337#endif /* #ifdef CONFIG_PREEMPT_RCU */
1338#ifdef CONFIG_PREEMPT_RCU
1339 struct rcu_node *rcu_blocked_node; 1402 struct rcu_node *rcu_blocked_node;
1340#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1403#endif /* #ifdef CONFIG_PREEMPT_RCU */
1341#ifdef CONFIG_TASKS_RCU 1404#ifdef CONFIG_TASKS_RCU
@@ -1356,9 +1419,6 @@ struct task_struct {
1356#endif 1419#endif
1357 1420
1358 struct mm_struct *mm, *active_mm; 1421 struct mm_struct *mm, *active_mm;
1359#ifdef CONFIG_COMPAT_BRK
1360 unsigned brk_randomized:1;
1361#endif
1362 /* per-thread vma caching */ 1422 /* per-thread vma caching */
1363 u32 vmacache_seqnum; 1423 u32 vmacache_seqnum;
1364 struct vm_area_struct *vmacache[VMACACHE_SIZE]; 1424 struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1369,7 +1429,7 @@ struct task_struct {
1369 int exit_state; 1429 int exit_state;
1370 int exit_code, exit_signal; 1430 int exit_code, exit_signal;
1371 int pdeath_signal; /* The signal sent when the parent dies */ 1431 int pdeath_signal; /* The signal sent when the parent dies */
1372 unsigned int jobctl; /* JOBCTL_*, siglock protected */ 1432 unsigned long jobctl; /* JOBCTL_*, siglock protected */
1373 1433
1374 /* Used for emulating ABI behavior of previous Linux versions */ 1434 /* Used for emulating ABI behavior of previous Linux versions */
1375 unsigned int personality; 1435 unsigned int personality;
@@ -1381,10 +1441,14 @@ struct task_struct {
1381 /* Revert to default priority/policy when forking */ 1441 /* Revert to default priority/policy when forking */
1382 unsigned sched_reset_on_fork:1; 1442 unsigned sched_reset_on_fork:1;
1383 unsigned sched_contributes_to_load:1; 1443 unsigned sched_contributes_to_load:1;
1444 unsigned sched_migrated:1;
1384 1445
1385#ifdef CONFIG_MEMCG_KMEM 1446#ifdef CONFIG_MEMCG_KMEM
1386 unsigned memcg_kmem_skip_account:1; 1447 unsigned memcg_kmem_skip_account:1;
1387#endif 1448#endif
1449#ifdef CONFIG_COMPAT_BRK
1450 unsigned brk_randomized:1;
1451#endif
1388 1452
1389 unsigned long atomic_flags; /* Flags needing atomic access. */ 1453 unsigned long atomic_flags; /* Flags needing atomic access. */
1390 1454
@@ -1461,7 +1525,7 @@ struct task_struct {
1461 it with task_lock()) 1525 it with task_lock())
1462 - initialized normally by setup_new_exec */ 1526 - initialized normally by setup_new_exec */
1463/* file system info */ 1527/* file system info */
1464 int link_count, total_link_count; 1528 struct nameidata *nameidata;
1465#ifdef CONFIG_SYSVIPC 1529#ifdef CONFIG_SYSVIPC
1466/* ipc stuff */ 1530/* ipc stuff */
1467 struct sysv_sem sysvsem; 1531 struct sysv_sem sysvsem;
@@ -1511,6 +1575,8 @@ struct task_struct {
1511 /* Protection of the PI data structures: */ 1575 /* Protection of the PI data structures: */
1512 raw_spinlock_t pi_lock; 1576 raw_spinlock_t pi_lock;
1513 1577
1578 struct wake_q_node wake_q;
1579
1514#ifdef CONFIG_RT_MUTEXES 1580#ifdef CONFIG_RT_MUTEXES
1515 /* PI waiters blocked on a rt_mutex held by this task */ 1581 /* PI waiters blocked on a rt_mutex held by this task */
1516 struct rb_root pi_waiters; 1582 struct rb_root pi_waiters;
@@ -1724,6 +1790,7 @@ struct task_struct {
1724#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1790#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1725 unsigned long task_state_change; 1791 unsigned long task_state_change;
1726#endif 1792#endif
1793 int pagefault_disabled;
1727}; 1794};
1728 1795
1729/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1796/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2077,22 +2144,22 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2077#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ 2144#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
2078#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ 2145#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
2079 2146
2080#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) 2147#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
2081#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) 2148#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
2082#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) 2149#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
2083#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) 2150#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
2084#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) 2151#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
2085#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) 2152#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
2086#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) 2153#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
2087 2154
2088#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) 2155#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2089#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) 2156#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2090 2157
2091extern bool task_set_jobctl_pending(struct task_struct *task, 2158extern bool task_set_jobctl_pending(struct task_struct *task,
2092 unsigned int mask); 2159 unsigned long mask);
2093extern void task_clear_jobctl_trapping(struct task_struct *task); 2160extern void task_clear_jobctl_trapping(struct task_struct *task);
2094extern void task_clear_jobctl_pending(struct task_struct *task, 2161extern void task_clear_jobctl_pending(struct task_struct *task,
2095 unsigned int mask); 2162 unsigned long mask);
2096 2163
2097static inline void rcu_copy_process(struct task_struct *p) 2164static inline void rcu_copy_process(struct task_struct *p)
2098{ 2165{
@@ -2532,6 +2599,9 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
2532} 2599}
2533#endif 2600#endif
2534 2601
2602#define tasklist_empty() \
2603 list_empty(&init_task.tasks)
2604
2535#define next_task(p) \ 2605#define next_task(p) \
2536 list_entry_rcu((p)->tasks.next, struct task_struct, tasks) 2606 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2537 2607
@@ -2962,11 +3032,6 @@ static __always_inline bool need_resched(void)
2962void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); 3032void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2963void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); 3033void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2964 3034
2965static inline void thread_group_cputime_init(struct signal_struct *sig)
2966{
2967 raw_spin_lock_init(&sig->cputimer.lock);
2968}
2969
2970/* 3035/*
2971 * Reevaluate whether the task has signals pending delivery. 3036 * Reevaluate whether the task has signals pending delivery.
2972 * Wake the task if so. 3037 * Wake the task if so.
@@ -3080,13 +3145,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
3080static inline unsigned long task_rlimit(const struct task_struct *tsk, 3145static inline unsigned long task_rlimit(const struct task_struct *tsk,
3081 unsigned int limit) 3146 unsigned int limit)
3082{ 3147{
3083 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); 3148 return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
3084} 3149}
3085 3150
3086static inline unsigned long task_rlimit_max(const struct task_struct *tsk, 3151static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3087 unsigned int limit) 3152 unsigned int limit)
3088{ 3153{
3089 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); 3154 return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
3090} 3155}
3091 3156
3092static inline unsigned long rlimit(unsigned int limit) 3157static inline unsigned long rlimit(unsigned int limit)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 596a0e007c62..c9e4731cf10b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size;
57extern unsigned int sysctl_sched_migration_cost; 57extern unsigned int sysctl_sched_migration_cost;
58extern unsigned int sysctl_sched_nr_migrate; 58extern unsigned int sysctl_sched_nr_migrate;
59extern unsigned int sysctl_sched_time_avg; 59extern unsigned int sysctl_sched_time_avg;
60extern unsigned int sysctl_timer_migration;
61extern unsigned int sysctl_sched_shares_window; 60extern unsigned int sysctl_sched_shares_window;
62 61
63int sched_proc_update_handler(struct ctl_table *table, int write, 62int sched_proc_update_handler(struct ctl_table *table, int write,
64 void __user *buffer, size_t *length, 63 void __user *buffer, size_t *length,
65 loff_t *ppos); 64 loff_t *ppos);
66#endif 65#endif
67#ifdef CONFIG_SCHED_DEBUG
68static inline unsigned int get_sysctl_timer_migration(void)
69{
70 return sysctl_timer_migration;
71}
72#else
73static inline unsigned int get_sysctl_timer_migration(void)
74{
75 return 1;
76}
77#endif
78 66
79/* 67/*
80 * control realtime throttling: 68 * control realtime throttling:
diff --git a/include/linux/security.h b/include/linux/security.h
index 18264ea9e314..52febde52479 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -43,7 +43,6 @@ struct file;
43struct vfsmount; 43struct vfsmount;
44struct path; 44struct path;
45struct qstr; 45struct qstr;
46struct nameidata;
47struct iattr; 46struct iattr;
48struct fown_struct; 47struct fown_struct;
49struct file_operations; 48struct file_operations;
@@ -477,7 +476,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
477 * @inode_follow_link: 476 * @inode_follow_link:
478 * Check permission to follow a symbolic link when looking up a pathname. 477 * Check permission to follow a symbolic link when looking up a pathname.
479 * @dentry contains the dentry structure for the link. 478 * @dentry contains the dentry structure for the link.
480 * @nd contains the nameidata structure for the parent directory. 479 * @inode contains the inode, which itself is not stable in RCU-walk
480 * @rcu indicates whether we are in RCU-walk mode.
481 * Return 0 if permission is granted. 481 * Return 0 if permission is granted.
482 * @inode_permission: 482 * @inode_permission:
483 * Check permission before accessing an inode. This hook is called by the 483 * Check permission before accessing an inode. This hook is called by the
@@ -1553,7 +1553,8 @@ struct security_operations {
1553 int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, 1553 int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
1554 struct inode *new_dir, struct dentry *new_dentry); 1554 struct inode *new_dir, struct dentry *new_dentry);
1555 int (*inode_readlink) (struct dentry *dentry); 1555 int (*inode_readlink) (struct dentry *dentry);
1556 int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); 1556 int (*inode_follow_link) (struct dentry *dentry, struct inode *inode,
1557 bool rcu);
1557 int (*inode_permission) (struct inode *inode, int mask); 1558 int (*inode_permission) (struct inode *inode, int mask);
1558 int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); 1559 int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
1559 int (*inode_getattr) (const struct path *path); 1560 int (*inode_getattr) (const struct path *path);
@@ -1839,7 +1840,8 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
1839 struct inode *new_dir, struct dentry *new_dentry, 1840 struct inode *new_dir, struct dentry *new_dentry,
1840 unsigned int flags); 1841 unsigned int flags);
1841int security_inode_readlink(struct dentry *dentry); 1842int security_inode_readlink(struct dentry *dentry);
1842int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); 1843int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
1844 bool rcu);
1843int security_inode_permission(struct inode *inode, int mask); 1845int security_inode_permission(struct inode *inode, int mask);
1844int security_inode_setattr(struct dentry *dentry, struct iattr *attr); 1846int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
1845int security_inode_getattr(const struct path *path); 1847int security_inode_getattr(const struct path *path);
@@ -2242,7 +2244,8 @@ static inline int security_inode_readlink(struct dentry *dentry)
2242} 2244}
2243 2245
2244static inline int security_inode_follow_link(struct dentry *dentry, 2246static inline int security_inode_follow_link(struct dentry *dentry,
2245 struct nameidata *nd) 2247 struct inode *inode,
2248 bool rcu)
2246{ 2249{
2247 return 0; 2250 return 0;
2248} 2251}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5f68d0a391ce..486e685a226a 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -233,6 +233,47 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
233 s->sequence++; 233 s->sequence++;
234} 234}
235 235
236/**
237 * raw_write_seqcount_barrier - do a seq write barrier
238 * @s: pointer to seqcount_t
239 *
240 * This can be used to provide an ordering guarantee instead of the
241 * usual consistency guarantee. It is one wmb cheaper, because we can
242 * collapse the two back-to-back wmb()s.
243 *
244 * seqcount_t seq;
245 * bool X = true, Y = false;
246 *
247 * void read(void)
248 * {
249 * bool x, y;
250 *
251 * do {
252 * int s = read_seqcount_begin(&seq);
253 *
254 * x = X; y = Y;
255 *
256 * } while (read_seqcount_retry(&seq, s));
257 *
258 * BUG_ON(!x && !y);
259 * }
260 *
261 * void write(void)
262 * {
263 * Y = true;
264 *
265 * raw_write_seqcount_barrier(seq);
266 *
267 * X = false;
268 * }
269 */
270static inline void raw_write_seqcount_barrier(seqcount_t *s)
271{
272 s->sequence++;
273 smp_wmb();
274 s->sequence++;
275}
276
236/* 277/*
237 * raw_write_seqcount_latch - redirect readers to even/odd copy 278 * raw_write_seqcount_latch - redirect readers to even/odd copy
238 * @s: pointer to seqcount_t 279 * @s: pointer to seqcount_t
@@ -266,13 +307,13 @@ static inline void write_seqcount_end(seqcount_t *s)
266} 307}
267 308
268/** 309/**
269 * write_seqcount_barrier - invalidate in-progress read-side seq operations 310 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
270 * @s: pointer to seqcount_t 311 * @s: pointer to seqcount_t
271 * 312 *
272 * After write_seqcount_barrier, no read-side seq operations will complete 313 * After write_seqcount_invalidate, no read-side seq operations will complete
273 * successfully and see data older than this. 314 * successfully and see data older than this.
274 */ 315 */
275static inline void write_seqcount_barrier(seqcount_t *s) 316static inline void write_seqcount_invalidate(seqcount_t *s)
276{ 317{
277 smp_wmb(); 318 smp_wmb();
278 s->sequence+=2; 319 s->sequence+=2;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f15154a879c7..d6cdd6e87d53 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,7 +34,9 @@
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/netdev_features.h> 35#include <linux/netdev_features.h>
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <net/flow_keys.h> 37#include <net/flow_dissector.h>
38#include <linux/splice.h>
39#include <linux/in6.h>
38 40
39/* A. Checksumming of received packets by device. 41/* A. Checksumming of received packets by device.
40 * 42 *
@@ -170,13 +172,19 @@ struct nf_bridge_info {
170 BRNF_PROTO_UNCHANGED, 172 BRNF_PROTO_UNCHANGED,
171 BRNF_PROTO_8021Q, 173 BRNF_PROTO_8021Q,
172 BRNF_PROTO_PPPOE 174 BRNF_PROTO_PPPOE
173 } orig_proto; 175 } orig_proto:8;
174 bool pkt_otherhost; 176 bool pkt_otherhost;
177 __u16 frag_max_size;
175 unsigned int mask; 178 unsigned int mask;
176 struct net_device *physindev; 179 struct net_device *physindev;
177 struct net_device *physoutdev; 180 union {
178 char neigh_header[8]; 181 struct net_device *physoutdev;
179 __be32 ipv4_daddr; 182 char neigh_header[8];
183 };
184 union {
185 __be32 ipv4_daddr;
186 struct in6_addr ipv6_daddr;
187 };
180}; 188};
181#endif 189#endif
182 190
@@ -859,6 +867,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
859 int len, int odd, struct sk_buff *skb), 867 int len, int odd, struct sk_buff *skb),
860 void *from, int length); 868 void *from, int length);
861 869
870int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
871 int offset, size_t size);
872
862struct skb_seq_state { 873struct skb_seq_state {
863 __u32 lower_offset; 874 __u32 lower_offset;
864 __u32 upper_offset; 875 __u32 upper_offset;
@@ -919,7 +930,6 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
919 skb->hash = hash; 930 skb->hash = hash;
920} 931}
921 932
922void __skb_get_hash(struct sk_buff *skb);
923static inline __u32 skb_get_hash(struct sk_buff *skb) 933static inline __u32 skb_get_hash(struct sk_buff *skb)
924{ 934{
925 if (!skb->l4_hash && !skb->sw_hash) 935 if (!skb->l4_hash && !skb->sw_hash)
@@ -928,6 +938,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
928 return skb->hash; 938 return skb->hash;
929} 939}
930 940
941__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
942
931static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) 943static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
932{ 944{
933 return skb->hash; 945 return skb->hash;
@@ -1935,8 +1947,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
1935 1947
1936 if (skb_transport_header_was_set(skb)) 1948 if (skb_transport_header_was_set(skb))
1937 return; 1949 return;
1938 else if (skb_flow_dissect(skb, &keys)) 1950 else if (skb_flow_dissect_flow_keys(skb, &keys))
1939 skb_set_transport_header(skb, keys.thoff); 1951 skb_set_transport_header(skb, keys.control.thoff);
1940 else 1952 else
1941 skb_set_transport_header(skb, offset_hint); 1953 skb_set_transport_header(skb, offset_hint);
1942} 1954}
@@ -2127,10 +2139,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2127 kfree_skb(skb); 2139 kfree_skb(skb);
2128} 2140}
2129 2141
2130#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
2131#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
2132#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
2133
2134void *netdev_alloc_frag(unsigned int fragsz); 2142void *netdev_alloc_frag(unsigned int fragsz);
2135 2143
2136struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, 2144struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2185,6 +2193,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2185 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); 2193 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2186} 2194}
2187 2195
2196static inline void skb_free_frag(void *addr)
2197{
2198 __free_page_frag(addr);
2199}
2200
2188void *napi_alloc_frag(unsigned int fragsz); 2201void *napi_alloc_frag(unsigned int fragsz);
2189struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, 2202struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2190 unsigned int length, gfp_t gfp_mask); 2203 unsigned int length, gfp_t gfp_mask);
@@ -2692,9 +2705,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2692int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); 2705int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
2693__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, 2706__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
2694 int len, __wsum csum); 2707 int len, __wsum csum);
2695int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 2708ssize_t skb_socket_splice(struct sock *sk,
2709 struct pipe_inode_info *pipe,
2710 struct splice_pipe_desc *spd);
2711int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2696 struct pipe_inode_info *pipe, unsigned int len, 2712 struct pipe_inode_info *pipe, unsigned int len,
2697 unsigned int flags); 2713 unsigned int flags,
2714 ssize_t (*splice_cb)(struct sock *,
2715 struct pipe_inode_info *,
2716 struct splice_pipe_desc *));
2698void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2717void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2699unsigned int skb_zerocopy_headlen(const struct sk_buff *from); 2718unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
2700int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, 2719int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -2729,8 +2748,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2729__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 2748__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2730 __wsum csum); 2749 __wsum csum);
2731 2750
2732static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, 2751static inline void * __must_check
2733 int len, void *data, int hlen, void *buffer) 2752__skb_header_pointer(const struct sk_buff *skb, int offset,
2753 int len, void *data, int hlen, void *buffer)
2734{ 2754{
2735 if (hlen - offset >= len) 2755 if (hlen - offset >= len)
2736 return data + offset; 2756 return data + offset;
@@ -2742,8 +2762,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
2742 return buffer; 2762 return buffer;
2743} 2763}
2744 2764
2745static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2765static inline void * __must_check
2746 int len, void *buffer) 2766skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
2747{ 2767{
2748 return __skb_header_pointer(skb, offset, len, skb->data, 2768 return __skb_header_pointer(skb, offset, len, skb->data,
2749 skb_headlen(skb), buffer); 2769 skb_headlen(skb), buffer);
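
With __must_check, a caller that ignores the result now warns; the usual pattern tests for a truncated packet before use (the header type and offset are illustrative):

        struct tcphdr _th;
        const struct tcphdr *th;

        th = skb_header_pointer(skb, offset, sizeof(_th), &_th);
        if (!th)
                return;         /* skb too short to hold the header */
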
@@ -3050,7 +3070,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3050 } 3070 }
3051 } else if (skb->csum_bad) { 3071 } else if (skb->csum_bad) {
3052 /* ip_summed == CHECKSUM_NONE in this case */ 3072 /* ip_summed == CHECKSUM_NONE in this case */
3053 return 1; 3073 return (__force __sum16)1;
3054 } 3074 }
3055 3075
3056 skb->csum = psum; 3076 skb->csum = psum;
@@ -3298,9 +3318,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3298 return skb->queue_mapping != 0; 3318 return skb->queue_mapping != 0;
3299} 3319}
3300 3320
3301u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3302 unsigned int num_tx_queues);
3303
3304static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 3321static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3305{ 3322{
3306#ifdef CONFIG_XFRM 3323#ifdef CONFIG_XFRM
@@ -3355,15 +3372,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3355static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) 3372static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3356{ 3373{
3357 int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - 3374 int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
3358 skb_transport_offset(skb); 3375 skb_transport_offset(skb);
3359 __u16 csum; 3376 __wsum partial;
3360 3377
3361 csum = csum_fold(csum_partial(skb_transport_header(skb), 3378 partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
3362 plen, skb->csum));
3363 skb->csum = res; 3379 skb->csum = res;
3364 SKB_GSO_CB(skb)->csum_start -= plen; 3380 SKB_GSO_CB(skb)->csum_start -= plen;
3365 3381
3366 return csum; 3382 return csum_fold(partial);
3367} 3383}
3368 3384
3369static inline bool skb_is_gso(const struct sk_buff *skb) 3385static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3418,10 +3434,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
3418bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 3434bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3419 3435
3420int skb_checksum_setup(struct sk_buff *skb, bool recalculate); 3436int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
3421 3437struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
3422u32 skb_get_poff(const struct sk_buff *skb); 3438 unsigned int transport_len,
3423u32 __skb_get_poff(const struct sk_buff *skb, void *data, 3439 __sum16(*skb_chkf)(struct sk_buff *skb));
3424 const struct flow_keys *keys, int hlen);
3425 3440
3426/** 3441/**
3427 * skb_head_is_locked - Determine if the skb->head is locked down 3442 * skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ffd24c830151..9de2fdc8b5e4 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -153,8 +153,30 @@ size_t ksize(const void *);
153#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN 153#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
154#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN 154#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
155#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN) 155#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
156/*
157 * KMALLOC_LOOP_LOW is the starting index for the loop in
158 * create_kmalloc_caches() that creates the kmalloc_caches objects.
159 * As kmalloc_index() shows, index 1 (kmalloc-96) is used when
160 * KMALLOC_MIN_SIZE <= 32 and index 2 (kmalloc-192) when KMALLOC_MIN_SIZE
161 * <= 64; if KMALLOC_MIN_SIZE is larger than 64, neither cache is
162 * needed and the loop can start directly at KMALLOC_SHIFT_LOW.
163 */
164#if KMALLOC_MIN_SIZE <= 32
165#define KMALLOC_LOOP_LOW 1
166#elif KMALLOC_MIN_SIZE <= 64
167#define KMALLOC_LOOP_LOW 2
168#else
169#define KMALLOC_LOOP_LOW KMALLOC_SHIFT_LOW
170#endif
171
156#else 172#else
157#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 173#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
174/*
175 * KMALLOC_MIN_SIZE is 2^3 for slub and slob and 2^5 for slab, so even
176 * for slab KMALLOC_MIN_SIZE <= 32 and kmalloc-96 and kmalloc-192 must
177 * also be initialized.
178 */
179#define KMALLOC_LOOP_LOW 1
158#endif 180#endif
159 181
160/* 182/*
@@ -240,8 +262,8 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
240 * belongs to. 262 * belongs to.
241 * 0 = zero alloc 263 * 0 = zero alloc
242 * 1 = 65 .. 96 bytes 264 * 1 = 65 .. 96 bytes
243 * 2 = 120 .. 192 bytes 265 * 2 = 129 .. 192 bytes
244 * n = 2^(n-1) .. 2^n -1 266 * n = 2^(n-1)+1 .. 2^n
245 */ 267 */
246static __always_inline int kmalloc_index(size_t size) 268static __always_inline int kmalloc_index(size_t size)
247{ 269{
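
As a worked instance of the corrected table above: a 100-byte request falls in the 97..128 range (2^6+1 .. 2^7), so kmalloc_index() returns 7 and the allocation is served from kmalloc-128, while a 192-byte request maps exactly to index 2 (kmalloc-192).
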
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index d600afb21926..da3c593f9845 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -27,6 +27,8 @@ struct smpboot_thread_data;
27 * @pre_unpark: Optional unpark function, called before the thread is 27 * @pre_unpark: Optional unpark function, called before the thread is
28 * unparked (cpu online). This is not guaranteed to be 28 * unparked (cpu online). This is not guaranteed to be
29 * called on the target cpu of the thread. Careful! 29 * called on the target cpu of the thread. Careful!
30 * @cpumask: Internal state. To update which threads are unparked,
31 * call smpboot_update_cpumask_percpu_thread().
30 * @selfparking: Thread is not parked by the park function. 32 * @selfparking: Thread is not parked by the park function.
31 * @thread_comm: The base name of the thread 33 * @thread_comm: The base name of the thread
32 */ 34 */
@@ -41,11 +43,14 @@ struct smp_hotplug_thread {
41 void (*park)(unsigned int cpu); 43 void (*park)(unsigned int cpu);
42 void (*unpark)(unsigned int cpu); 44 void (*unpark)(unsigned int cpu);
43 void (*pre_unpark)(unsigned int cpu); 45 void (*pre_unpark)(unsigned int cpu);
46 cpumask_var_t cpumask;
44 bool selfparking; 47 bool selfparking;
45 const char *thread_comm; 48 const char *thread_comm;
46}; 49};
47 50
48int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); 51int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
49void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); 52void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
53int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
54 const struct cpumask *);
50 55
51#endif 56#endif
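
A hedged sketch of the new cpumask update call; the thread descriptor and mask names are illustrative:

        /* restrict already-registered per-cpu threads to `allowed` */
        ret = smpboot_update_cpumask_percpu_thread(&my_hotplug_thread, allowed);
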
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 083ac388098e..fddebc617469 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -1,7 +1,10 @@
1#ifndef __SOCK_DIAG_H__ 1#ifndef __SOCK_DIAG_H__
2#define __SOCK_DIAG_H__ 2#define __SOCK_DIAG_H__
3 3
4#include <linux/netlink.h>
4#include <linux/user_namespace.h> 5#include <linux/user_namespace.h>
6#include <net/net_namespace.h>
7#include <net/sock.h>
5#include <uapi/linux/sock_diag.h> 8#include <uapi/linux/sock_diag.h>
6 9
7struct sk_buff; 10struct sk_buff;
@@ -11,6 +14,7 @@ struct sock;
11struct sock_diag_handler { 14struct sock_diag_handler {
12 __u8 family; 15 __u8 family;
13 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); 16 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
17 int (*get_info)(struct sk_buff *skb, struct sock *sk);
14}; 18};
15 19
16int sock_diag_register(const struct sock_diag_handler *h); 20int sock_diag_register(const struct sock_diag_handler *h);
@@ -26,4 +30,42 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
26int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, 30int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
27 struct sk_buff *skb, int attrtype); 31 struct sk_buff *skb, int attrtype);
28 32
33static inline
34enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
35{
36 switch (sk->sk_family) {
37 case AF_INET:
38 switch (sk->sk_protocol) {
39 case IPPROTO_TCP:
40 return SKNLGRP_INET_TCP_DESTROY;
41 case IPPROTO_UDP:
42 return SKNLGRP_INET_UDP_DESTROY;
43 default:
44 return SKNLGRP_NONE;
45 }
46 case AF_INET6:
47 switch (sk->sk_protocol) {
48 case IPPROTO_TCP:
49 return SKNLGRP_INET6_TCP_DESTROY;
50 case IPPROTO_UDP:
51 return SKNLGRP_INET6_UDP_DESTROY;
52 default:
53 return SKNLGRP_NONE;
54 }
55 default:
56 return SKNLGRP_NONE;
57 }
58}
59
60static inline
61bool sock_diag_has_destroy_listeners(const struct sock *sk)
62{
63 const struct net *n = sock_net(sk);
64 const enum sknetlink_groups group = sock_diag_destroy_group(sk);
65
66 return group != SKNLGRP_NONE && n->diag_nlsk &&
67 netlink_has_listeners(n->diag_nlsk, group);
68}
69void sock_diag_broadcast_destroy(struct sock *sk);
70
29#endif 71#endif
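
The intended call pattern for the destroy-broadcast helpers added above is to gate the broadcast on the listener check; a sketch of a hypothetical socket-destroy path:

        if (sock_diag_has_destroy_listeners(sk))
                sock_diag_broadcast_destroy(sk);
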
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
index e741e8baad92..85b8ee67e937 100644
--- a/include/linux/spi/cc2520.h
+++ b/include/linux/spi/cc2520.h
@@ -21,7 +21,6 @@ struct cc2520_platform_data {
21 int sfd; 21 int sfd;
22 int reset; 22 int reset;
23 int vreg; 23 int vreg;
24 bool amplified;
25}; 24};
26 25
27#endif 26#endif
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3e18379dfa6f..0063b24b4f36 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -120,7 +120,7 @@ do { \
120/* 120/*
 121 * Despite its name it doesn't necessarily have to be a full barrier. 121 * Despite its name it doesn't necessarily have to be a full barrier.
122 * It should only guarantee that a STORE before the critical section 122 * It should only guarantee that a STORE before the critical section
123 * can not be reordered with a LOAD inside this section. 123 * can not be reordered with LOADs and STOREs inside this section.
124 * spin_lock() is the one-way barrier, this LOAD can not escape out 124 * spin_lock() is the one-way barrier, this LOAD can not escape out
125 * of the region. So the default implementation simply ensures that 125 * of the region. So the default implementation simply ensures that
126 * a STORE can not move into the critical section, smp_wmb() should 126 * a STORE can not move into the critical section, smp_wmb() should
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7f484a239f53..c735f5c91eea 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
99 int phy_addr; 99 int phy_addr;
100 int interface; 100 int interface;
101 struct stmmac_mdio_bus_data *mdio_bus_data; 101 struct stmmac_mdio_bus_data *mdio_bus_data;
102 struct device_node *phy_node;
102 struct stmmac_dma_cfg *dma_cfg; 103 struct stmmac_dma_cfg *dma_cfg;
103 int clk_csr; 104 int clk_csr;
104 int has_gmac; 105 int has_gmac;
diff --git a/include/linux/sw842.h b/include/linux/sw842.h
new file mode 100644
index 000000000000..109ba041c2ae
--- /dev/null
+++ b/include/linux/sw842.h
@@ -0,0 +1,12 @@
1#ifndef __SW842_H__
2#define __SW842_H__
3
4#define SW842_MEM_COMPRESS (0xf000)
5
6int sw842_compress(const u8 *src, unsigned int srclen,
7 u8 *dst, unsigned int *destlen, void *wmem);
8
9int sw842_decompress(const u8 *src, unsigned int srclen,
10 u8 *dst, unsigned int *destlen);
11
12#endif
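
A minimal usage sketch for the new software 842 API; the input buffer, its length, and the output size are illustrative:

        static u8 wmem[SW842_MEM_COMPRESS]; /* scratch space for compression */
        u8 out[PAGE_SIZE];
        unsigned int outlen = sizeof(out);
        int ret;

        ret = sw842_compress(src, srclen, out, &outlen, wmem);
        if (ret)
                return ret;     /* e.g. the output buffer was too small */
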
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cee108cbe2d5..38874729dc5f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -377,7 +377,6 @@ extern void end_swap_bio_write(struct bio *bio, int err);
377extern int __swap_writepage(struct page *page, struct writeback_control *wbc, 377extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
378 void (*end_write_func)(struct bio *, int)); 378 void (*end_write_func)(struct bio *, int));
379extern int swap_set_page_dirty(struct page *page); 379extern int swap_set_page_dirty(struct page *page);
380extern void end_swap_bio_read(struct bio *bio, int err);
381 380
382int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 381int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
383 unsigned long nr_pages, sector_t start_block); 382 unsigned long nr_pages, sector_t start_block);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e8bbf403618f..48c3696e8645 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -149,11 +149,16 @@ struct tcp_sock {
149 * sum(delta(rcv_nxt)), or how many bytes 149 * sum(delta(rcv_nxt)), or how many bytes
150 * were acked. 150 * were acked.
151 */ 151 */
152 u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
153 * total number of segments in.
154 */
152 u32 rcv_nxt; /* What we want to receive next */ 155 u32 rcv_nxt; /* What we want to receive next */
153 u32 copied_seq; /* Head of yet unread data */ 156 u32 copied_seq; /* Head of yet unread data */
154 u32 rcv_wup; /* rcv_nxt on last window update sent */ 157 u32 rcv_wup; /* rcv_nxt on last window update sent */
155 u32 snd_nxt; /* Next sequence we send */ 158 u32 snd_nxt; /* Next sequence we send */
156 159 u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
160 * The total number of segments sent.
161 */
157 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 162 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
158 * sum(delta(snd_una)), or how many bytes 163 * sum(delta(snd_una)), or how many bytes
159 * were acked. 164 * were acked.
@@ -201,6 +206,7 @@ struct tcp_sock {
201 syn_fastopen:1, /* SYN includes Fast Open option */ 206 syn_fastopen:1, /* SYN includes Fast Open option */
202 syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ 207 syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
203 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ 208 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
209 save_syn:1, /* Save headers of SYN packet */
204 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ 210 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
205 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ 211 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
206 212
@@ -328,6 +334,7 @@ struct tcp_sock {
328 * socket. Used to retransmit SYNACKs etc. 334 * socket. Used to retransmit SYNACKs etc.
329 */ 335 */
330 struct request_sock *fastopen_rsk; 336 struct request_sock *fastopen_rsk;
337 u32 *saved_syn;
331}; 338};
332 339
333enum tsq_flags { 340enum tsq_flags {
@@ -395,4 +402,10 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog)
395 return 0; 402 return 0;
396} 403}
397 404
405static inline void tcp_saved_syn_free(struct tcp_sock *tp)
406{
407 kfree(tp->saved_syn);
408 tp->saved_syn = NULL;
409}
410
398#endif /* _LINUX_TCP_H */ 411#endif /* _LINUX_TCP_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 5eac316490ea..037e9df2f610 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -40,6 +40,9 @@
40/* No upper/lower limit requirement */ 40/* No upper/lower limit requirement */
41#define THERMAL_NO_LIMIT ((u32)~0) 41#define THERMAL_NO_LIMIT ((u32)~0)
42 42
43/* Default weight of a bound cooling device */
44#define THERMAL_WEIGHT_DEFAULT 0
45
43/* Unit conversion macros */ 46/* Unit conversion macros */
44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ 47#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10) 48 ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
@@ -56,10 +59,13 @@
56#define DEFAULT_THERMAL_GOVERNOR "fair_share" 59#define DEFAULT_THERMAL_GOVERNOR "fair_share"
57#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) 60#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
58#define DEFAULT_THERMAL_GOVERNOR "user_space" 61#define DEFAULT_THERMAL_GOVERNOR "user_space"
62#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
63#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
59#endif 64#endif
60 65
61struct thermal_zone_device; 66struct thermal_zone_device;
62struct thermal_cooling_device; 67struct thermal_cooling_device;
68struct thermal_instance;
63 69
64enum thermal_device_mode { 70enum thermal_device_mode {
65 THERMAL_DEVICE_DISABLED = 0, 71 THERMAL_DEVICE_DISABLED = 0,
@@ -113,6 +119,12 @@ struct thermal_cooling_device_ops {
113 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); 119 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
114 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); 120 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
115 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); 121 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
122 int (*get_requested_power)(struct thermal_cooling_device *,
123 struct thermal_zone_device *, u32 *);
124 int (*state2power)(struct thermal_cooling_device *,
125 struct thermal_zone_device *, unsigned long, u32 *);
126 int (*power2state)(struct thermal_cooling_device *,
127 struct thermal_zone_device *, u32, unsigned long *);
116}; 128};
117 129
118struct thermal_cooling_device { 130struct thermal_cooling_device {
@@ -144,8 +156,7 @@ struct thermal_attr {
144 * @devdata: private pointer for device private data 156 * @devdata: private pointer for device private data
145 * @trips: number of trip points the thermal zone supports 157 * @trips: number of trip points the thermal zone supports
146 * @passive_delay: number of milliseconds to wait between polls when 158 * @passive_delay: number of milliseconds to wait between polls when
 147 * performing passive cooling. Currently only used by the 159 * performing passive cooling.
148 * step-wise governor
149 * @polling_delay: number of milliseconds to wait between polls when 160 * @polling_delay: number of milliseconds to wait between polls when
150 * checking whether trip points have been crossed (0 for 161 * checking whether trip points have been crossed (0 for
151 * interrupt driven systems) 162 * interrupt driven systems)
@@ -155,13 +166,13 @@ struct thermal_attr {
155 * @last_temperature: previous temperature read 166 * @last_temperature: previous temperature read
156 * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION 167 * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
157 * @passive: 1 if you've crossed a passive trip point, 0 otherwise. 168 * @passive: 1 if you've crossed a passive trip point, 0 otherwise.
 158 * Currently only used by the step-wise governor.
159 * @forced_passive: If > 0, temperature at which to switch on all ACPI 169 * @forced_passive: If > 0, temperature at which to switch on all ACPI
160 * processor cooling devices. Currently only used by the 170 * processor cooling devices. Currently only used by the
161 * step-wise governor. 171 * step-wise governor.
162 * @ops: operations this &thermal_zone_device supports 172 * @ops: operations this &thermal_zone_device supports
163 * @tzp: thermal zone parameters 173 * @tzp: thermal zone parameters
164 * @governor: pointer to the governor for this thermal zone 174 * @governor: pointer to the governor for this thermal zone
175 * @governor_data: private pointer for governor data
165 * @thermal_instances: list of &struct thermal_instance of this thermal zone 176 * @thermal_instances: list of &struct thermal_instance of this thermal zone
166 * @idr: &struct idr to generate unique id for this zone's cooling 177 * @idr: &struct idr to generate unique id for this zone's cooling
167 * devices 178 * devices
@@ -186,8 +197,9 @@ struct thermal_zone_device {
186 int passive; 197 int passive;
187 unsigned int forced_passive; 198 unsigned int forced_passive;
188 struct thermal_zone_device_ops *ops; 199 struct thermal_zone_device_ops *ops;
189 const struct thermal_zone_params *tzp; 200 struct thermal_zone_params *tzp;
190 struct thermal_governor *governor; 201 struct thermal_governor *governor;
202 void *governor_data;
191 struct list_head thermal_instances; 203 struct list_head thermal_instances;
192 struct idr idr; 204 struct idr idr;
193 struct mutex lock; 205 struct mutex lock;
@@ -198,12 +210,19 @@ struct thermal_zone_device {
198/** 210/**
199 * struct thermal_governor - structure that holds thermal governor information 211 * struct thermal_governor - structure that holds thermal governor information
200 * @name: name of the governor 212 * @name: name of the governor
213 * @bind_to_tz: callback called when binding to a thermal zone. If it
214 * returns 0, the governor is bound to the thermal zone,
215 * otherwise it fails.
216 * @unbind_from_tz: callback called when a governor is unbound from a
217 * thermal zone.
201 * @throttle: callback called for every trip point even if temperature is 218 * @throttle: callback called for every trip point even if temperature is
202 * below the trip point temperature 219 * below the trip point temperature
203 * @governor_list: node in thermal_governor_list (in thermal_core.c) 220 * @governor_list: node in thermal_governor_list (in thermal_core.c)
204 */ 221 */
205struct thermal_governor { 222struct thermal_governor {
206 char name[THERMAL_NAME_LENGTH]; 223 char name[THERMAL_NAME_LENGTH];
224 int (*bind_to_tz)(struct thermal_zone_device *tz);
225 void (*unbind_from_tz)(struct thermal_zone_device *tz);
207 int (*throttle)(struct thermal_zone_device *tz, int trip); 226 int (*throttle)(struct thermal_zone_device *tz, int trip);
208 struct list_head governor_list; 227 struct list_head governor_list;
209}; 228};
@@ -214,9 +233,12 @@ struct thermal_bind_params {
214 233
215 /* 234 /*
216 * This is a measure of 'how effectively these devices can 235 * This is a measure of 'how effectively these devices can
217 * cool 'this' thermal zone. The shall be determined by platform 236 * cool 'this' thermal zone. It shall be determined by
218 * characterization. This is on a 'percentage' scale. 237 * platform characterization. This value is relative to the
219 * See Documentation/thermal/sysfs-api.txt for more information. 238 * rest of the weights so a cooling device whose weight is
239 * double that of another cooling device is twice as
240 * effective. See Documentation/thermal/sysfs-api.txt for more
241 * information.
220 */ 242 */
221 int weight; 243 int weight;
222 244
@@ -253,6 +275,44 @@ struct thermal_zone_params {
253 275
254 int num_tbps; /* Number of tbp entries */ 276 int num_tbps; /* Number of tbp entries */
255 struct thermal_bind_params *tbp; 277 struct thermal_bind_params *tbp;
278
279 /*
280 * Sustainable power (heat) that this thermal zone can dissipate in
281 * mW
282 */
283 u32 sustainable_power;
284
285 /*
286 * Proportional parameter of the PID controller when
287 * overshooting (i.e., when temperature is below the target)
288 */
289 s32 k_po;
290
291 /*
292 * Proportional parameter of the PID controller when
293 * undershooting
294 */
295 s32 k_pu;
296
297 /* Integral parameter of the PID controller */
298 s32 k_i;
299
300 /* Derivative parameter of the PID controller */
301 s32 k_d;
302
303 /* threshold below which the error is no longer accumulated */
304 s32 integral_cutoff;
305
306 /*
307 * @slope: slope of a linear temperature adjustment curve.
308 * Used by thermal zone drivers.
309 */
310 int slope;
311 /*
312 * @offset: offset of a linear temperature adjustment curve.
313 * Used by thermal zone drivers (default 0).
314 */
315 int offset;
256}; 316};
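
A rough sketch of how a power-allocator-style governor could combine these new fields into a power budget, following the field descriptions above; the temperature arguments, the plain integer (non-fixed-point) arithmetic, and the static state are simplifying assumptions, not the in-kernel algorithm:

        static s64 err_integral;
        static s32 prev_err;

        static u32 pid_power_budget(const struct thermal_zone_params *tzp,
                                    int target_temp, int cur_temp)
        {
                s32 err = target_temp - cur_temp;
                /* gain scheduling: k_po while below the target, k_pu above */
                s32 k_p = (cur_temp < target_temp) ? tzp->k_po : tzp->k_pu;
                s32 d_err = err - prev_err;

                prev_err = err;
                /* the real governor gates this term via integral_cutoff */
                err_integral += (s64)tzp->k_i * err;

                return tzp->sustainable_power + (s64)k_p * err +
                       err_integral + (s64)tzp->k_d * d_err;
        }
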
257 317
258struct thermal_genl_event { 318struct thermal_genl_event {
@@ -316,14 +376,25 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
316#endif 376#endif
317 377
318#if IS_ENABLED(CONFIG_THERMAL) 378#if IS_ENABLED(CONFIG_THERMAL)
379static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
380{
381 return cdev->ops->get_requested_power && cdev->ops->state2power &&
382 cdev->ops->power2state;
383}
384
385int power_actor_get_max_power(struct thermal_cooling_device *,
386 struct thermal_zone_device *tz, u32 *max_power);
387int power_actor_set_power(struct thermal_cooling_device *,
388 struct thermal_instance *, u32);
319struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 389struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
320 void *, struct thermal_zone_device_ops *, 390 void *, struct thermal_zone_device_ops *,
321 const struct thermal_zone_params *, int, int); 391 struct thermal_zone_params *, int, int);
322void thermal_zone_device_unregister(struct thermal_zone_device *); 392void thermal_zone_device_unregister(struct thermal_zone_device *);
323 393
324int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, 394int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
325 struct thermal_cooling_device *, 395 struct thermal_cooling_device *,
326 unsigned long, unsigned long); 396 unsigned long, unsigned long,
397 unsigned int);
327int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, 398int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
328 struct thermal_cooling_device *); 399 struct thermal_cooling_device *);
329void thermal_zone_device_update(struct thermal_zone_device *); 400void thermal_zone_device_update(struct thermal_zone_device *);
@@ -343,6 +414,14 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
343void thermal_cdev_update(struct thermal_cooling_device *); 414void thermal_cdev_update(struct thermal_cooling_device *);
344void thermal_notify_framework(struct thermal_zone_device *, int); 415void thermal_notify_framework(struct thermal_zone_device *, int);
345#else 416#else
417static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
418{ return false; }
419static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
420 struct thermal_zone_device *tz, u32 *max_power)
421{ return 0; }
422static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
423 struct thermal_instance *tz, u32 power)
424{ return 0; }
346static inline struct thermal_zone_device *thermal_zone_device_register( 425static inline struct thermal_zone_device *thermal_zone_device_register(
347 const char *type, int trips, int mask, void *devdata, 426 const char *type, int trips, int mask, void *devdata,
348 struct thermal_zone_device_ops *ops, 427 struct thermal_zone_device_ops *ops,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f8492da57ad3..3741ba1a652c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -13,8 +13,6 @@
13 13
14#ifdef CONFIG_GENERIC_CLOCKEVENTS 14#ifdef CONFIG_GENERIC_CLOCKEVENTS
15extern void __init tick_init(void); 15extern void __init tick_init(void);
16extern void tick_freeze(void);
17extern void tick_unfreeze(void);
18/* Should be core only, but ARM BL switcher requires it */ 16/* Should be core only, but ARM BL switcher requires it */
19extern void tick_suspend_local(void); 17extern void tick_suspend_local(void);
20/* Should be core only, but XEN resume magic and ARM BL switcher require it */ 18/* Should be core only, but XEN resume magic and ARM BL switcher require it */
@@ -23,14 +21,20 @@ extern void tick_handover_do_timer(void);
23extern void tick_cleanup_dead_cpu(int cpu); 21extern void tick_cleanup_dead_cpu(int cpu);
24#else /* CONFIG_GENERIC_CLOCKEVENTS */ 22#else /* CONFIG_GENERIC_CLOCKEVENTS */
25static inline void tick_init(void) { } 23static inline void tick_init(void) { }
26static inline void tick_freeze(void) { }
27static inline void tick_unfreeze(void) { }
28static inline void tick_suspend_local(void) { } 24static inline void tick_suspend_local(void) { }
29static inline void tick_resume_local(void) { } 25static inline void tick_resume_local(void) { }
30static inline void tick_handover_do_timer(void) { } 26static inline void tick_handover_do_timer(void) { }
31static inline void tick_cleanup_dead_cpu(int cpu) { } 27static inline void tick_cleanup_dead_cpu(int cpu) { }
32#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 28#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
33 29
30#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
31extern void tick_freeze(void);
32extern void tick_unfreeze(void);
33#else
34static inline void tick_freeze(void) { }
35static inline void tick_unfreeze(void) { }
36#endif
37
34#ifdef CONFIG_TICK_ONESHOT 38#ifdef CONFIG_TICK_ONESHOT
35extern void tick_irq_enter(void); 39extern void tick_irq_enter(void);
36# ifndef arch_needs_cpu 40# ifndef arch_needs_cpu
@@ -134,6 +138,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
134 return cpumask_test_cpu(cpu, tick_nohz_full_mask); 138 return cpumask_test_cpu(cpu, tick_nohz_full_mask);
135} 139}
136 140
141static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
142{
143 if (tick_nohz_full_enabled())
144 cpumask_or(mask, mask, tick_nohz_full_mask);
145}
146
137extern void __tick_nohz_full_check(void); 147extern void __tick_nohz_full_check(void);
138extern void tick_nohz_full_kick(void); 148extern void tick_nohz_full_kick(void);
139extern void tick_nohz_full_kick_cpu(int cpu); 149extern void tick_nohz_full_kick_cpu(int cpu);
@@ -142,6 +152,7 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk);
142#else 152#else
143static inline bool tick_nohz_full_enabled(void) { return false; } 153static inline bool tick_nohz_full_enabled(void) { return false; }
144static inline bool tick_nohz_full_cpu(int cpu) { return false; } 154static inline bool tick_nohz_full_cpu(int cpu) { return false; }
155static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
145static inline void __tick_nohz_full_check(void) { } 156static inline void __tick_nohz_full_check(void) { }
146static inline void tick_nohz_full_kick_cpu(int cpu) { } 157static inline void tick_nohz_full_kick_cpu(int cpu) { }
147static inline void tick_nohz_full_kick(void) { } 158static inline void tick_nohz_full_kick(void) { }
diff --git a/include/linux/time64.h b/include/linux/time64.h
index a3831478d9cf..77b5df2acd2a 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -2,6 +2,7 @@
2#define _LINUX_TIME64_H 2#define _LINUX_TIME64_H
3 3
4#include <uapi/linux/time.h> 4#include <uapi/linux/time.h>
5#include <linux/math64.h>
5 6
6typedef __s64 time64_t; 7typedef __s64 time64_t;
7 8
@@ -28,6 +29,7 @@ struct timespec64 {
28#define FSEC_PER_SEC 1000000000000000LL 29#define FSEC_PER_SEC 1000000000000000LL
29 30
30/* Located here for timespec[64]_valid_strict */ 31/* Located here for timespec[64]_valid_strict */
32#define TIME64_MAX ((s64)~((u64)1 << 63))
31#define KTIME_MAX ((s64)~((u64)1 << 63)) 33#define KTIME_MAX ((s64)~((u64)1 << 63))
32#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) 34#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
33 35
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index fb86963859c7..25247220b4b7 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -49,6 +49,8 @@ struct tk_read_base {
49 * @offs_boot: Offset clock monotonic -> clock boottime 49 * @offs_boot: Offset clock monotonic -> clock boottime
50 * @offs_tai: Offset clock monotonic -> clock tai 50 * @offs_tai: Offset clock monotonic -> clock tai
51 * @tai_offset: The current UTC to TAI offset in seconds 51 * @tai_offset: The current UTC to TAI offset in seconds
52 * @clock_was_set_seq: The sequence number of clock was set events
53 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
52 * @raw_time: Monotonic raw base time in timespec64 format 54 * @raw_time: Monotonic raw base time in timespec64 format
53 * @cycle_interval: Number of clock cycles in one NTP interval 55 * @cycle_interval: Number of clock cycles in one NTP interval
54 * @xtime_interval: Number of clock shifted nano seconds in one NTP 56 * @xtime_interval: Number of clock shifted nano seconds in one NTP
@@ -60,6 +62,9 @@ struct tk_read_base {
  * shifted nano seconds.
  * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
  * ntp shifted nano seconds.
+ * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
+ * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
+ * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
  *
  * Note: For timespec(64) based interfaces wall_to_monotonic is what
  * we need to add to xtime (or xtime corrected for sub jiffie times)
@@ -85,6 +90,8 @@ struct timekeeper {
 	ktime_t offs_boot;
 	ktime_t offs_tai;
 	s32 tai_offset;
+	unsigned int clock_was_set_seq;
+	ktime_t next_leap_ktime;
 	struct timespec64 raw_time;
 
 	/* The following members are for timekeeping internal use */
@@ -104,6 +111,18 @@ struct timekeeper {
 	s64 ntp_error;
 	u32 ntp_error_shift;
 	u32 ntp_err_mult;
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+	long last_warning;
+	/*
+	 * These simple flag variables are managed
+	 * without locks, which is racy, but they are
+	 * ok since we don't really care about being
+	 * super precise about how many events were
+	 * seen, just that a problem was observed.
+	 */
+	int underflow_seen;
+	int overflow_seen;
+#endif
 };
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 99176af216af..3aa72e648650 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -163,6 +163,7 @@ extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
 extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
 extern ktime_t ktime_get_raw(void);
+extern u32 ktime_get_resolution_ns(void);
 
 /**
  * ktime_get_real - get the real (wall-) time in ktime_t format
@@ -266,7 +267,6 @@ extern int persistent_clock_is_local;
 
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock(struct timespec *ts);
 extern void read_boot_clock64(struct timespec64 *ts);
 extern int update_persistent_clock(struct timespec now);
 extern int update_persistent_clock64(struct timespec64 now);
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 8c5a197e1587..61aa61dc410c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -14,27 +14,23 @@ struct timer_list {
 	 * All fields that change during normal runtime grouped to the
 	 * same cacheline
 	 */
-	struct list_head entry;
+	struct hlist_node entry;
 	unsigned long expires;
-	struct tvec_base *base;
-
-	void (*function)(unsigned long);
-	unsigned long data;
-
-	int slack;
+	void (*function)(unsigned long);
+	unsigned long data;
+	u32 flags;
+	int slack;
 
 #ifdef CONFIG_TIMER_STATS
 	int start_pid;
 	void *start_site;
 	char start_comm[16];
 #endif
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map lockdep_map;
 #endif
 };
 
-extern struct tvec_base boot_tvec_bases;
-
 #ifdef CONFIG_LOCKDEP
 /*
  * NB: because we have to copy the lockdep_map, setting the lockdep_map key
@@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases;
 #endif
 
 /*
- * Note that all tvec_bases are at least 4 byte aligned and lower two bits
- * of base in timer_list is guaranteed to be zero. Use them for flags.
- *
  * A deferrable timer will work normally when the system is busy, but
  * will not cause a CPU to come out of idle just to service it; instead,
  * the timer will be serviced when the CPU eventually wakes up with a
@@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases;
  * workqueue locking issues. It's not meant for executing random crap
  * with interrupts disabled. Abuse is monitored!
  */
-#define TIMER_DEFERRABLE	0x1LU
-#define TIMER_IRQSAFE		0x2LU
-
-#define TIMER_FLAG_MASK		0x3LU
+#define TIMER_CPUMASK		0x0007FFFF
+#define TIMER_MIGRATING		0x00080000
+#define TIMER_BASEMASK		(TIMER_CPUMASK | TIMER_MIGRATING)
+#define TIMER_DEFERRABLE	0x00100000
+#define TIMER_IRQSAFE		0x00200000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
-	.entry = { .prev = TIMER_ENTRY_STATIC },	\
+	.entry = { .next = TIMER_ENTRY_STATIC },	\
 	.function = (_function),			\
 	.expires = (_expires),				\
 	.data = (_data),				\
-	.base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+	.flags = (_flags),				\
 	.slack = -1,					\
 	__TIMER_LOCKDEP_MAP_INITIALIZER(		\
 		__FILE__ ":" __stringify(__LINE__))	\
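
With the base pointer gone, the flags word carries both the CPU binding and the behavior bits: the low 19 bits (TIMER_CPUMASK) index the CPU, TIMER_MIGRATING marks a timer in flight between bases, and the type bits sit above TIMER_BASEMASK. A small userspace sketch of the packing, reusing the constants above (illustrative only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define TIMER_CPUMASK		0x0007FFFFu
    #define TIMER_DEFERRABLE	0x00100000u

    int main(void)
    {
    	/* Pack CPU 3 plus the deferrable bit into one 32-bit flags word. */
    	uint32_t flags = (3 & TIMER_CPUMASK) | TIMER_DEFERRABLE;

    	printf("cpu=%u deferrable=%d\n",
    	       (unsigned)(flags & TIMER_CPUMASK),
    	       !!(flags & TIMER_DEFERRABLE));
    	return 0;
    }
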
@@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
  */
 static inline int timer_pending(const struct timer_list * timer)
 {
-	return timer->entry.next != NULL;
+	return timer->entry.pprev != NULL;
 }
 
 extern void add_timer_on(struct timer_list *timer, int cpu);
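
timer_pending() must switch fields because of the hlist conversion: every queued hlist node has a non-NULL pprev (even the first node, whose pprev points at the head's first pointer), while next is NULL for the tail node, so only pprev reliably distinguishes queued from detached. A standalone sketch of that invariant with a stripped-down hlist, not the kernel's types:

    #include <stddef.h>
    #include <assert.h>

    struct hnode { struct hnode *next, **pprev; };
    struct hhead { struct hnode *first; };

    static void hadd(struct hhead *h, struct hnode *n)
    {
    	n->next = h->first;
    	if (h->first)
    		h->first->pprev = &n->next;
    	h->first = n;
    	n->pprev = &h->first;	/* first node still gets a non-NULL pprev */
    }

    static void hdel(struct hnode *n)
    {
    	*n->pprev = n->next;
    	if (n->next)
    		n->next->pprev = n->pprev;
    	n->next = NULL;
    	n->pprev = NULL;	/* detached: pprev is the "pending" test */
    }

    int main(void)
    {
    	struct hhead head = { 0 };
    	struct hnode a = { 0 }, b = { 0 };

    	hadd(&head, &a);
    	hadd(&head, &b);
    	assert(a.pprev && b.pprev);	/* both "pending" */
    	assert(a.next == NULL);		/* tail: next can't be the test */
    	hdel(&a);
    	assert(a.pprev == NULL);	/* no longer pending */
    	return 0;
    }
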
@@ -188,26 +182,16 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
 #define NEXT_TIMER_MAX_DELTA	((1UL << 30) - 1)
 
 /*
- * Return when the next timer-wheel timeout occurs (in absolute jiffies),
- * locks the timer base and does the comparison against the given
- * jiffie.
- */
-extern unsigned long get_next_timer_interrupt(unsigned long now);
-
-/*
  * Timer-statistics info:
  */
 #ifdef CONFIG_TIMER_STATS
 
 extern int timer_stats_active;
 
-#define TIMER_STATS_FLAG_DEFERRABLE 0x1
-
 extern void init_timer_stats(void);
 
 extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-				     void *timerf, char *comm,
-				     unsigned int timer_flag);
+				     void *timerf, char *comm, u32 flags);
 
 extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
 					       void *addr);
@@ -254,6 +238,15 @@ extern void run_local_timers(void);
 struct hrtimer;
 extern enum hrtimer_restart it_real_fn(struct hrtimer *);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#include <linux/sysctl.h>
+
+extern unsigned int sysctl_timer_migration;
+int timer_migration_handler(struct ctl_table *table, int write,
+			    void __user *buffer, size_t *lenp,
+			    loff_t *ppos);
+#endif
+
 unsigned long __round_jiffies(unsigned long j, int cpu);
 unsigned long __round_jiffies_relative(unsigned long j, int cpu);
 unsigned long round_jiffies(unsigned long j);
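
timer_migration_handler() is the proc handler behind the kernel.timer_migration sysctl, so a toggled value can also update the timer core's internal state rather than just the stored integer. A hedged sketch of the kind of ctl_table entry that would wire it up (the real table lives in kernel/sysctl.c; this is illustrative only):

    #include <linux/sysctl.h>
    #include <linux/timer.h>

    /* Sketch: sysctl entry routing writes through timer_migration_handler(). */
    static struct ctl_table timer_migration_table[] = {
    	{
    		.procname	= "timer_migration",
    		.data		= &sysctl_timer_migration,
    		.maxlen		= sizeof(unsigned int),
    		.mode		= 0644,
    		.proc_handler	= timer_migration_handler,
    	},
    	{ }
    };
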
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index a520fd70a59f..7eec17ad7fa1 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -16,10 +16,10 @@ struct timerqueue_head {
 };
 
 
-extern void timerqueue_add(struct timerqueue_head *head,
+extern bool timerqueue_add(struct timerqueue_head *head,
 			   struct timerqueue_node *node);
-extern void timerqueue_del(struct timerqueue_head *head,
+extern bool timerqueue_del(struct timerqueue_head *head,
 			   struct timerqueue_node *node);
 extern struct timerqueue_node *timerqueue_iterate_next(
 				struct timerqueue_node *node);
 
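
The switch from void to bool lets callers find out whether the operation changed the head of the queue, i.e. whether the earliest deadline moved and the event hardware needs reprogramming. A hedged sketch of that calling pattern; my_reprogram() is a stand-in, not a kernel function:

    #include <linux/timerqueue.h>

    static void my_reprogram(ktime_t expires);	/* hypothetical hw hook */

    static void my_enqueue(struct timerqueue_head *q, struct timerqueue_node *n)
    {
    	/* true return: @n became the new head, i.e. the earliest deadline */
    	if (timerqueue_add(q, n))
    		my_reprogram(n->expires);
    }
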
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 909b6e43b694..73ddad1e0fa3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -191,8 +191,8 @@ static inline int cpu_to_mem(int cpu)
 #ifndef topology_core_id
 #define topology_core_id(cpu) ((void)(cpu), 0)
 #endif
-#ifndef topology_thread_cpumask
-#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#ifndef topology_sibling_cpumask
+#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
 #endif
 #ifndef topology_core_cpumask
 #define topology_core_cpumask(cpu) cpumask_of(cpu)
@@ -201,7 +201,7 @@ static inline int cpu_to_mem(int cpu)
 #ifdef CONFIG_SCHED_SMT
 static inline const struct cpumask *cpu_smt_mask(int cpu)
 {
-	return topology_thread_cpumask(cpu);
+	return topology_sibling_cpumask(cpu);
 }
 #endif
 
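
The rename to topology_sibling_cpumask() is purely a naming fix ("thread" was ambiguous); the mask still contains the hardware threads sharing a core, including the queried CPU, and falls back to cpumask_of(cpu) where the architecture defines nothing better. A minimal sketch of typical use (the counting helper is invented):

    #include <linux/cpumask.h>
    #include <linux/topology.h>

    static unsigned int my_count_smt_siblings(int cpu)
    {
    	unsigned int n = 0;
    	int sibling;

    	/* Includes @cpu itself; on the fallback mask this counts 1. */
    	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
    		n++;
    	return n;
    }
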
diff --git a/include/linux/types.h b/include/linux/types.h
index 59698be03490..8715287c3b1f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -139,12 +139,20 @@ typedef unsigned long blkcnt_t;
  */
 #define pgoff_t unsigned long
 
-/* A dma_addr_t can hold any valid DMA or bus address for the platform */
+/*
+ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
+ * by the DMA API.
+ *
+ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
+ * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
+ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
+ * so they don't care about the size of the actual bus addresses.
+ */
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 typedef u64 dma_addr_t;
 #else
 typedef u32 dma_addr_t;
-#endif /* dma_addr_t */
+#endif
 
 typedef unsigned __bitwise__ gfp_t;
 typedef unsigned __bitwise__ fmode_t;
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 4b4439e75f45..df89c9bcba7d 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -68,11 +68,12 @@ struct u64_stats_sync {
 };
 
 
+static inline void u64_stats_init(struct u64_stats_sync *syncp)
+{
 #if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-# define u64_stats_init(syncp)	seqcount_init(syncp.seq)
-#else
-# define u64_stats_init(syncp)	do { } while (0)
+	seqcount_init(&syncp->seq);
 #endif
+}
 
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
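
Making u64_stats_init() a real inline function gives it one type-checked signature on every configuration; note the old 32-bit macro expanded to seqcount_init(syncp.seq), treating syncp as a struct rather than the pointer callers pass. The surrounding API is used like this; a hedged sketch around a made-up counter struct:

    #include <linux/u64_stats_sync.h>

    struct my_counters {
    	u64 packets;
    	struct u64_stats_sync syncp;
    };

    static void my_counters_setup(struct my_counters *c)
    {
    	c->packets = 0;
    	u64_stats_init(&c->syncp);	/* now typed and valid on all configs */
    }

    static void my_counters_inc(struct my_counters *c)
    {
    	u64_stats_update_begin(&c->syncp);
    	c->packets++;
    	u64_stats_update_end(&c->syncp);
    }

    static u64 my_counters_read(struct my_counters *c)
    {
    	unsigned int start;
    	u64 val;

    	do {	/* retry if a writer raced with us on 32-bit SMP */
    		start = u64_stats_fetch_begin(&c->syncp);
    		val = c->packets;
    	} while (u64_stats_fetch_retry(&c->syncp, start));
    	return val;
    }
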
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ecd3319dac33..ae572c138607 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,21 +1,30 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
-#include <linux/preempt.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 
+static __always_inline void pagefault_disabled_inc(void)
+{
+	current->pagefault_disabled++;
+}
+
+static __always_inline void pagefault_disabled_dec(void)
+{
+	current->pagefault_disabled--;
+	WARN_ON(current->pagefault_disabled < 0);
+}
+
 /*
- * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
+ * These routines enable/disable the pagefault handler. If disabled, it will
+ * not take any locks and go straight to the fixup table.
  *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * User access methods will not sleep when called from a pagefault_disabled()
+ * environment.
  */
 static inline void pagefault_disable(void)
 {
-	preempt_count_inc();
+	pagefault_disabled_inc();
 	/*
 	 * make sure to have issued the store before a pagefault
 	 * can hit.
@@ -25,18 +34,31 @@ static inline void pagefault_disable(void)
 
 static inline void pagefault_enable(void)
 {
-#ifndef CONFIG_PREEMPT
 	/*
 	 * make sure to issue those last loads/stores before enabling
 	 * the pagefault handler again.
 	 */
 	barrier();
-	preempt_count_dec();
-#else
-	preempt_enable();
-#endif
+	pagefault_disabled_dec();
 }
 
+/*
+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
+ */
+#define pagefault_disabled() (current->pagefault_disabled != 0)
+
+/*
+ * The pagefault handler is in general disabled by pagefault_disable() or
+ * when in irq context (via in_atomic()).
+ *
+ * This function should only be used by the fault handlers. Other users should
+ * stick to pagefault_disabled().
+ * Please NEVER use preempt_disable() to disable the fault handler. With
+ * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
+ * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
+ */
+#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
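
Now that pagefault_disable() maintains its own per-task counter instead of riding on preempt_count, a region can forbid sleeping user accesses without also disabling preemption. The canonical client is an opportunistic, non-sleeping user copy; a hedged sketch (the helper and buffer names are illustrative):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Sketch: try a user copy that must not sleep; caller handles failure. */
    static int my_peek_user(void *dst, const void __user *src, size_t len)
    {
    	unsigned long left;

    	pagefault_disable();
    	/* Faults go straight to the fixup table; no locks, no sleeping. */
    	left = __copy_from_user_inatomic(dst, src, len);
    	pagefault_enable();

    	return left ? -EFAULT : 0;
    }
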
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2db83349865b..d69ac4ecc88b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -969,7 +969,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
  * on that signal.
  */
 static inline int
-wait_on_bit(void *word, int bit, unsigned mode)
+wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
 	if (!test_bit(bit, word))
@@ -994,7 +994,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
  * on that signal.
  */
 static inline int
-wait_on_bit_io(void *word, int bit, unsigned mode)
+wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
 	if (!test_bit(bit, word))
@@ -1020,7 +1020,8 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
  * received a signal and the mode permitted wakeup on that signal.
  */
 static inline int
-wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
+wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
+		    unsigned long timeout)
 {
 	might_sleep();
 	if (!test_bit(bit, word))
@@ -1047,7 +1048,8 @@ wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
  * on that signal.
  */
 static inline int
-wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
+		   unsigned mode)
 {
 	might_sleep();
 	if (!test_bit(bit, word))
@@ -1075,7 +1077,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
  * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock(void *word, int bit, unsigned mode)
+wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
 	if (!test_and_set_bit(bit, word))
@@ -1099,7 +1101,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
  * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
 {
 	might_sleep();
 	if (!test_and_set_bit(bit, word))
@@ -1125,7 +1127,8 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
  * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
+			unsigned mode)
 {
 	might_sleep();
 	if (!test_and_set_bit(bit, word))
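
Typing word as unsigned long * matches what test_bit() and the waker side actually operate on, so mismatched callers now fail to compile instead of silently aliasing. The waiter/waker pairing looks like this; a hedged sketch with an invented flags word and bit:

    #include <linux/bitops.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    #define MY_BUSY_BIT	0	/* illustrative bit in a flags word */

    static unsigned long my_flags;

    static int my_wait_until_idle(void)
    {
    	/* Sleeps until MY_BUSY_BIT clears; returns nonzero on a signal. */
    	return wait_on_bit(&my_flags, MY_BUSY_BIT, TASK_INTERRUPTIBLE);
    }

    static void my_mark_idle(void)
    {
    	clear_bit(MY_BUSY_BIT, &my_flags);
    	smp_mb__after_atomic();		/* order the clear before the wakeup */
    	wake_up_bit(&my_flags, MY_BUSY_BIT);
    }
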
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b2dd371ec0ca..b333c945e571 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -7,6 +7,8 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
+#include <linux/flex_proportions.h>
+#include <linux/backing-dev-defs.h>
 
 DECLARE_PER_CPU(int, dirty_throttle_leaks);
 
@@ -84,18 +86,95 @@ struct writeback_control {
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
 	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
+#ifdef CONFIG_CGROUP_WRITEBACK
+	struct bdi_writeback *wb;	/* wb this writeback is issued under */
+	struct inode *inode;		/* inode being written out */
+
+	/* foreign inode detection, see wbc_detach_inode() */
+	int wb_id;			/* current wb id */
+	int wb_lcand_id;		/* last foreign candidate wb id */
+	int wb_tcand_id;		/* this foreign candidate wb id */
+	size_t wb_bytes;		/* bytes written by current wb */
+	size_t wb_lcand_bytes;		/* bytes written by last candidate */
+	size_t wb_tcand_bytes;		/* bytes written by this candidate */
+#endif
 };
 
 /*
+ * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
+ * and are measured against each other in. There always is one global
+ * domain, global_wb_domain, that every wb in the system is a member of.
+ * This allows measuring the relative bandwidth of each wb to distribute
+ * dirtyable memory accordingly.
+ */
+struct wb_domain {
+	spinlock_t lock;
+
+	/*
+	 * Scale the writeback cache size proportional to the relative
+	 * writeout speed.
+	 *
+	 * We do this by keeping a floating proportion between BDIs, based
+	 * on page writeback completions [end_page_writeback()]. Those
+	 * devices that write out pages fastest will get the larger share,
+	 * while the slower will get a smaller share.
+	 *
+	 * We use page writeout completions because we are interested in
+	 * getting rid of dirty pages. Having them written out is the
+	 * primary goal.
+	 *
+	 * We introduce a concept of time, a period over which we measure
+	 * these events, because demand can/will vary over time. The length
+	 * of this period itself is measured in page writeback completions.
+	 */
+	struct fprop_global completions;
+	struct timer_list period_timer;	/* timer for aging of completions */
+	unsigned long period_time;
+
+	/*
+	 * The dirtyable memory and dirty threshold could be suddenly
+	 * knocked down by a large amount (eg. on the startup of KVM in a
+	 * swapless system). This may throw the system into deep dirty
+	 * exceeded state and throttle heavy/light dirtiers alike. To
+	 * retain good responsiveness, maintain global_dirty_limit for
+	 * tracking slowly down to the knocked down dirty threshold.
+	 *
+	 * Both fields are protected by ->lock.
+	 */
+	unsigned long dirty_limit_tstamp;
+	unsigned long dirty_limit;
+};
+
+/**
+ * wb_domain_size_changed - memory available to a wb_domain has changed
+ * @dom: wb_domain of interest
+ *
+ * This function should be called when the amount of memory available to
+ * @dom has changed. It resets @dom's dirty limit parameters to prevent
+ * the past values which don't match the current configuration from skewing
+ * dirty throttling. Without this, when memory size of a wb_domain is
+ * greatly reduced, the dirty throttling logic may allow too many pages to
+ * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
+ * that situation.
+ */
+static inline void wb_domain_size_changed(struct wb_domain *dom)
+{
+	spin_lock(&dom->lock);
+	dom->dirty_limit_tstamp = jiffies;
+	dom->dirty_limit = 0;
+	spin_unlock(&dom->lock);
+}
+
+/*
  * fs/fs-writeback.c
  */
 struct bdi_writeback;
 void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 			    enum wb_reason reason);
-int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
-int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
-				  enum wb_reason reason);
+bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+				   enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);
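
wb_domain_init() and wb_domain_size_changed() are the lifecycle hooks for domains beyond global_wb_domain; per-memcg domains are the intended client. A hedged sketch of the flow (error handling elided; the trigger and names are illustrative, not from this patch):

    #include <linux/gfp.h>
    #include <linux/writeback.h>

    static struct wb_domain my_domain;	/* illustrative, not global_wb_domain */

    static int my_domain_setup(void)
    {
    	return wb_domain_init(&my_domain, GFP_KERNEL);	/* 0 on success */
    }

    static void my_domain_limit_changed(void)
    {
    	/* e.g. after the memory limit backing this domain was lowered */
    	wb_domain_size_changed(&my_domain);
    }
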
@@ -107,6 +186,123 @@ static inline void wait_on_inode(struct inode *inode)
 	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
 }
 
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+#include <linux/cgroup.h>
+#include <linux/bio.h>
+
+void __inode_attach_wb(struct inode *inode, struct page *page);
+void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+				 struct inode *inode)
+	__releases(&inode->i_lock);
+void wbc_detach_inode(struct writeback_control *wbc);
+void wbc_account_io(struct writeback_control *wbc, struct page *page,
+		    size_t bytes);
+
+/**
+ * inode_attach_wb - associate an inode with its wb
+ * @inode: inode of interest
+ * @page: page being dirtied (may be NULL)
+ *
+ * If @inode doesn't have its wb, associate it with the wb matching the
+ * memcg of @page or, if @page is NULL, %current. May be called w/ or w/o
+ * @inode->i_lock.
+ */
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+	if (!inode->i_wb)
+		__inode_attach_wb(inode, page);
+}
+
+/**
+ * inode_detach_wb - disassociate an inode from its wb
+ * @inode: inode of interest
+ *
+ * @inode is being freed. Detach from its wb.
+ */
+static inline void inode_detach_wb(struct inode *inode)
+{
+	if (inode->i_wb) {
+		wb_put(inode->i_wb);
+		inode->i_wb = NULL;
+	}
+}
+
+/**
+ * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
+ * @wbc: writeback_control of interest
+ * @inode: target inode
+ *
+ * This function is to be used by __filemap_fdatawrite_range(), which is an
+ * alternative entry point into writeback code, and first ensures @inode is
+ * associated with a bdi_writeback and attaches it to @wbc.
+ */
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+					       struct inode *inode)
+{
+	spin_lock(&inode->i_lock);
+	inode_attach_wb(inode, NULL);
+	wbc_attach_and_unlock_inode(wbc, inode);
+}
+
+/**
+ * wbc_init_bio - writeback specific initialization of bio
+ * @wbc: writeback_control for the writeback in progress
+ * @bio: bio to be initialized
+ *
+ * @bio is a part of the writeback in progress controlled by @wbc. Perform
+ * writeback specific initialization. This is used to apply the cgroup
+ * writeback context.
+ */
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+	/*
+	 * pageout() path doesn't attach @wbc to the inode being written
+	 * out. This is intentional as we don't want the function to block
+	 * behind a slow cgroup. Ultimately, we want pageout() to kick off
+	 * regular writeback instead of writing things out itself.
+	 */
+	if (wbc->wb)
+		bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+}
+
+#else	/* CONFIG_CGROUP_WRITEBACK */
+
+static inline void inode_attach_wb(struct inode *inode, struct page *page)
+{
+}
+
+static inline void inode_detach_wb(struct inode *inode)
+{
+}
+
+static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
+					       struct inode *inode)
+	__releases(&inode->i_lock)
+{
+	spin_unlock(&inode->i_lock);
+}
+
+static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+					       struct inode *inode)
+{
+}
+
+static inline void wbc_detach_inode(struct writeback_control *wbc)
+{
+}
+
+static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
+{
+}
+
+static inline void wbc_account_io(struct writeback_control *wbc,
+				  struct page *page, size_t bytes)
+{
+}
+
+#endif	/* CONFIG_CGROUP_WRITEBACK */
+
 /*
  * mm/page-writeback.c
  */
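
For a filesystem, the intended flow is: ensure the inode has a wb and attach the wbc, tag each bio with the cgroup context, account the bytes so foreign-inode detection has data, then detach so the accumulated statistics get finalized. A hedged sketch of a write-out path; every helper other than the wbc_* calls and PAGE_SIZE is invented:

    #include <linux/writeback.h>

    /* Sketch: how a writepages-style path would thread the wbc through. */
    static int my_write_one_page(struct inode *inode, struct page *page,
    			     struct writeback_control *wbc)
    {
    	struct bio *bio;

    	wbc_attach_fdatawrite_inode(wbc, inode);	/* takes/drops i_lock */

    	bio = my_alloc_bio(page);			/* hypothetical helper */
    	wbc_init_bio(wbc, bio);				/* tag bio with blkcg */
    	wbc_account_io(wbc, page, PAGE_SIZE);		/* feed wb detection */
    	my_submit_bio(bio);				/* hypothetical helper */

    	wbc_detach_inode(wbc);				/* finalize stats */
    	return 0;
    }
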
@@ -120,8 +316,12 @@ static inline void laptop_sync_completion(void) { }
 #endif
 void throttle_vm_writeout(gfp_t gfp_mask);
 bool zone_dirty_ok(struct zone *zone);
+int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
+#ifdef CONFIG_CGROUP_WRITEBACK
+void wb_domain_exit(struct wb_domain *dom);
+#endif
 
-extern unsigned long global_dirty_limit;
+extern struct wb_domain global_wb_domain;
 
 /* These are exported to sysctl. */
 extern int dirty_background_ratio;
@@ -155,19 +355,12 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int,
 			void __user *, size_t *, loff_t *);
 
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
-unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
-			      unsigned long dirty);
-
-void __bdi_update_bandwidth(struct backing_dev_info *bdi,
-			    unsigned long thresh,
-			    unsigned long bg_thresh,
-			    unsigned long dirty,
-			    unsigned long bdi_thresh,
-			    unsigned long bdi_dirty,
-			    unsigned long start_time);
+unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
 
+void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited(struct address_space *mapping);
+bool wb_over_bg_thresh(struct bdi_writeback *wb);
 
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
 			   void *data);