aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h13
-rw-r--r--include/linux/arm-smccc.h52
-rw-r--r--include/linux/async_tx.h2
-rw-r--r--include/linux/audit.h32
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/backing-dev.h12
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h3
-rw-r--r--include/linux/bitfield.h13
-rw-r--r--include/linux/blk-mq.h9
-rw-r--r--include/linux/blk_types.h38
-rw-r--r--include/linux/blkdev.h124
-rw-r--r--include/linux/blktrace_api.h18
-rw-r--r--include/linux/bpf-cgroup.h13
-rw-r--r--include/linux/bpf.h27
-rw-r--r--include/linux/bpf_trace.h7
-rw-r--r--include/linux/brcmphy.h19
-rw-r--r--include/linux/bsg-lib.h5
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/can/core.h7
-rw-r--r--include/linux/can/dev.h8
-rw-r--r--include/linux/can/rx-offload.h59
-rw-r--r--include/linux/cdrom.h5
-rw-r--r--include/linux/clockchips.h9
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h20
-rw-r--r--include/linux/compiler.h6
-rw-r--r--include/linux/cpufreq.h7
-rw-r--r--include/linux/cpuhotplug.h6
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/cpumask.h15
-rw-r--r--include/linux/cputime.h7
-rw-r--r--include/linux/cryptohash.h2
-rw-r--r--include/linux/debugfs.h11
-rw-r--r--include/linux/delay.h11
-rw-r--r--include/linux/delayacct.h1
-rw-r--r--include/linux/devfreq.h3
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/device.h11
-rw-r--r--include/linux/dma-iommu.h10
-rw-r--r--include/linux/dma-mapping.h7
-rw-r--r--include/linux/dma/dw.h2
-rw-r--r--include/linux/dmaengine.h11
-rw-r--r--include/linux/edac.h4
-rw-r--r--include/linux/efi-bgrt.h11
-rw-r--r--include/linux/efi.h56
-rw-r--r--include/linux/elevator.h63
-rw-r--r--include/linux/etherdevice.h65
-rw-r--r--include/linux/export.h17
-rw-r--r--include/linux/extcon.h71
-rw-r--r--include/linux/extcon/extcon-adc-jack.h2
-rw-r--r--include/linux/filter.h131
-rw-r--r--include/linux/fpga/fpga-mgr.h5
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/fscache-cache.h1
-rw-r--r--include/linux/fscrypt_common.h146
-rw-r--r--include/linux/fscrypt_notsupp.h168
-rw-r--r--include/linux/fscrypt_supp.h66
-rw-r--r--include/linux/fscrypto.h345
-rw-r--r--include/linux/fsi.h50
-rw-r--r--include/linux/fsl_ifc.h8
-rw-r--r--include/linux/genhd.h8
-rw-r--r--include/linux/gpio/driver.h107
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hid-sensor-ids.h4
-rw-r--r--include/linux/hrtimer.h11
-rw-r--r--include/linux/hyperv.h160
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/ide.h58
-rw-r--r--include/linux/ieee80211.h51
-rw-r--r--include/linux/if_bridge.h2
-rw-r--r--include/linux/if_frad.h2
-rw-r--r--include/linux/if_macvlan.h17
-rw-r--r--include/linux/if_tap.h75
-rw-r--r--include/linux/iio/buffer.h160
-rw-r--r--include/linux/iio/buffer_impl.h162
-rw-r--r--include/linux/iio/common/st_sensors_i2c.h9
-rw-r--r--include/linux/iio/kfifo_buf.h5
-rw-r--r--include/linux/iio/timer/stm32-timer-trigger.h62
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/init_task.h40
-rw-r--r--include/linux/intel-iommu.h17
-rw-r--r--include/linux/intel_pmic_gpio.h15
-rw-r--r--include/linux/iommu.h138
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h36
-rw-r--r--include/linux/irqchip/arm-gic-v3.h5
-rw-r--r--include/linux/irqdomain.h36
-rw-r--r--include/linux/jiffies.h2
-rw-r--r--include/linux/jump_label.h4
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kernel_stat.h14
-rw-r--r--include/linux/kmod.h7
-rw-r--r--include/linux/kprobes.h30
-rw-r--r--include/linux/kref.h78
-rw-r--r--include/linux/leds.h16
-rw-r--r--include/linux/libata.h10
-rw-r--r--include/linux/lightnvm.h138
-rw-r--r--include/linux/list.h13
-rw-r--r--include/linux/llist.h37
-rw-r--r--include/linux/log2.h13
-rw-r--r--include/linux/lsm_hooks.h25
-rw-r--r--include/linux/marvell_phy.h7
-rw-r--r--include/linux/math64.h26
-rw-r--r--include/linux/mdio.h26
-rw-r--r--include/linux/memory_hotplug.h7
-rw-r--r--include/linux/mfd/axp20x.h31
-rw-r--r--include/linux/mfd/cros_ec_commands.h3
-rw-r--r--include/linux/mfd/lpc_ich.h3
-rw-r--r--include/linux/mfd/stm32-timers.h71
-rw-r--r--include/linux/mfd/tmio.h6
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx4/device.h8
-rw-r--r--include/linux/mlx5/cq.h5
-rw-r--r--include/linux/mlx5/device.h111
-rw-r--r--include/linux/mlx5/doorbell.h6
-rw-r--r--include/linux/mlx5/driver.h198
-rw-r--r--include/linux/mlx5/mlx5_ifc.h262
-rw-r--r--include/linux/mlx5/qp.h92
-rw-r--r--include/linux/mlx5/vport.h1
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mmc/boot.h7
-rw-r--r--include/linux/mmc/card.h246
-rw-r--r--include/linux/mmc/core.h86
-rw-r--r--include/linux/mmc/dw_mmc.h274
-rw-r--r--include/linux/mmc/host.h84
-rw-r--r--include/linux/mmc/mmc.h63
-rw-r--r--include/linux/mmc/sdio_ids.h8
-rw-r--r--include/linux/mmc/sh_mmcif.h5
-rw-r--r--include/linux/mmc/slot-gpio.h3
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/module.h17
-rw-r--r--include/linux/mroute.h59
-rw-r--r--include/linux/mroute6.h2
-rw-r--r--include/linux/msi.h11
-rw-r--r--include/linux/mtd/fsmc.h156
-rw-r--r--include/linux/mtd/mtd.h16
-rw-r--r--include/linux/mtd/nand.h9
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/mtd/spi-nor.h34
-rw-r--r--include/linux/mutex.h9
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h108
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter/x_tables.h9
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nmi.h1
-rw-r--r--include/linux/nvme.h3
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/of_device.h6
-rw-r--r--include/linux/of_iommu.h11
-rw-r--r--include/linux/parman.h76
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/percpu-refcount.h4
-rw-r--r--include/linux/percpu-rwsem.h8
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/phy.h40
-rw-r--r--include/linux/phy_led_triggers.h4
-rw-r--r--include/linux/pinctrl/consumer.h6
-rw-r--r--include/linux/pinctrl/pinconf-generic.h52
-rw-r--r--include/linux/pinctrl/pinctrl.h15
-rw-r--r--include/linux/platform_data/dma-dw.h2
-rw-r--r--include/linux/platform_data/intel-spi.h31
-rw-r--r--include/linux/platform_data/media/ir-rx51.h6
-rw-r--r--include/linux/platform_data/mmc-mxcmmc.h1
-rw-r--r--include/linux/platform_data/spi-ep93xx.h17
-rw-r--r--include/linux/platform_data/ti-aemif.h23
-rw-r--r--include/linux/pm_domain.h3
-rw-r--r--include/linux/pm_opp.h72
-rw-r--r--include/linux/pm_qos.h1
-rw-r--r--include/linux/poison.h1
-rw-r--r--include/linux/posix-timers.h14
-rw-r--r--include/linux/power/bq27xxx_battery.h12
-rw-r--r--include/linux/property.h19
-rw-r--r--include/linux/ptr_ring.h36
-rw-r--r--include/linux/pxa2xx_ssp.h14
-rw-r--r--include/linux/qed/common_hsi.h43
-rw-r--r--include/linux/qed/eth_common.h32
-rw-r--r--include/linux/qed/fcoe_common.h715
-rw-r--r--include/linux/qed/iscsi_common.h32
-rw-r--r--include/linux/qed/qed_chain.h34
-rw-r--r--include/linux/qed/qed_eth_if.h56
-rw-r--r--include/linux/qed/qed_fcoe_if.h145
-rw-r--r--include/linux/qed/qed_if.h76
-rw-r--r--include/linux/qed/qed_iov_if.h34
-rw-r--r--include/linux/qed/qed_iscsi_if.h32
-rw-r--r--include/linux/qed/qed_ll2_if.h31
-rw-r--r--include/linux/qed/qed_roce_if.h2
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/qed/rdma_common.h32
-rw-r--r--include/linux/qed/roce_common.h32
-rw-r--r--include/linux/qed/storage_common.h32
-rw-r--r--include/linux/qed/tcp_common.h32
-rw-r--r--include/linux/rcupdate.h16
-rw-r--r--include/linux/rcutiny.h6
-rw-r--r--include/linux/rcuwait.h63
-rw-r--r--include/linux/refcount.h294
-rw-r--r--include/linux/regmap.h115
-rw-r--r--include/linux/rfkill-regulator.h48
-rw-r--r--include/linux/rhashtable.h78
-rw-r--r--include/linux/sbitmap.h30
-rw-r--r--include/linux/sched.h130
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/sctp.h143
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/sed-opal.h70
-rw-r--r--include/linux/siphash.h140
-rw-r--r--include/linux/skbuff.h64
-rw-r--r--include/linux/soc/qcom/smem_state.h2
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h10
-rw-r--r--include/linux/soc/ti/knav_dma.h2
-rw-r--r--include/linux/socket.h13
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_api_up.h1
-rw-r--r--include/linux/sram.h27
-rw-r--r--include/linux/srcu.h10
-rw-r--r--include/linux/stmmac.h8
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/tcp.h27
-rw-r--r--include/linux/timer.h45
-rw-r--r--include/linux/trace_events.h3
-rw-r--r--include/linux/usb/chipidea.h9
-rw-r--r--include/linux/uuid.h24
-rw-r--r--include/linux/virtio.h4
-rw-r--r--include/linux/virtio_net.h6
-rw-r--r--include/linux/vme.h1
-rw-r--r--include/linux/vmw_vmci_defs.h7
-rw-r--r--include/linux/vtime.h7
-rw-r--r--include/linux/ww_mutex.h32
234 files changed, 5873 insertions, 2938 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 5b36974ed60a..673acda012af 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -291,7 +291,8 @@ bool acpi_processor_validate_proc_id(int proc_id);
291 291
292#ifdef CONFIG_ACPI_HOTPLUG_CPU 292#ifdef CONFIG_ACPI_HOTPLUG_CPU
293/* Arch dependent functions for cpu hotplug support */ 293/* Arch dependent functions for cpu hotplug support */
294int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu); 294int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
295 int *pcpu);
295int acpi_unmap_cpu(int cpu); 296int acpi_unmap_cpu(int cpu);
296int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); 297int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
297#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 298#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -1153,4 +1154,14 @@ int parse_spcr(bool earlycon);
1153static inline int parse_spcr(bool earlycon) { return 0; } 1154static inline int parse_spcr(bool earlycon) { return 0; }
1154#endif 1155#endif
1155 1156
1157#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
1158int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
1159#else
1160static inline
1161int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
1162{
1163 return -EINVAL;
1164}
1165#endif
1166
1156#endif /*_LINUX_ACPI_H*/ 1167#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index b5abfda80465..4c5bca38c653 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,9 +14,6 @@
14#ifndef __LINUX_ARM_SMCCC_H 14#ifndef __LINUX_ARM_SMCCC_H
15#define __LINUX_ARM_SMCCC_H 15#define __LINUX_ARM_SMCCC_H
16 16
17#include <linux/linkage.h>
18#include <linux/types.h>
19
20/* 17/*
21 * This file provides common defines for ARM SMC Calling Convention as 18 * This file provides common defines for ARM SMC Calling Convention as
22 * specified in 19 * specified in
@@ -60,6 +57,13 @@
60#define ARM_SMCCC_OWNER_TRUSTED_OS 50 57#define ARM_SMCCC_OWNER_TRUSTED_OS 50
61#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 58#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
62 59
60#define ARM_SMCCC_QUIRK_NONE 0
61#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
62
63#ifndef __ASSEMBLY__
64
65#include <linux/linkage.h>
66#include <linux/types.h>
63/** 67/**
64 * struct arm_smccc_res - Result from SMC/HVC call 68 * struct arm_smccc_res - Result from SMC/HVC call
65 * @a0-a3 result values from registers 0 to 3 69 * @a0-a3 result values from registers 0 to 3
@@ -72,33 +76,59 @@ struct arm_smccc_res {
72}; 76};
73 77
74/** 78/**
75 * arm_smccc_smc() - make SMC calls 79 * struct arm_smccc_quirk - Contains quirk information
80 * @id: quirk identification
81 * @state: quirk specific information
82 * @a6: Qualcomm quirk entry for returning post-smc call contents of a6
83 */
84struct arm_smccc_quirk {
85 int id;
86 union {
87 unsigned long a6;
88 } state;
89};
90
91/**
92 * __arm_smccc_smc() - make SMC calls
76 * @a0-a7: arguments passed in registers 0 to 7 93 * @a0-a7: arguments passed in registers 0 to 7
77 * @res: result values from registers 0 to 3 94 * @res: result values from registers 0 to 3
95 * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
78 * 96 *
79 * This function is used to make SMC calls following SMC Calling Convention. 97 * This function is used to make SMC calls following SMC Calling Convention.
80 * The content of the supplied param are copied to registers 0 to 7 prior 98 * The content of the supplied param are copied to registers 0 to 7 prior
81 * to the SMC instruction. The return values are updated with the content 99 * to the SMC instruction. The return values are updated with the content
82 * from register 0 to 3 on return from the SMC instruction. 100 * from register 0 to 3 on return from the SMC instruction. An optional
101 * quirk structure provides vendor specific behavior.
83 */ 102 */
84asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1, 103asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
85 unsigned long a2, unsigned long a3, unsigned long a4, 104 unsigned long a2, unsigned long a3, unsigned long a4,
86 unsigned long a5, unsigned long a6, unsigned long a7, 105 unsigned long a5, unsigned long a6, unsigned long a7,
87 struct arm_smccc_res *res); 106 struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
88 107
89/** 108/**
90 * arm_smccc_hvc() - make HVC calls 109 * __arm_smccc_hvc() - make HVC calls
91 * @a0-a7: arguments passed in registers 0 to 7 110 * @a0-a7: arguments passed in registers 0 to 7
92 * @res: result values from registers 0 to 3 111 * @res: result values from registers 0 to 3
112 * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required.
93 * 113 *
94 * This function is used to make HVC calls following SMC Calling 114 * This function is used to make HVC calls following SMC Calling
95 * Convention. The content of the supplied param are copied to registers 0 115 * Convention. The content of the supplied param are copied to registers 0
96 * to 7 prior to the HVC instruction. The return values are updated with 116 * to 7 prior to the HVC instruction. The return values are updated with
97 * the content from register 0 to 3 on return from the HVC instruction. 117 * the content from register 0 to 3 on return from the HVC instruction. An
118 * optional quirk structure provides vendor specific behavior.
98 */ 119 */
99asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1, 120asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
100 unsigned long a2, unsigned long a3, unsigned long a4, 121 unsigned long a2, unsigned long a3, unsigned long a4,
101 unsigned long a5, unsigned long a6, unsigned long a7, 122 unsigned long a5, unsigned long a6, unsigned long a7,
102 struct arm_smccc_res *res); 123 struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
124
125#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL)
126
127#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__)
128
129#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL)
130
131#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
103 132
133#endif /*__ASSEMBLY__*/
104#endif /*__LINUX_ARM_SMCCC_H*/ 134#endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 388574ea38ed..28e3cf1465ab 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -87,7 +87,7 @@ struct async_submit_ctl {
87 void *scribble; 87 void *scribble;
88}; 88};
89 89
90#ifdef CONFIG_DMA_ENGINE 90#if defined(CONFIG_DMA_ENGINE) && !defined(CONFIG_ASYNC_TX_CHANNEL_SWITCH)
91#define async_tx_issue_pending_all dma_issue_pending_all 91#define async_tx_issue_pending_all dma_issue_pending_all
92 92
93/** 93/**
diff --git a/include/linux/audit.h b/include/linux/audit.h
index f51fca8d0b6f..504e784b7ffa 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -360,6 +360,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
360 const struct cred *old); 360 const struct cred *old);
361extern void __audit_log_capset(const struct cred *new, const struct cred *old); 361extern void __audit_log_capset(const struct cred *new, const struct cred *old);
362extern void __audit_mmap_fd(int fd, int flags); 362extern void __audit_mmap_fd(int fd, int flags);
363extern void __audit_log_kern_module(char *name);
363 364
364static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) 365static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
365{ 366{
@@ -387,6 +388,20 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
387 return __audit_socketcall(nargs, args); 388 return __audit_socketcall(nargs, args);
388 return 0; 389 return 0;
389} 390}
391
392static inline int audit_socketcall_compat(int nargs, u32 *args)
393{
394 unsigned long a[AUDITSC_ARGS];
395 int i;
396
397 if (audit_dummy_context())
398 return 0;
399
400 for (i = 0; i < nargs; i++)
401 a[i] = (unsigned long)args[i];
402 return __audit_socketcall(nargs, a);
403}
404
390static inline int audit_sockaddr(int len, void *addr) 405static inline int audit_sockaddr(int len, void *addr)
391{ 406{
392 if (unlikely(!audit_dummy_context())) 407 if (unlikely(!audit_dummy_context()))
@@ -436,6 +451,12 @@ static inline void audit_mmap_fd(int fd, int flags)
436 __audit_mmap_fd(fd, flags); 451 __audit_mmap_fd(fd, flags);
437} 452}
438 453
454static inline void audit_log_kern_module(char *name)
455{
456 if (!audit_dummy_context())
457 __audit_log_kern_module(name);
458}
459
439extern int audit_n_rules; 460extern int audit_n_rules;
440extern int audit_signals; 461extern int audit_signals;
441#else /* CONFIG_AUDITSYSCALL */ 462#else /* CONFIG_AUDITSYSCALL */
@@ -513,6 +534,12 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
513{ 534{
514 return 0; 535 return 0;
515} 536}
537
538static inline int audit_socketcall_compat(int nargs, u32 *args)
539{
540 return 0;
541}
542
516static inline void audit_fd_pair(int fd1, int fd2) 543static inline void audit_fd_pair(int fd1, int fd2)
517{ } 544{ }
518static inline int audit_sockaddr(int len, void *addr) 545static inline int audit_sockaddr(int len, void *addr)
@@ -541,6 +568,11 @@ static inline void audit_log_capset(const struct cred *new,
541{ } 568{ }
542static inline void audit_mmap_fd(int fd, int flags) 569static inline void audit_mmap_fd(int fd, int flags)
543{ } 570{ }
571
572static inline void audit_log_kern_module(char *name)
573{
574}
575
544static inline void audit_ptrace(struct task_struct *t) 576static inline void audit_ptrace(struct task_struct *t)
545{ } 577{ }
546#define audit_n_rules 0 578#define audit_n_rules 0
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index e850e76acaaf..ad955817916d 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -10,6 +10,7 @@
10#include <linux/flex_proportions.h> 10#include <linux/flex_proportions.h>
11#include <linux/timer.h> 11#include <linux/timer.h>
12#include <linux/workqueue.h> 12#include <linux/workqueue.h>
13#include <linux/kref.h>
13 14
14struct page; 15struct page;
15struct device; 16struct device;
@@ -144,6 +145,7 @@ struct backing_dev_info {
144 145
145 char *name; 146 char *name;
146 147
148 struct kref refcnt; /* Reference counter for the structure */
147 unsigned int capabilities; /* Device capabilities */ 149 unsigned int capabilities; /* Device capabilities */
148 unsigned int min_ratio; 150 unsigned int min_ratio;
149 unsigned int max_ratio, max_prop_frac; 151 unsigned int max_ratio, max_prop_frac;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 43b93a947e61..c52a48cb9a66 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,7 +18,14 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19 19
20int __must_check bdi_init(struct backing_dev_info *bdi); 20int __must_check bdi_init(struct backing_dev_info *bdi);
21void bdi_exit(struct backing_dev_info *bdi); 21
22static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
23{
24 kref_get(&bdi->refcnt);
25 return bdi;
26}
27
28void bdi_put(struct backing_dev_info *bdi);
22 29
23__printf(3, 4) 30__printf(3, 4)
24int bdi_register(struct backing_dev_info *bdi, struct device *parent, 31int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -29,6 +36,7 @@ void bdi_unregister(struct backing_dev_info *bdi);
29 36
30int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); 37int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
31void bdi_destroy(struct backing_dev_info *bdi); 38void bdi_destroy(struct backing_dev_info *bdi);
39struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
32 40
33void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, 41void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
34 bool range_cyclic, enum wb_reason reason); 42 bool range_cyclic, enum wb_reason reason);
@@ -183,7 +191,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
183 sb = inode->i_sb; 191 sb = inode->i_sb;
184#ifdef CONFIG_BLOCK 192#ifdef CONFIG_BLOCK
185 if (sb_is_blkdev_sb(sb)) 193 if (sb_is_blkdev_sb(sb))
186 return blk_get_backing_dev_info(I_BDEV(inode)); 194 return I_BDEV(inode)->bd_bdi;
187#endif 195#endif
188 return sb->s_bdi; 196 return sb->s_bdi;
189} 197}
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index b20e3d56253f..2f1c690a3e66 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -593,9 +593,6 @@ struct bcma_sflash {
593 u32 blocksize; 593 u32 blocksize;
594 u16 numblocks; 594 u16 numblocks;
595 u32 size; 595 u32 size;
596
597 struct mtd_info *mtd;
598 void *priv;
599}; 596};
600#endif 597#endif
601 598
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index f6505d83069d..8b9d6fff002d 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -63,6 +63,19 @@
63 }) 63 })
64 64
65/** 65/**
66 * FIELD_FIT() - check if value fits in the field
67 * @_mask: shifted mask defining the field's length and position
68 * @_val: value to test against the field
69 *
70 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
71 */
72#define FIELD_FIT(_mask, _val) \
73 ({ \
74 __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
75 !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
76 })
77
78/**
66 * FIELD_PREP() - prepare a bitfield element 79 * FIELD_PREP() - prepare a bitfield element
67 * @_mask: shifted mask defining the field's length and position 80 * @_mask: shifted mask defining the field's length and position
68 * @_val: value to put in the field 81 * @_val: value to put in the field
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 4a2ab5d99ff7..8e4df3d6c8cd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -22,6 +22,7 @@ struct blk_mq_hw_ctx {
22 22
23 unsigned long flags; /* BLK_MQ_F_* flags */ 23 unsigned long flags; /* BLK_MQ_F_* flags */
24 24
25 void *sched_data;
25 struct request_queue *queue; 26 struct request_queue *queue;
26 struct blk_flush_queue *fq; 27 struct blk_flush_queue *fq;
27 28
@@ -35,6 +36,7 @@ struct blk_mq_hw_ctx {
35 atomic_t wait_index; 36 atomic_t wait_index;
36 37
37 struct blk_mq_tags *tags; 38 struct blk_mq_tags *tags;
39 struct blk_mq_tags *sched_tags;
38 40
39 struct srcu_struct queue_rq_srcu; 41 struct srcu_struct queue_rq_srcu;
40 42
@@ -60,7 +62,7 @@ struct blk_mq_hw_ctx {
60 62
61struct blk_mq_tag_set { 63struct blk_mq_tag_set {
62 unsigned int *mq_map; 64 unsigned int *mq_map;
63 struct blk_mq_ops *ops; 65 const struct blk_mq_ops *ops;
64 unsigned int nr_hw_queues; 66 unsigned int nr_hw_queues;
65 unsigned int queue_depth; /* max hw supported */ 67 unsigned int queue_depth; /* max hw supported */
66 unsigned int reserved_tags; 68 unsigned int reserved_tags;
@@ -151,11 +153,13 @@ enum {
151 BLK_MQ_F_SG_MERGE = 1 << 2, 153 BLK_MQ_F_SG_MERGE = 1 << 2,
152 BLK_MQ_F_DEFER_ISSUE = 1 << 4, 154 BLK_MQ_F_DEFER_ISSUE = 1 << 4,
153 BLK_MQ_F_BLOCKING = 1 << 5, 155 BLK_MQ_F_BLOCKING = 1 << 5,
156 BLK_MQ_F_NO_SCHED = 1 << 6,
154 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, 157 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
155 BLK_MQ_F_ALLOC_POLICY_BITS = 1, 158 BLK_MQ_F_ALLOC_POLICY_BITS = 1,
156 159
157 BLK_MQ_S_STOPPED = 0, 160 BLK_MQ_S_STOPPED = 0,
158 BLK_MQ_S_TAG_ACTIVE = 1, 161 BLK_MQ_S_TAG_ACTIVE = 1,
162 BLK_MQ_S_SCHED_RESTART = 2,
159 163
160 BLK_MQ_MAX_DEPTH = 10240, 164 BLK_MQ_MAX_DEPTH = 10240,
161 165
@@ -179,14 +183,13 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
179 183
180void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); 184void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
181 185
182void blk_mq_insert_request(struct request *, bool, bool, bool);
183void blk_mq_free_request(struct request *rq); 186void blk_mq_free_request(struct request *rq);
184void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
185bool blk_mq_can_queue(struct blk_mq_hw_ctx *); 187bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
186 188
187enum { 189enum {
188 BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */ 190 BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
189 BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */ 191 BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
192 BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
190}; 193};
191 194
192struct request *blk_mq_alloc_request(struct request_queue *q, int rw, 195struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 519ea2c9df61..d703acb55d0f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -162,6 +162,13 @@ enum req_opf {
162 /* write the zero filled sector many times */ 162 /* write the zero filled sector many times */
163 REQ_OP_WRITE_ZEROES = 8, 163 REQ_OP_WRITE_ZEROES = 8,
164 164
165 /* SCSI passthrough using struct scsi_request */
166 REQ_OP_SCSI_IN = 32,
167 REQ_OP_SCSI_OUT = 33,
168 /* Driver private requests */
169 REQ_OP_DRV_IN = 34,
170 REQ_OP_DRV_OUT = 35,
171
165 REQ_OP_LAST, 172 REQ_OP_LAST,
166}; 173};
167 174
@@ -221,6 +228,15 @@ static inline bool op_is_write(unsigned int op)
221} 228}
222 229
223/* 230/*
231 * Check if the bio or request is one that needs special treatment in the
232 * flush state machine.
233 */
234static inline bool op_is_flush(unsigned int op)
235{
236 return op & (REQ_FUA | REQ_PREFLUSH);
237}
238
239/*
224 * Reads are always treated as synchronous, as are requests with the FUA or 240 * Reads are always treated as synchronous, as are requests with the FUA or
225 * PREFLUSH flag. Other operations may be marked as synchronous using the 241 * PREFLUSH flag. Other operations may be marked as synchronous using the
226 * REQ_SYNC flag. 242 * REQ_SYNC flag.
@@ -232,22 +248,29 @@ static inline bool op_is_sync(unsigned int op)
232} 248}
233 249
234typedef unsigned int blk_qc_t; 250typedef unsigned int blk_qc_t;
235#define BLK_QC_T_NONE -1U 251#define BLK_QC_T_NONE -1U
236#define BLK_QC_T_SHIFT 16 252#define BLK_QC_T_SHIFT 16
253#define BLK_QC_T_INTERNAL (1U << 31)
237 254
238static inline bool blk_qc_t_valid(blk_qc_t cookie) 255static inline bool blk_qc_t_valid(blk_qc_t cookie)
239{ 256{
240 return cookie != BLK_QC_T_NONE; 257 return cookie != BLK_QC_T_NONE;
241} 258}
242 259
243static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num) 260static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
261 bool internal)
244{ 262{
245 return tag | (queue_num << BLK_QC_T_SHIFT); 263 blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
264
265 if (internal)
266 ret |= BLK_QC_T_INTERNAL;
267
268 return ret;
246} 269}
247 270
248static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) 271static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
249{ 272{
250 return cookie >> BLK_QC_T_SHIFT; 273 return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
251} 274}
252 275
253static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) 276static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
@@ -255,6 +278,11 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
255 return cookie & ((1u << BLK_QC_T_SHIFT) - 1); 278 return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
256} 279}
257 280
281static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
282{
283 return (cookie & BLK_QC_T_INTERNAL) != 0;
284}
285
258struct blk_issue_stat { 286struct blk_issue_stat {
259 u64 time; 287 u64 time;
260}; 288};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1ca8e8fd1078..aecca0e7d9ca 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -71,15 +71,6 @@ struct request_list {
71}; 71};
72 72
73/* 73/*
74 * request command types
75 */
76enum rq_cmd_type_bits {
77 REQ_TYPE_FS = 1, /* fs request */
78 REQ_TYPE_BLOCK_PC, /* scsi command */
79 REQ_TYPE_DRV_PRIV, /* driver defined types from here */
80};
81
82/*
83 * request flags */ 74 * request flags */
84typedef __u32 __bitwise req_flags_t; 75typedef __u32 __bitwise req_flags_t;
85 76
@@ -128,8 +119,6 @@ typedef __u32 __bitwise req_flags_t;
128#define RQF_NOMERGE_FLAGS \ 119#define RQF_NOMERGE_FLAGS \
129 (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) 120 (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
130 121
131#define BLK_MAX_CDB 16
132
133/* 122/*
134 * Try to put the fields that are referenced together in the same cacheline. 123 * Try to put the fields that are referenced together in the same cacheline.
135 * 124 *
@@ -147,13 +136,16 @@ struct request {
147 struct blk_mq_ctx *mq_ctx; 136 struct blk_mq_ctx *mq_ctx;
148 137
149 int cpu; 138 int cpu;
150 unsigned cmd_type;
151 unsigned int cmd_flags; /* op and common flags */ 139 unsigned int cmd_flags; /* op and common flags */
152 req_flags_t rq_flags; 140 req_flags_t rq_flags;
141
142 int internal_tag;
143
153 unsigned long atomic_flags; 144 unsigned long atomic_flags;
154 145
155 /* the following two fields are internal, NEVER access directly */ 146 /* the following two fields are internal, NEVER access directly */
156 unsigned int __data_len; /* total data len */ 147 unsigned int __data_len; /* total data len */
148 int tag;
157 sector_t __sector; /* sector cursor */ 149 sector_t __sector; /* sector cursor */
158 150
159 struct bio *bio; 151 struct bio *bio;
@@ -222,20 +214,9 @@ struct request {
222 214
223 void *special; /* opaque pointer available for LLD use */ 215 void *special; /* opaque pointer available for LLD use */
224 216
225 int tag;
226 int errors; 217 int errors;
227 218
228 /*
229 * when request is used as a packet command carrier
230 */
231 unsigned char __cmd[BLK_MAX_CDB];
232 unsigned char *cmd;
233 unsigned short cmd_len;
234
235 unsigned int extra_len; /* length of alignment and padding */ 219 unsigned int extra_len; /* length of alignment and padding */
236 unsigned int sense_len;
237 unsigned int resid_len; /* residual count */
238 void *sense;
239 220
240 unsigned long deadline; 221 unsigned long deadline;
241 struct list_head timeout_list; 222 struct list_head timeout_list;
@@ -252,6 +233,21 @@ struct request {
252 struct request *next_rq; 233 struct request *next_rq;
253}; 234};
254 235
236static inline bool blk_rq_is_scsi(struct request *rq)
237{
238 return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
239}
240
241static inline bool blk_rq_is_private(struct request *rq)
242{
243 return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
244}
245
246static inline bool blk_rq_is_passthrough(struct request *rq)
247{
248 return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
249}
250
255static inline unsigned short req_get_ioprio(struct request *req) 251static inline unsigned short req_get_ioprio(struct request *req)
256{ 252{
257 return req->ioprio; 253 return req->ioprio;
@@ -271,6 +267,8 @@ typedef void (softirq_done_fn)(struct request *);
271typedef int (dma_drain_needed_fn)(struct request *); 267typedef int (dma_drain_needed_fn)(struct request *);
272typedef int (lld_busy_fn) (struct request_queue *q); 268typedef int (lld_busy_fn) (struct request_queue *q);
273typedef int (bsg_job_fn) (struct bsg_job *); 269typedef int (bsg_job_fn) (struct bsg_job *);
270typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
271typedef void (exit_rq_fn)(struct request_queue *, struct request *);
274 272
275enum blk_eh_timer_return { 273enum blk_eh_timer_return {
276 BLK_EH_NOT_HANDLED, 274 BLK_EH_NOT_HANDLED,
@@ -333,6 +331,7 @@ struct queue_limits {
333 unsigned short logical_block_size; 331 unsigned short logical_block_size;
334 unsigned short max_segments; 332 unsigned short max_segments;
335 unsigned short max_integrity_segments; 333 unsigned short max_integrity_segments;
334 unsigned short max_discard_segments;
336 335
337 unsigned char misaligned; 336 unsigned char misaligned;
338 unsigned char discard_misaligned; 337 unsigned char discard_misaligned;
@@ -406,8 +405,10 @@ struct request_queue {
406 rq_timed_out_fn *rq_timed_out_fn; 405 rq_timed_out_fn *rq_timed_out_fn;
407 dma_drain_needed_fn *dma_drain_needed; 406 dma_drain_needed_fn *dma_drain_needed;
408 lld_busy_fn *lld_busy_fn; 407 lld_busy_fn *lld_busy_fn;
408 init_rq_fn *init_rq_fn;
409 exit_rq_fn *exit_rq_fn;
409 410
410 struct blk_mq_ops *mq_ops; 411 const struct blk_mq_ops *mq_ops;
411 412
412 unsigned int *mq_map; 413 unsigned int *mq_map;
413 414
@@ -432,7 +433,8 @@ struct request_queue {
432 */ 433 */
433 struct delayed_work delay_work; 434 struct delayed_work delay_work;
434 435
435 struct backing_dev_info backing_dev_info; 436 struct backing_dev_info *backing_dev_info;
437 struct disk_devt *disk_devt;
436 438
437 /* 439 /*
438 * The queue owner gets to use this for whatever they like. 440 * The queue owner gets to use this for whatever they like.
@@ -569,7 +571,15 @@ struct request_queue {
569 struct list_head tag_set_list; 571 struct list_head tag_set_list;
570 struct bio_set *bio_split; 572 struct bio_set *bio_split;
571 573
574#ifdef CONFIG_BLK_DEBUG_FS
575 struct dentry *debugfs_dir;
576 struct dentry *mq_debugfs_dir;
577#endif
578
572 bool mq_sysfs_init_done; 579 bool mq_sysfs_init_done;
580
581 size_t cmd_size;
582 void *rq_alloc_data;
573}; 583};
574 584
575#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 585#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -600,6 +610,7 @@ struct request_queue {
 600#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */ 610#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
601#define QUEUE_FLAG_DAX 26 /* device supports DAX */ 611#define QUEUE_FLAG_DAX 26 /* device supports DAX */
602#define QUEUE_FLAG_STATS 27 /* track rq completion times */ 612#define QUEUE_FLAG_STATS 27 /* track rq completion times */
613#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
603 614
604#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 615#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
605 (1 << QUEUE_FLAG_STACKABLE) | \ 616 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -695,9 +706,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
695 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 706 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
696 REQ_FAILFAST_DRIVER)) 707 REQ_FAILFAST_DRIVER))
697 708
698#define blk_account_rq(rq) \ 709static inline bool blk_account_rq(struct request *rq)
699 (((rq)->rq_flags & RQF_STARTED) && \ 710{
700 ((rq)->cmd_type == REQ_TYPE_FS)) 711 return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
712}
701 713
702#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 714#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
703#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 715#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
@@ -772,7 +784,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
772 784
773static inline bool rq_mergeable(struct request *rq) 785static inline bool rq_mergeable(struct request *rq)
774{ 786{
775 if (rq->cmd_type != REQ_TYPE_FS) 787 if (blk_rq_is_passthrough(rq))
776 return false; 788 return false;
777 789
778 if (req_op(rq) == REQ_OP_FLUSH) 790 if (req_op(rq) == REQ_OP_FLUSH)
@@ -910,7 +922,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
910extern void blk_put_request(struct request *); 922extern void blk_put_request(struct request *);
911extern void __blk_put_request(struct request_queue *, struct request *); 923extern void __blk_put_request(struct request_queue *, struct request *);
912extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 924extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
913extern void blk_rq_set_block_pc(struct request *);
914extern void blk_requeue_request(struct request_queue *, struct request *); 925extern void blk_requeue_request(struct request_queue *, struct request *);
915extern int blk_lld_busy(struct request_queue *q); 926extern int blk_lld_busy(struct request_queue *q);
916extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 927extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -1047,7 +1058,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
1047{ 1058{
1048 struct request_queue *q = rq->q; 1059 struct request_queue *q = rq->q;
1049 1060
1050 if (unlikely(rq->cmd_type != REQ_TYPE_FS)) 1061 if (blk_rq_is_passthrough(rq))
1051 return q->limits.max_hw_sectors; 1062 return q->limits.max_hw_sectors;
1052 1063
1053 if (!q->limits.chunk_sectors || 1064 if (!q->limits.chunk_sectors ||
@@ -1129,14 +1140,15 @@ extern void blk_unprep_request(struct request *);
1129extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, 1140extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
1130 spinlock_t *lock, int node_id); 1141 spinlock_t *lock, int node_id);
1131extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); 1142extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
1132extern struct request_queue *blk_init_allocated_queue(struct request_queue *, 1143extern int blk_init_allocated_queue(struct request_queue *);
1133 request_fn_proc *, spinlock_t *);
1134extern void blk_cleanup_queue(struct request_queue *); 1144extern void blk_cleanup_queue(struct request_queue *);
1135extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 1145extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1136extern void blk_queue_bounce_limit(struct request_queue *, u64); 1146extern void blk_queue_bounce_limit(struct request_queue *, u64);
1137extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 1147extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1138extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); 1148extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1139extern void blk_queue_max_segments(struct request_queue *, unsigned short); 1149extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1150extern void blk_queue_max_discard_segments(struct request_queue *,
1151 unsigned short);
1140extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 1152extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1141extern void blk_queue_max_discard_sectors(struct request_queue *q, 1153extern void blk_queue_max_discard_sectors(struct request_queue *q,
1142 unsigned int max_discard_sectors); 1154 unsigned int max_discard_sectors);
@@ -1179,8 +1191,16 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1179extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 1191extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1180extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); 1192extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1181extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); 1193extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1182extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
1183 1194
1195/*
1196 * Number of physical segments as sent to the device.
1197 *
1198 * Normally this is the number of discontiguous data segments sent by the
1199 * submitter. But for data-less command like discard we might have no
 1200 * actual data segments submitted, but the driver might have to add its
1201 * own special payload. In that case we still return 1 here so that this
1202 * special payload will be mapped.
1203 */
1184static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) 1204static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1185{ 1205{
1186 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1206 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
@@ -1188,6 +1208,15 @@ static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1188 return rq->nr_phys_segments; 1208 return rq->nr_phys_segments;
1189} 1209}
1190 1210
1211/*
1212 * Number of discard segments (or ranges) the driver needs to fill in.
1213 * Each discard bio merged into a request is counted as one segment.
1214 */
1215static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1216{
1217 return max_t(unsigned short, rq->nr_phys_segments, 1);
1218}
1219
1191extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 1220extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1192extern void blk_dump_rq_flags(struct request *, char *); 1221extern void blk_dump_rq_flags(struct request *, char *);
1193extern long nr_blockdev_pages(void); 1222extern long nr_blockdev_pages(void);
@@ -1376,6 +1405,11 @@ static inline unsigned short queue_max_segments(struct request_queue *q)
1376 return q->limits.max_segments; 1405 return q->limits.max_segments;
1377} 1406}
1378 1407
1408static inline unsigned short queue_max_discard_segments(struct request_queue *q)
1409{
1410 return q->limits.max_discard_segments;
1411}
1412
1379static inline unsigned int queue_max_segment_size(struct request_queue *q) 1413static inline unsigned int queue_max_segment_size(struct request_queue *q)
1380{ 1414{
1381 return q->limits.max_segment_size; 1415 return q->limits.max_segment_size;
@@ -1620,6 +1654,25 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
1620 return __bvec_gap_to_prev(q, bprv, offset); 1654 return __bvec_gap_to_prev(q, bprv, offset);
1621} 1655}
1622 1656
1657/*
1658 * Check if the two bvecs from two bios can be merged to one segment.
1659 * If yes, no need to check gap between the two bios since the 1st bio
1660 * and the 1st bvec in the 2nd bio can be handled in one segment.
1661 */
1662static inline bool bios_segs_mergeable(struct request_queue *q,
1663 struct bio *prev, struct bio_vec *prev_last_bv,
1664 struct bio_vec *next_first_bv)
1665{
1666 if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
1667 return false;
1668 if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
1669 return false;
1670 if (prev->bi_seg_back_size + next_first_bv->bv_len >
1671 queue_max_segment_size(q))
1672 return false;
1673 return true;
1674}
1675
1623static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, 1676static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1624 struct bio *next) 1677 struct bio *next)
1625{ 1678{
@@ -1629,7 +1682,8 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1629 bio_get_last_bvec(prev, &pb); 1682 bio_get_last_bvec(prev, &pb);
1630 bio_get_first_bvec(next, &nb); 1683 bio_get_first_bvec(next, &nb);
1631 1684
1632 return __bvec_gap_to_prev(q, &pb, nb.bv_offset); 1685 if (!bios_segs_mergeable(q, prev, &pb, &nb))
1686 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1633 } 1687 }
1634 1688
1635 return false; 1689 return false;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index e417f080219a..d2e908586e3d 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -30,9 +30,6 @@ struct blk_trace {
30 30
31extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 31extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
32extern void blk_trace_shutdown(struct request_queue *); 32extern void blk_trace_shutdown(struct request_queue *);
33extern int do_blk_trace_setup(struct request_queue *q, char *name,
34 dev_t dev, struct block_device *bdev,
35 struct blk_user_trace_setup *buts);
36extern __printf(2, 3) 33extern __printf(2, 3)
37void __trace_note_message(struct blk_trace *, const char *fmt, ...); 34void __trace_note_message(struct blk_trace *, const char *fmt, ...);
38 35
@@ -80,7 +77,6 @@ extern struct attribute_group blk_trace_attr_group;
80#else /* !CONFIG_BLK_DEV_IO_TRACE */ 77#else /* !CONFIG_BLK_DEV_IO_TRACE */
81# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 78# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
82# define blk_trace_shutdown(q) do { } while (0) 79# define blk_trace_shutdown(q) do { } while (0)
83# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
84# define blk_add_driver_data(q, rq, data, len) do {} while (0) 80# define blk_add_driver_data(q, rq, data, len) do {} while (0)
85# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) 81# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
86# define blk_trace_startstop(q, start) (-ENOTTY) 82# define blk_trace_startstop(q, start) (-ENOTTY)
@@ -110,16 +106,16 @@ struct compat_blk_user_trace_setup {
110 106
111#endif 107#endif
112 108
113#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) 109extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
114 110
115static inline int blk_cmd_buf_len(struct request *rq) 111static inline sector_t blk_rq_trace_sector(struct request *rq)
116{ 112{
117 return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; 113 return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
118} 114}
119 115
120extern void blk_dump_cmd(char *buf, struct request *rq); 116static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
121extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); 117{
122 118 return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
123#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 119}
124 120
125#endif 121#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 92bc89ae7e20..c970a25d2a49 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -21,20 +21,19 @@ struct cgroup_bpf {
21 */ 21 */
22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; 22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; 23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
24 bool disallow_override[MAX_BPF_ATTACH_TYPE];
24}; 25};
25 26
26void cgroup_bpf_put(struct cgroup *cgrp); 27void cgroup_bpf_put(struct cgroup *cgrp);
27void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); 28void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
28 29
29void __cgroup_bpf_update(struct cgroup *cgrp, 30int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
30 struct cgroup *parent, 31 struct bpf_prog *prog, enum bpf_attach_type type,
31 struct bpf_prog *prog, 32 bool overridable);
32 enum bpf_attach_type type);
33 33
34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ 34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
35void cgroup_bpf_update(struct cgroup *cgrp, 35int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
36 struct bpf_prog *prog, 36 enum bpf_attach_type type, bool overridable);
37 enum bpf_attach_type type);
38 37
39int __cgroup_bpf_run_filter_skb(struct sock *sk, 38int __cgroup_bpf_run_filter_skb(struct sock *sk,
40 struct sk_buff *skb, 39 struct sk_buff *skb,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f74ae68086dc..909fc033173a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -8,10 +8,12 @@
8#define _LINUX_BPF_H 1 8#define _LINUX_BPF_H 1
9 9
10#include <uapi/linux/bpf.h> 10#include <uapi/linux/bpf.h>
11
11#include <linux/workqueue.h> 12#include <linux/workqueue.h>
12#include <linux/file.h> 13#include <linux/file.h>
13#include <linux/percpu.h> 14#include <linux/percpu.h>
14#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/rbtree_latch.h>
15 17
16struct perf_event; 18struct perf_event;
17struct bpf_map; 19struct bpf_map;
@@ -69,14 +71,14 @@ enum bpf_arg_type {
69 /* the following constraints used to prototype bpf_memcmp() and other 71 /* the following constraints used to prototype bpf_memcmp() and other
70 * functions that access data on eBPF program stack 72 * functions that access data on eBPF program stack
71 */ 73 */
72 ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ 74 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
73 ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not 75 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
74 * need to be initialized, helper function must fill 76 * helper function must fill all bytes or clear
75 * all bytes or clear them in error case. 77 * them in error case.
76 */ 78 */
77 79
78 ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ 80 ARG_CONST_SIZE, /* number of bytes accessed from memory */
79 ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */ 81 ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
80 82
81 ARG_PTR_TO_CTX, /* pointer to context */ 83 ARG_PTR_TO_CTX, /* pointer to context */
82 ARG_ANYTHING, /* any (initialized) argument is ok */ 84 ARG_ANYTHING, /* any (initialized) argument is ok */
@@ -161,9 +163,10 @@ struct bpf_verifier_ops {
161 enum bpf_reg_type *reg_type); 163 enum bpf_reg_type *reg_type);
162 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, 164 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
163 const struct bpf_prog *prog); 165 const struct bpf_prog *prog);
164 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, 166 u32 (*convert_ctx_access)(enum bpf_access_type type,
165 int src_reg, int ctx_off, 167 const struct bpf_insn *src,
166 struct bpf_insn *insn, struct bpf_prog *prog); 168 struct bpf_insn *dst,
169 struct bpf_prog *prog);
167}; 170};
168 171
169struct bpf_prog_type_list { 172struct bpf_prog_type_list {
@@ -176,6 +179,8 @@ struct bpf_prog_aux {
176 atomic_t refcnt; 179 atomic_t refcnt;
177 u32 used_map_cnt; 180 u32 used_map_cnt;
178 u32 max_ctx_offset; 181 u32 max_ctx_offset;
182 struct latch_tree_node ksym_tnode;
183 struct list_head ksym_lnode;
179 const struct bpf_verifier_ops *ops; 184 const struct bpf_verifier_ops *ops;
180 struct bpf_map **used_maps; 185 struct bpf_map **used_maps;
181 struct bpf_prog *prog; 186 struct bpf_prog *prog;
@@ -216,7 +221,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
216u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 221u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
217 222
218bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 223bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
219int bpf_prog_calc_digest(struct bpf_prog *fp); 224int bpf_prog_calc_tag(struct bpf_prog *fp);
220 225
221const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 226const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
222 227
@@ -247,6 +252,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
247void bpf_map_put_with_uref(struct bpf_map *map); 252void bpf_map_put_with_uref(struct bpf_map *map);
248void bpf_map_put(struct bpf_map *map); 253void bpf_map_put(struct bpf_map *map);
249int bpf_map_precharge_memlock(u32 pages); 254int bpf_map_precharge_memlock(u32 pages);
255void *bpf_map_area_alloc(size_t size);
256void bpf_map_area_free(void *base);
250 257
251extern int sysctl_unprivileged_bpf_disabled; 258extern int sysctl_unprivileged_bpf_disabled;
252 259
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
new file mode 100644
index 000000000000..b22efbdd2eb4
--- /dev/null
+++ b/include/linux/bpf_trace.h
@@ -0,0 +1,7 @@
1#ifndef __LINUX_BPF_TRACE_H__
2#define __LINUX_BPF_TRACE_H__
3
4#include <trace/events/bpf.h>
5#include <trace/events/xdp.h>
6
7#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 4f7d8be9ddbf..55e517130311 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -17,6 +17,7 @@
17#define PHY_ID_BCM5482 0x0143bcb0 17#define PHY_ID_BCM5482 0x0143bcb0
18#define PHY_ID_BCM5411 0x00206070 18#define PHY_ID_BCM5411 0x00206070
19#define PHY_ID_BCM5421 0x002060e0 19#define PHY_ID_BCM5421 0x002060e0
20#define PHY_ID_BCM54210E 0x600d84a0
20#define PHY_ID_BCM5464 0x002060b0 21#define PHY_ID_BCM5464 0x002060b0
21#define PHY_ID_BCM5461 0x002060c0 22#define PHY_ID_BCM5461 0x002060c0
22#define PHY_ID_BCM54612E 0x03625e60 23#define PHY_ID_BCM54612E 0x03625e60
@@ -24,6 +25,7 @@
24#define PHY_ID_BCM57780 0x03625d90 25#define PHY_ID_BCM57780 0x03625d90
25 26
26#define PHY_ID_BCM7250 0xae025280 27#define PHY_ID_BCM7250 0xae025280
28#define PHY_ID_BCM7278 0xae0251a0
27#define PHY_ID_BCM7364 0xae025260 29#define PHY_ID_BCM7364 0xae025260
28#define PHY_ID_BCM7366 0x600d8490 30#define PHY_ID_BCM7366 0x600d8490
29#define PHY_ID_BCM7346 0x600d8650 31#define PHY_ID_BCM7346 0x600d8650
@@ -31,6 +33,7 @@
31#define PHY_ID_BCM7425 0x600d86b0 33#define PHY_ID_BCM7425 0x600d86b0
32#define PHY_ID_BCM7429 0x600d8730 34#define PHY_ID_BCM7429 0x600d8730
33#define PHY_ID_BCM7435 0x600d8750 35#define PHY_ID_BCM7435 0x600d8750
36#define PHY_ID_BCM74371 0xae0252e0
34#define PHY_ID_BCM7439 0x600d8480 37#define PHY_ID_BCM7439 0x600d8480
35#define PHY_ID_BCM7439_2 0xae025080 38#define PHY_ID_BCM7439_2 0xae025080
36#define PHY_ID_BCM7445 0x600d8510 39#define PHY_ID_BCM7445 0x600d8510
@@ -103,19 +106,17 @@
103/* 106/*
104 * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18) 107 * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
105 */ 108 */
106#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 109#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
107#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 110#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
108#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 111#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
109 112
110#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000 113#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
111#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100 114#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
112#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200 115#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
113#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000 116#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
114#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007 117#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
115#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
116#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8)
117#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4)
118 118
119#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
119#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007 120#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007
120 121
121/* 122/*
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 657a718c27d2..e34dde2da0ef 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -66,9 +66,8 @@ struct bsg_job {
66 66
67void bsg_job_done(struct bsg_job *job, int result, 67void bsg_job_done(struct bsg_job *job, int result,
68 unsigned int reply_payload_rcv_len); 68 unsigned int reply_payload_rcv_len);
69int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, 69struct request_queue *bsg_setup_queue(struct device *dev, char *name,
70 bsg_job_fn *job_fn, int dd_job_size); 70 bsg_job_fn *job_fn, int dd_job_size);
71void bsg_request_fn(struct request_queue *q);
72void bsg_job_put(struct bsg_job *job); 71void bsg_job_put(struct bsg_job *job);
73int __must_check bsg_job_get(struct bsg_job *job); 72int __must_check bsg_job_get(struct bsg_job *job);
74 73
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d67ab83823ad..79591c3660cc 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -243,12 +243,10 @@ static inline int block_page_mkwrite_return(int err)
243{ 243{
244 if (err == 0) 244 if (err == 0)
245 return VM_FAULT_LOCKED; 245 return VM_FAULT_LOCKED;
246 if (err == -EFAULT) 246 if (err == -EFAULT || err == -EAGAIN)
247 return VM_FAULT_NOPAGE; 247 return VM_FAULT_NOPAGE;
248 if (err == -ENOMEM) 248 if (err == -ENOMEM)
249 return VM_FAULT_OOM; 249 return VM_FAULT_OOM;
250 if (err == -EAGAIN)
251 return VM_FAULT_RETRY;
252 /* -ENOSPC, -EDQUOT, -EIO ... */ 250 /* -ENOSPC, -EDQUOT, -EIO ... */
253 return VM_FAULT_SIGBUS; 251 return VM_FAULT_SIGBUS;
254} 252}
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a0875001b13c..df08a41d5be5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
45extern int can_proto_register(const struct can_proto *cp); 45extern int can_proto_register(const struct can_proto *cp);
46extern void can_proto_unregister(const struct can_proto *cp); 46extern void can_proto_unregister(const struct can_proto *cp);
47 47
48extern int can_rx_register(struct net_device *dev, canid_t can_id, 48int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
49 canid_t mask, 49 void (*func)(struct sk_buff *, void *),
50 void (*func)(struct sk_buff *, void *), 50 void *data, char *ident, struct sock *sk);
51 void *data, char *ident);
52 51
53extern void can_rx_unregister(struct net_device *dev, canid_t can_id, 52extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
54 canid_t mask, 53 canid_t mask,
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5f5270941ba0..141b05aade81 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -38,6 +38,13 @@ struct can_priv {
38 struct can_bittiming bittiming, data_bittiming; 38 struct can_bittiming bittiming, data_bittiming;
39 const struct can_bittiming_const *bittiming_const, 39 const struct can_bittiming_const *bittiming_const,
40 *data_bittiming_const; 40 *data_bittiming_const;
41 const u16 *termination_const;
42 unsigned int termination_const_cnt;
43 u16 termination;
44 const u32 *bitrate_const;
45 unsigned int bitrate_const_cnt;
46 const u32 *data_bitrate_const;
47 unsigned int data_bitrate_const_cnt;
41 struct can_clock clock; 48 struct can_clock clock;
42 49
43 enum can_state state; 50 enum can_state state;
@@ -53,6 +60,7 @@ struct can_priv {
53 int (*do_set_bittiming)(struct net_device *dev); 60 int (*do_set_bittiming)(struct net_device *dev);
54 int (*do_set_data_bittiming)(struct net_device *dev); 61 int (*do_set_data_bittiming)(struct net_device *dev);
55 int (*do_set_mode)(struct net_device *dev, enum can_mode mode); 62 int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
63 int (*do_set_termination)(struct net_device *dev, u16 term);
56 int (*do_get_state)(const struct net_device *dev, 64 int (*do_get_state)(const struct net_device *dev,
57 enum can_state *state); 65 enum can_state *state);
58 int (*do_get_berr_counter)(const struct net_device *dev, 66 int (*do_get_berr_counter)(const struct net_device *dev,
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
new file mode 100644
index 000000000000..cb31683bbe15
--- /dev/null
+++ b/include/linux/can/rx-offload.h
@@ -0,0 +1,59 @@
1/*
2 * linux/can/rx-offload.h
3 *
4 * Copyright (c) 2014 David Jander, Protonic Holland
5 * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the version 2 of the GNU General Public License
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef _CAN_RX_OFFLOAD_H
18#define _CAN_RX_OFFLOAD_H
19
20#include <linux/netdevice.h>
21#include <linux/can.h>
22
23struct can_rx_offload {
24 struct net_device *dev;
25
26 unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf,
27 u32 *timestamp, unsigned int mb);
28
29 struct sk_buff_head skb_queue;
30 u32 skb_queue_len_max;
31
32 unsigned int mb_first;
33 unsigned int mb_last;
34
35 struct napi_struct napi;
36
37 bool inc;
38};
39
40int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload);
41int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
42int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
43int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
44int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
45void can_rx_offload_reset(struct can_rx_offload *offload);
46void can_rx_offload_del(struct can_rx_offload *offload);
47void can_rx_offload_enable(struct can_rx_offload *offload);
48
49static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
50{
51 napi_schedule(&offload->napi);
52}
53
54static inline void can_rx_offload_disable(struct can_rx_offload *offload)
55{
56 napi_disable(&offload->napi);
57}
58
59#endif /* !_CAN_RX_OFFLOAD_H */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 8609d577bb66..6e8f209a6dff 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -36,7 +36,7 @@ struct packet_command
36 36
37/* Uniform cdrom data structures for cdrom.c */ 37/* Uniform cdrom data structures for cdrom.c */
38struct cdrom_device_info { 38struct cdrom_device_info {
39 struct cdrom_device_ops *ops; /* link to device_ops */ 39 const struct cdrom_device_ops *ops; /* link to device_ops */
40 struct list_head list; /* linked list of all device_info */ 40 struct list_head list; /* linked list of all device_info */
41 struct gendisk *disk; /* matching block layer disk */ 41 struct gendisk *disk; /* matching block layer disk */
42 void *handle; /* driver-dependent data */ 42 void *handle; /* driver-dependent data */
@@ -87,7 +87,6 @@ struct cdrom_device_ops {
87 87
88/* driver specifications */ 88/* driver specifications */
89 const int capability; /* capability flags */ 89 const int capability; /* capability flags */
90 int n_minors; /* number of active minor devices */
91 /* handle uniform packets for scsi type devices (scsi,atapi) */ 90 /* handle uniform packets for scsi type devices (scsi,atapi) */
92 int (*generic_packet) (struct cdrom_device_info *, 91 int (*generic_packet) (struct cdrom_device_info *,
93 struct packet_command *); 92 struct packet_command *);
@@ -123,6 +122,8 @@ extern int cdrom_mode_sense(struct cdrom_device_info *cdi,
123 int page_code, int page_control); 122 int page_code, int page_control);
124extern void init_cdrom_command(struct packet_command *cgc, 123extern void init_cdrom_command(struct packet_command *cgc,
125 void *buffer, int len, int type); 124 void *buffer, int len, int type);
125extern int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
126 struct packet_command *cgc);
126 127
127/* The SCSI spec says there could be 256 slots. */ 128/* The SCSI spec says there could be 256 slots. */
128#define CDROM_MAX_SLOTS 256 129#define CDROM_MAX_SLOTS 256
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0d442e34c349..5d3053c34fb3 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -224,4 +224,13 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
224 224
225#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 225#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
226 226
227#define CLOCKEVENT_OF_DECLARE(name, compat, fn) \
228 OF_DECLARE_1_RET(clkevt, name, compat, fn)
229
230#ifdef CONFIG_CLKEVT_PROBE
231extern int clockevent_probe(void);
232#else
233static inline int clockevent_probe(void) { return 0; }
234#endif
235
227#endif /* _LINUX_CLOCKCHIPS_H */ 236#endif /* _LINUX_CLOCKCHIPS_H */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index e315d04a2fd9..cfc75848a35d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -62,6 +62,8 @@ struct module;
62 * @archdata: arch-specific data 62 * @archdata: arch-specific data
63 * @suspend: suspend function for the clocksource, if necessary 63 * @suspend: suspend function for the clocksource, if necessary
64 * @resume: resume function for the clocksource, if necessary 64 * @resume: resume function for the clocksource, if necessary
65 * @mark_unstable: Optional function to inform the clocksource driver that
66 * the watchdog marked the clocksource unstable
65 * @owner: module reference, must be set by clocksource in modules 67 * @owner: module reference, must be set by clocksource in modules
66 * 68 *
67 * Note: This struct is not used in hotpathes of the timekeeping code 69 * Note: This struct is not used in hotpathes of the timekeeping code
@@ -93,6 +95,7 @@ struct clocksource {
93 unsigned long flags; 95 unsigned long flags;
94 void (*suspend)(struct clocksource *cs); 96 void (*suspend)(struct clocksource *cs);
95 void (*resume)(struct clocksource *cs); 97 void (*resume)(struct clocksource *cs);
98 void (*mark_unstable)(struct clocksource *cs);
96 99
97 /* private: */ 100 /* private: */
98#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 101#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 63609398ef9f..9e40be522793 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -731,7 +731,25 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
731static inline bool in_compat_syscall(void) { return is_compat_task(); } 731static inline bool in_compat_syscall(void) { return is_compat_task(); }
732#endif 732#endif
733 733
734#else 734/**
735 * ns_to_compat_timeval - Compat version of ns_to_timeval
736 * @nsec: the nanoseconds value to be converted
737 *
738 * Returns the compat_timeval representation of the nsec parameter.
739 */
740static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
741{
742 struct timeval tv;
743 struct compat_timeval ctv;
744
745 tv = ns_to_timeval(nsec);
746 ctv.tv_sec = tv.tv_sec;
747 ctv.tv_usec = tv.tv_usec;
748
749 return ctv;
750}
751
752#else /* !CONFIG_COMPAT */
735 753
736#define is_compat_task() (0) 754#define is_compat_task() (0)
737static inline bool in_compat_syscall(void) { return false; } 755static inline bool in_compat_syscall(void) { return false; }
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index cf0fa5d86059..91c30cba984e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -27,7 +27,11 @@ extern void __chk_user_ptr(const volatile void __user *);
27extern void __chk_io_ptr(const volatile void __iomem *); 27extern void __chk_io_ptr(const volatile void __iomem *);
28# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) 28# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
29#else /* __CHECKER__ */ 29#else /* __CHECKER__ */
30# define __user 30# ifdef STRUCTLEAK_PLUGIN
31# define __user __attribute__((user))
32# else
33# define __user
34# endif
31# define __kernel 35# define __kernel
32# define __safe 36# define __safe
33# define __force 37# define __force
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7e05c5e4e45c..87165f06a307 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -31,7 +31,7 @@
31 31
32#define CPUFREQ_ETERNAL (-1) 32#define CPUFREQ_ETERNAL (-1)
33#define CPUFREQ_NAME_LEN 16 33#define CPUFREQ_NAME_LEN 16
34/* Print length for names. Extra 1 space for accomodating '\n' in prints */ 34/* Print length for names. Extra 1 space for accommodating '\n' in prints */
35#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) 35#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
36 36
37struct cpufreq_governor; 37struct cpufreq_governor;
@@ -115,7 +115,7 @@ struct cpufreq_policy {
115 * guarantee that frequency can be changed on any CPU sharing the 115 * guarantee that frequency can be changed on any CPU sharing the
116 * policy and that the change will affect all of the policy CPUs then. 116 * policy and that the change will affect all of the policy CPUs then.
117 * - fast_switch_enabled is to be set by governors that support fast 117 * - fast_switch_enabled is to be set by governors that support fast
118 * freqnency switching with the help of cpufreq_enable_fast_switch(). 118 * frequency switching with the help of cpufreq_enable_fast_switch().
119 */ 119 */
120 bool fast_switch_possible; 120 bool fast_switch_possible;
121 bool fast_switch_enabled; 121 bool fast_switch_enabled;
@@ -415,9 +415,6 @@ static inline void cpufreq_resume(void) {}
415/* Policy Notifiers */ 415/* Policy Notifiers */
416#define CPUFREQ_ADJUST (0) 416#define CPUFREQ_ADJUST (0)
417#define CPUFREQ_NOTIFY (1) 417#define CPUFREQ_NOTIFY (1)
418#define CPUFREQ_START (2)
419#define CPUFREQ_CREATE_POLICY (3)
420#define CPUFREQ_REMOVE_POLICY (4)
421 418
422#ifdef CONFIG_CPU_FREQ 419#ifdef CONFIG_CPU_FREQ
423int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 420int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 20bfefbe7594..bb790c4db0c5 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,9 +8,7 @@ enum cpuhp_state {
8 CPUHP_CREATE_THREADS, 8 CPUHP_CREATE_THREADS,
9 CPUHP_PERF_PREPARE, 9 CPUHP_PERF_PREPARE,
10 CPUHP_PERF_X86_PREPARE, 10 CPUHP_PERF_X86_PREPARE,
11 CPUHP_PERF_X86_UNCORE_PREP,
12 CPUHP_PERF_X86_AMD_UNCORE_PREP, 11 CPUHP_PERF_X86_AMD_UNCORE_PREP,
13 CPUHP_PERF_X86_RAPL_PREP,
14 CPUHP_PERF_BFIN, 12 CPUHP_PERF_BFIN,
15 CPUHP_PERF_POWER, 13 CPUHP_PERF_POWER,
16 CPUHP_PERF_SUPERH, 14 CPUHP_PERF_SUPERH,
@@ -74,6 +72,8 @@ enum cpuhp_state {
74 CPUHP_ZCOMP_PREPARE, 72 CPUHP_ZCOMP_PREPARE,
75 CPUHP_TIMERS_DEAD, 73 CPUHP_TIMERS_DEAD,
76 CPUHP_MIPS_SOC_PREPARE, 74 CPUHP_MIPS_SOC_PREPARE,
75 CPUHP_BP_PREPARE_DYN,
76 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
77 CPUHP_BRINGUP_CPU, 77 CPUHP_BRINGUP_CPU,
78 CPUHP_AP_IDLE_DEAD, 78 CPUHP_AP_IDLE_DEAD,
79 CPUHP_AP_OFFLINE, 79 CPUHP_AP_OFFLINE,
@@ -84,7 +84,6 @@ enum cpuhp_state {
84 CPUHP_AP_IRQ_ARMADA_XP_STARTING, 84 CPUHP_AP_IRQ_ARMADA_XP_STARTING,
85 CPUHP_AP_IRQ_BCM2836_STARTING, 85 CPUHP_AP_IRQ_BCM2836_STARTING,
86 CPUHP_AP_ARM_MVEBU_COHERENCY, 86 CPUHP_AP_ARM_MVEBU_COHERENCY,
87 CPUHP_AP_PERF_X86_UNCORE_STARTING,
88 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 87 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
89 CPUHP_AP_PERF_X86_STARTING, 88 CPUHP_AP_PERF_X86_STARTING,
90 CPUHP_AP_PERF_X86_AMD_IBS_STARTING, 89 CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
@@ -138,6 +137,7 @@ enum cpuhp_state {
138 CPUHP_AP_PERF_ARM_CCI_ONLINE, 137 CPUHP_AP_PERF_ARM_CCI_ONLINE,
139 CPUHP_AP_PERF_ARM_CCN_ONLINE, 138 CPUHP_AP_PERF_ARM_CCN_ONLINE,
140 CPUHP_AP_PERF_ARM_L2X0_ONLINE, 139 CPUHP_AP_PERF_ARM_L2X0_ONLINE,
140 CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
141 CPUHP_AP_WORKQUEUE_ONLINE, 141 CPUHP_AP_WORKQUEUE_ONLINE,
142 CPUHP_AP_RCUTREE_ONLINE, 142 CPUHP_AP_RCUTREE_ONLINE,
143 CPUHP_AP_ONLINE_DYN, 143 CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index da346f2817a8..fc1e5d7fc1c7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -62,6 +62,7 @@ struct cpuidle_state {
62}; 62};
63 63
64/* Idle State Flags */ 64/* Idle State Flags */
65#define CPUIDLE_FLAG_NONE (0x00)
65#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ 66#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
66#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ 67#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
67 68
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c717f5ea88cb..96f1e88b767c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
560static inline int cpumask_parse_user(const char __user *buf, int len, 560static inline int cpumask_parse_user(const char __user *buf, int len,
561 struct cpumask *dstp) 561 struct cpumask *dstp)
562{ 562{
563 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); 563 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
564} 564}
565 565
566/** 566/**
@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
575 struct cpumask *dstp) 575 struct cpumask *dstp)
576{ 576{
577 return bitmap_parselist_user(buf, len, cpumask_bits(dstp), 577 return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
578 nr_cpu_ids); 578 nr_cpumask_bits);
579} 579}
580 580
581/** 581/**
@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
590 char *nl = strchr(buf, '\n'); 590 char *nl = strchr(buf, '\n');
591 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); 591 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
592 592
593 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); 593 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
594} 594}
595 595
596/** 596/**
@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
602 */ 602 */
603static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 603static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
604{ 604{
605 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); 605 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
606} 606}
607 607
608/** 608/**
@@ -649,11 +649,15 @@ static inline size_t cpumask_size(void)
649 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use 649 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
650 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the 650 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
651 * other type of cpumask_var_t implementation is configured. 651 * other type of cpumask_var_t implementation is configured.
652 *
653 * Please also note that __cpumask_var_read_mostly can be used to declare
654 * a cpumask_var_t variable itself (not its content) as read mostly.
652 */ 655 */
653#ifdef CONFIG_CPUMASK_OFFSTACK 656#ifdef CONFIG_CPUMASK_OFFSTACK
654typedef struct cpumask *cpumask_var_t; 657typedef struct cpumask *cpumask_var_t;
655 658
656#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) 659#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
660#define __cpumask_var_read_mostly __read_mostly
657 661
658bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 662bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
659bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 663bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
@@ -667,6 +671,7 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
667typedef struct cpumask cpumask_var_t[1]; 671typedef struct cpumask cpumask_var_t[1];
668 672
669#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) 673#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
674#define __cpumask_var_read_mostly
670 675
671static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 676static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
672{ 677{
diff --git a/include/linux/cputime.h b/include/linux/cputime.h
index f2eb2ee535ca..a691dc4ddc13 100644
--- a/include/linux/cputime.h
+++ b/include/linux/cputime.h
@@ -1,6 +1,7 @@
1#ifndef __LINUX_CPUTIME_H 1#ifndef __LINUX_CPUTIME_H
2#define __LINUX_CPUTIME_H 2#define __LINUX_CPUTIME_H
3 3
4#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
4#include <asm/cputime.h> 5#include <asm/cputime.h>
5 6
6#ifndef cputime_to_nsecs 7#ifndef cputime_to_nsecs
@@ -8,9 +9,5 @@
8 (cputime_to_usecs(__ct) * NSEC_PER_USEC) 9 (cputime_to_usecs(__ct) * NSEC_PER_USEC)
9#endif 10#endif
10 11
11#ifndef nsecs_to_cputime 12#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
12# define nsecs_to_cputime(__nsecs) \
13 usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
14#endif
15
16#endif /* __LINUX_CPUTIME_H */ 13#endif /* __LINUX_CPUTIME_H */
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index f4754282c9c2..3252799832cf 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -15,6 +15,4 @@ void sha_transform(__u32 *digest, const char *data, __u32 *W);
15 15
16void md5_transform(__u32 *hash, __u32 const *in); 16void md5_transform(__u32 *hash, __u32 const *in);
17 17
18__u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
19
20#endif 18#endif
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 014cc564d1c4..9d571acd3a48 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -52,8 +52,7 @@ extern struct srcu_struct debugfs_srcu;
52 * Must only be called under the protection established by 52 * Must only be called under the protection established by
53 * debugfs_use_file_start(). 53 * debugfs_use_file_start().
54 */ 54 */
55static inline const struct file_operations * 55static inline const struct file_operations *debugfs_real_fops(const struct file *filp)
56debugfs_real_fops(const struct file *filp)
57 __must_hold(&debugfs_srcu) 56 __must_hold(&debugfs_srcu)
58{ 57{
59 /* 58 /*
@@ -80,6 +79,8 @@ static const struct file_operations __fops = { \
80 79
81#if defined(CONFIG_DEBUG_FS) 80#if defined(CONFIG_DEBUG_FS)
82 81
82struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
83
83struct dentry *debugfs_create_file(const char *name, umode_t mode, 84struct dentry *debugfs_create_file(const char *name, umode_t mode,
84 struct dentry *parent, void *data, 85 struct dentry *parent, void *data,
85 const struct file_operations *fops); 86 const struct file_operations *fops);
@@ -181,6 +182,12 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
181 * want to duplicate the design decision mistakes of procfs and devfs again. 182 * want to duplicate the design decision mistakes of procfs and devfs again.
182 */ 183 */
183 184
185static inline struct dentry *debugfs_lookup(const char *name,
186 struct dentry *parent)
187{
188 return ERR_PTR(-ENODEV);
189}
190
184static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, 191static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
185 struct dentry *parent, void *data, 192 struct dentry *parent, void *data,
186 const struct file_operations *fops) 193 const struct file_operations *fops)
diff --git a/include/linux/delay.h b/include/linux/delay.h
index a6ecb34cf547..2ecb3c46b20a 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -5,6 +5,17 @@
5 * Copyright (C) 1993 Linus Torvalds 5 * Copyright (C) 1993 Linus Torvalds
6 * 6 *
7 * Delay routines, using a pre-computed "loops_per_jiffy" value. 7 * Delay routines, using a pre-computed "loops_per_jiffy" value.
8 *
9 * Please note that ndelay(), udelay() and mdelay() may return early for
10 * several reasons:
11 * 1. computed loops_per_jiffy too low (due to the time taken to
12 * execute the timer interrupt.)
13 * 2. cache behaviour affecting the time it takes to execute the
14 * loop function.
15 * 3. CPU clock rate changes.
16 *
17 * Please see this thread:
18 * http://lists.openwall.net/linux-kernel/2011/01/09/56
8 */ 19 */
9 20
10#include <linux/kernel.h> 21#include <linux/kernel.h>
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6cee17c22313..00e60f79a9cc 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -17,6 +17,7 @@
17#ifndef _LINUX_DELAYACCT_H 17#ifndef _LINUX_DELAYACCT_H
18#define _LINUX_DELAYACCT_H 18#define _LINUX_DELAYACCT_H
19 19
20#include <uapi/linux/taskstats.h>
20#include <linux/sched.h> 21#include <linux/sched.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22 23
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 2de4e2eea180..e0acb0e5243b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -104,6 +104,8 @@ struct devfreq_dev_profile {
104 * struct devfreq_governor - Devfreq policy governor 104 * struct devfreq_governor - Devfreq policy governor
105 * @node: list node - contains registered devfreq governors 105 * @node: list node - contains registered devfreq governors
106 * @name: Governor's name 106 * @name: Governor's name
107 * @immutable: Immutable flag for governor. If the value is 1,
108 * this governor is never changeable to other governor.
107 * @get_target_freq: Returns desired operating frequency for the device. 109 * @get_target_freq: Returns desired operating frequency for the device.
108 * Basically, get_target_freq will run 110 * Basically, get_target_freq will run
109 * devfreq_dev_profile.get_dev_status() to get the 111 * devfreq_dev_profile.get_dev_status() to get the
@@ -121,6 +123,7 @@ struct devfreq_governor {
121 struct list_head node; 123 struct list_head node;
122 124
123 const char name[DEVFREQ_NAME_LEN]; 125 const char name[DEVFREQ_NAME_LEN];
126 const unsigned int immutable;
124 int (*get_target_freq)(struct devfreq *this, unsigned long *freq); 127 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
125 int (*event_handler)(struct devfreq *devfreq, 128 int (*event_handler)(struct devfreq *devfreq,
126 unsigned int event, void *data); 129 unsigned int event, void *data);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ef7962e84444..a7e6903866fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -55,8 +55,6 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
55 * = 2: The target wants to push back the io 55 * = 2: The target wants to push back the io
56 */ 56 */
57typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); 57typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
58typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
59 union map_info *map_context);
60typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, 58typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
61 struct request *rq, 59 struct request *rq,
62 union map_info *map_context, 60 union map_info *map_context,
@@ -163,7 +161,6 @@ struct target_type {
163 dm_ctr_fn ctr; 161 dm_ctr_fn ctr;
164 dm_dtr_fn dtr; 162 dm_dtr_fn dtr;
165 dm_map_fn map; 163 dm_map_fn map;
166 dm_map_request_fn map_rq;
167 dm_clone_and_map_request_fn clone_and_map_rq; 164 dm_clone_and_map_request_fn clone_and_map_rq;
168 dm_release_clone_request_fn release_clone_rq; 165 dm_release_clone_request_fn release_clone_rq;
169 dm_endio_fn end_io; 166 dm_endio_fn end_io;
diff --git a/include/linux/device.h b/include/linux/device.h
index 491b4c0ca633..bd684fc8ec1d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
88 * 88 *
89 * @suspend: Called when a device on this bus wants to go to sleep mode. 89 * @suspend: Called when a device on this bus wants to go to sleep mode.
90 * @resume: Called to bring a device on this bus out of sleep mode. 90 * @resume: Called to bring a device on this bus out of sleep mode.
91 * @num_vf: Called to find out how many virtual functions a device on this
92 * bus supports.
91 * @pm: Power management operations of this bus, callback the specific 93 * @pm: Power management operations of this bus, callback the specific
92 * device driver's pm-ops. 94 * device driver's pm-ops.
93 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU 95 * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -127,6 +129,8 @@ struct bus_type {
127 int (*suspend)(struct device *dev, pm_message_t state); 129 int (*suspend)(struct device *dev, pm_message_t state);
128 int (*resume)(struct device *dev); 130 int (*resume)(struct device *dev);
129 131
132 int (*num_vf)(struct device *dev);
133
130 const struct dev_pm_ops *pm; 134 const struct dev_pm_ops *pm;
131 135
132 const struct iommu_ops *iommu_ops; 136 const struct iommu_ops *iommu_ops;
@@ -1140,6 +1144,13 @@ extern int device_online(struct device *dev);
1140extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1144extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1141extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); 1145extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
1142 1146
1147static inline int dev_num_vf(struct device *dev)
1148{
1149 if (dev->bus && dev->bus->num_vf)
1150 return dev->bus->num_vf(dev);
1151 return 0;
1152}
1153
1143/* 1154/*
1144 * Root device objects for grouping under /sys/devices 1155 * Root device objects for grouping under /sys/devices
1145 */ 1156 */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7e3839..5725c94b1f12 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
27 27
28/* Domain management interface for IOMMU drivers */ 28/* Domain management interface for IOMMU drivers */
29int iommu_get_dma_cookie(struct iommu_domain *domain); 29int iommu_get_dma_cookie(struct iommu_domain *domain);
30int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
30void iommu_put_dma_cookie(struct iommu_domain *domain); 31void iommu_put_dma_cookie(struct iommu_domain *domain);
31 32
32/* Setup call for arch DMA mapping code */ 33/* Setup call for arch DMA mapping code */
@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
34 u64 size, struct device *dev); 35 u64 size, struct device *dev);
35 36
36/* General helpers for DMA-API <-> IOMMU-API interaction */ 37/* General helpers for DMA-API <-> IOMMU-API interaction */
37int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); 38int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
39 unsigned long attrs);
38 40
39/* 41/*
40 * These implement the bulk of the relevant DMA mapping callbacks, but require 42 * These implement the bulk of the relevant DMA mapping callbacks, but require
@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
65 size_t size, enum dma_data_direction dir, unsigned long attrs); 67 size_t size, enum dma_data_direction dir, unsigned long attrs);
66void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, 68void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
67 size_t size, enum dma_data_direction dir, unsigned long attrs); 69 size_t size, enum dma_data_direction dir, unsigned long attrs);
68int iommu_dma_supported(struct device *dev, u64 mask);
69int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 70int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
70 71
71/* The DMA API isn't _quite_ the whole story, though... */ 72/* The DMA API isn't _quite_ the whole story, though... */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
86 return -ENODEV; 87 return -ENODEV;
87} 88}
88 89
90static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
91{
92 return -ENODEV;
93}
94
89static inline void iommu_put_dma_cookie(struct iommu_domain *domain) 95static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
90{ 96{
91} 97}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 10c5a17b1f51..c24721a33b4c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -63,6 +63,13 @@
63#define DMA_ATTR_NO_WARN (1UL << 8) 63#define DMA_ATTR_NO_WARN (1UL << 8)
64 64
65/* 65/*
66 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
67 * accessible at an elevated privilege level (and ideally inaccessible or
68 * at least read-only at lesser-privileged levels).
69 */
70#define DMA_ATTR_PRIVILEGED (1UL << 9)
71
72/*
66 * A dma_addr_t can hold any valid DMA or bus address for the platform. 73 * A dma_addr_t can hold any valid DMA or bus address for the platform.
67 * It can be given to a device to use as a DMA source or target. A CPU cannot 74 * It can be given to a device to use as a DMA source or target. A CPU cannot
68 * reference a dma_addr_t directly because there may be translation between 75 * reference a dma_addr_t directly because there may be translation between
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index ccfd0c3777df..b63b25814d77 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -23,6 +23,7 @@ struct dw_dma;
23/** 23/**
24 * struct dw_dma_chip - representation of DesignWare DMA controller hardware 24 * struct dw_dma_chip - representation of DesignWare DMA controller hardware
25 * @dev: struct device of the DMA controller 25 * @dev: struct device of the DMA controller
26 * @id: instance ID
26 * @irq: irq line 27 * @irq: irq line
27 * @regs: memory mapped I/O space 28 * @regs: memory mapped I/O space
28 * @clk: hclk clock 29 * @clk: hclk clock
@@ -31,6 +32,7 @@ struct dw_dma;
31 */ 32 */
32struct dw_dma_chip { 33struct dw_dma_chip {
33 struct device *dev; 34 struct device *dev;
35 int id;
34 int irq; 36 int irq;
35 void __iomem *regs; 37 void __iomem *regs;
36 struct clk *clk; 38 struct clk *clk;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index feee6ec6a13b..533680860865 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -894,6 +894,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
894 len, flags); 894 len, flags);
895} 895}
896 896
897static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
898 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
899 size_t len, unsigned long flags)
900{
901 if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy)
902 return NULL;
903
904 return chan->device->device_prep_dma_memcpy(chan, dest, src,
905 len, flags);
906}
907
897static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( 908static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
898 struct dma_chan *chan, 909 struct dma_chan *chan,
899 struct scatterlist *dst_sg, unsigned int dst_nents, 910 struct scatterlist *dst_sg, unsigned int dst_nents,
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 07c52c0af62d..5b6adf964248 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -190,8 +190,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
190 * part of the memory details to the memory controller. 190 * part of the memory details to the memory controller.
191 * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers. 191 * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers.
192 * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F. 192 * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F.
193 * Those memories are labed as "PC2-" instead of "PC" to 193 * Those memories are labeled as "PC2-" instead of "PC" to
194 * differenciate from DDR. 194 * differentiate from DDR.
195 * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205 195 * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205
196 * and JESD206. 196 * and JESD206.
197 * Those memories are accessed per DIMM slot, and not by 197 * Those memories are accessed per DIMM slot, and not by
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index 051b21fedf68..2fd3993c370b 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -1,20 +1,19 @@
1#ifndef _LINUX_EFI_BGRT_H 1#ifndef _LINUX_EFI_BGRT_H
2#define _LINUX_EFI_BGRT_H 2#define _LINUX_EFI_BGRT_H
3 3
4#ifdef CONFIG_ACPI_BGRT
5
6#include <linux/acpi.h> 4#include <linux/acpi.h>
7 5
8void efi_bgrt_init(void); 6#ifdef CONFIG_ACPI_BGRT
7
8void efi_bgrt_init(struct acpi_table_header *table);
9 9
10/* The BGRT data itself; only valid if bgrt_image != NULL. */ 10/* The BGRT data itself; only valid if bgrt_image != NULL. */
11extern void *bgrt_image;
12extern size_t bgrt_image_size; 11extern size_t bgrt_image_size;
13extern struct acpi_table_bgrt *bgrt_tab; 12extern struct acpi_table_bgrt bgrt_tab;
14 13
15#else /* !CONFIG_ACPI_BGRT */ 14#else /* !CONFIG_ACPI_BGRT */
16 15
17static inline void efi_bgrt_init(void) {} 16static inline void efi_bgrt_init(struct acpi_table_header *table) {}
18 17
19#endif /* !CONFIG_ACPI_BGRT */ 18#endif /* !CONFIG_ACPI_BGRT */
20 19
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5b1af30ece55..94d34e0be24f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -509,24 +509,6 @@ typedef struct {
509 u64 query_variable_info; 509 u64 query_variable_info;
510} efi_runtime_services_64_t; 510} efi_runtime_services_64_t;
511 511
512typedef struct {
513 efi_table_hdr_t hdr;
514 void *get_time;
515 void *set_time;
516 void *get_wakeup_time;
517 void *set_wakeup_time;
518 void *set_virtual_address_map;
519 void *convert_pointer;
520 void *get_variable;
521 void *get_next_variable;
522 void *set_variable;
523 void *get_next_high_mono_count;
524 void *reset_system;
525 void *update_capsule;
526 void *query_capsule_caps;
527 void *query_variable_info;
528} efi_runtime_services_t;
529
530typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc); 512typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
531typedef efi_status_t efi_set_time_t (efi_time_t *tm); 513typedef efi_status_t efi_set_time_t (efi_time_t *tm);
532typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending, 514typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -561,6 +543,24 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
561 unsigned long size, 543 unsigned long size,
562 bool nonblocking); 544 bool nonblocking);
563 545
546typedef struct {
547 efi_table_hdr_t hdr;
548 efi_get_time_t *get_time;
549 efi_set_time_t *set_time;
550 efi_get_wakeup_time_t *get_wakeup_time;
551 efi_set_wakeup_time_t *set_wakeup_time;
552 efi_set_virtual_address_map_t *set_virtual_address_map;
553 void *convert_pointer;
554 efi_get_variable_t *get_variable;
555 efi_get_next_variable_t *get_next_variable;
556 efi_set_variable_t *set_variable;
557 efi_get_next_high_mono_count_t *get_next_high_mono_count;
558 efi_reset_system_t *reset_system;
559 efi_update_capsule_t *update_capsule;
560 efi_query_capsule_caps_t *query_capsule_caps;
561 efi_query_variable_info_t *query_variable_info;
562} efi_runtime_services_t;
563
564void efi_native_runtime_setup(void); 564void efi_native_runtime_setup(void);
565 565
566/* 566/*
@@ -611,6 +611,9 @@ void efi_native_runtime_setup(void);
611#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) 611#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
612#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) 612#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
613 613
614#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
615#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
616
614/* 617/*
615 * This GUID is used to pass to the kernel proper the struct screen_info 618 * This GUID is used to pass to the kernel proper the struct screen_info
616 * structure that was populated by the stub based on the GOP protocol instance 619 * structure that was populated by the stub based on the GOP protocol instance
@@ -1065,6 +1068,7 @@ extern int __init efi_setup_pcdp_console(char *);
1065#define EFI_ARCH_1 7 /* First arch-specific bit */ 1068#define EFI_ARCH_1 7 /* First arch-specific bit */
1066#define EFI_DBG 8 /* Print additional debug info at runtime */ 1069#define EFI_DBG 8 /* Print additional debug info at runtime */
1067#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ 1070#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
1071#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
1068 1072
1069#ifdef CONFIG_EFI 1073#ifdef CONFIG_EFI
1070/* 1074/*
@@ -1240,17 +1244,17 @@ struct efivar_entry {
1240 bool deleting; 1244 bool deleting;
1241}; 1245};
1242 1246
1243struct efi_simple_text_output_protocol_32 { 1247typedef struct {
1244 u32 reset; 1248 u32 reset;
1245 u32 output_string; 1249 u32 output_string;
1246 u32 test_string; 1250 u32 test_string;
1247}; 1251} efi_simple_text_output_protocol_32_t;
1248 1252
1249struct efi_simple_text_output_protocol_64 { 1253typedef struct {
1250 u64 reset; 1254 u64 reset;
1251 u64 output_string; 1255 u64 output_string;
1252 u64 test_string; 1256 u64 test_string;
1253}; 1257} efi_simple_text_output_protocol_64_t;
1254 1258
1255struct efi_simple_text_output_protocol { 1259struct efi_simple_text_output_protocol {
1256 void *reset; 1260 void *reset;
@@ -1476,6 +1480,14 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
1476bool efi_runtime_disabled(void); 1480bool efi_runtime_disabled(void);
1477extern void efi_call_virt_check_flags(unsigned long flags, const char *call); 1481extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
1478 1482
1483enum efi_secureboot_mode {
1484 efi_secureboot_mode_unset,
1485 efi_secureboot_mode_unknown,
1486 efi_secureboot_mode_disabled,
1487 efi_secureboot_mode_enabled,
1488};
1489enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
1490
1479/* 1491/*
1480 * Arch code can implement the following three template macros, avoiding 1492 * Arch code can implement the following three template macros, avoiding
1481 * reptition for the void/non-void return cases of {__,}efi_call_virt(): 1493 * reptition for the void/non-void return cases of {__,}efi_call_virt():
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index b276e9ef0e0b..aebecc4ed088 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -9,12 +9,22 @@
9struct io_cq; 9struct io_cq;
10struct elevator_type; 10struct elevator_type;
11 11
12typedef int (elevator_merge_fn) (struct request_queue *, struct request **, 12/*
13 * Return values from elevator merger
14 */
15enum elv_merge {
16 ELEVATOR_NO_MERGE = 0,
17 ELEVATOR_FRONT_MERGE = 1,
18 ELEVATOR_BACK_MERGE = 2,
19 ELEVATOR_DISCARD_MERGE = 3,
20};
21
22typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
13 struct bio *); 23 struct bio *);
14 24
15typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *); 25typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
16 26
17typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); 27typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
18 28
19typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, 29typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
20 struct request *, struct bio *); 30 struct request *, struct bio *);
@@ -77,6 +87,34 @@ struct elevator_ops
77 elevator_registered_fn *elevator_registered_fn; 87 elevator_registered_fn *elevator_registered_fn;
78}; 88};
79 89
90struct blk_mq_alloc_data;
91struct blk_mq_hw_ctx;
92
93struct elevator_mq_ops {
94 int (*init_sched)(struct request_queue *, struct elevator_type *);
95 void (*exit_sched)(struct elevator_queue *);
96
97 bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
98 bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
99 int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
100 void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
101 void (*requests_merged)(struct request_queue *, struct request *, struct request *);
102 struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
103 void (*put_request)(struct request *);
104 void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
105 struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
106 bool (*has_work)(struct blk_mq_hw_ctx *);
107 void (*completed_request)(struct blk_mq_hw_ctx *, struct request *);
108 void (*started_request)(struct request *);
109 void (*requeue_request)(struct request *);
110 struct request *(*former_request)(struct request_queue *, struct request *);
111 struct request *(*next_request)(struct request_queue *, struct request *);
112 int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
113 void (*put_rq_priv)(struct request_queue *, struct request *);
114 void (*init_icq)(struct io_cq *);
115 void (*exit_icq)(struct io_cq *);
116};
117
80#define ELV_NAME_MAX (16) 118#define ELV_NAME_MAX (16)
81 119
82struct elv_fs_entry { 120struct elv_fs_entry {
@@ -94,12 +132,16 @@ struct elevator_type
94 struct kmem_cache *icq_cache; 132 struct kmem_cache *icq_cache;
95 133
96 /* fields provided by elevator implementation */ 134 /* fields provided by elevator implementation */
97 struct elevator_ops ops; 135 union {
136 struct elevator_ops sq;
137 struct elevator_mq_ops mq;
138 } ops;
98 size_t icq_size; /* see iocontext.h */ 139 size_t icq_size; /* see iocontext.h */
99 size_t icq_align; /* ditto */ 140 size_t icq_align; /* ditto */
100 struct elv_fs_entry *elevator_attrs; 141 struct elv_fs_entry *elevator_attrs;
101 char elevator_name[ELV_NAME_MAX]; 142 char elevator_name[ELV_NAME_MAX];
102 struct module *elevator_owner; 143 struct module *elevator_owner;
144 bool uses_mq;
103 145
104 /* managed by elevator core */ 146 /* managed by elevator core */
105 char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ 147 char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
@@ -123,6 +165,7 @@ struct elevator_queue
123 struct kobject kobj; 165 struct kobject kobj;
124 struct mutex sysfs_lock; 166 struct mutex sysfs_lock;
125 unsigned int registered:1; 167 unsigned int registered:1;
168 unsigned int uses_mq:1;
126 DECLARE_HASHTABLE(hash, ELV_HASH_BITS); 169 DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
127}; 170};
128 171
@@ -133,12 +176,15 @@ extern void elv_dispatch_sort(struct request_queue *, struct request *);
133extern void elv_dispatch_add_tail(struct request_queue *, struct request *); 176extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
134extern void elv_add_request(struct request_queue *, struct request *, int); 177extern void elv_add_request(struct request_queue *, struct request *, int);
135extern void __elv_add_request(struct request_queue *, struct request *, int); 178extern void __elv_add_request(struct request_queue *, struct request *, int);
136extern int elv_merge(struct request_queue *, struct request **, struct bio *); 179extern enum elv_merge elv_merge(struct request_queue *, struct request **,
180 struct bio *);
137extern void elv_merge_requests(struct request_queue *, struct request *, 181extern void elv_merge_requests(struct request_queue *, struct request *,
138 struct request *); 182 struct request *);
139extern void elv_merged_request(struct request_queue *, struct request *, int); 183extern void elv_merged_request(struct request_queue *, struct request *,
184 enum elv_merge);
140extern void elv_bio_merged(struct request_queue *q, struct request *, 185extern void elv_bio_merged(struct request_queue *q, struct request *,
141 struct bio *); 186 struct bio *);
187extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
142extern void elv_requeue_request(struct request_queue *, struct request *); 188extern void elv_requeue_request(struct request_queue *, struct request *);
143extern struct request *elv_former_request(struct request_queue *, struct request *); 189extern struct request *elv_former_request(struct request_queue *, struct request *);
144extern struct request *elv_latter_request(struct request_queue *, struct request *); 190extern struct request *elv_latter_request(struct request_queue *, struct request *);
@@ -185,13 +231,6 @@ extern void elv_rb_del(struct rb_root *, struct request *);
185extern struct request *elv_rb_find(struct rb_root *, sector_t); 231extern struct request *elv_rb_find(struct rb_root *, sector_t);
186 232
187/* 233/*
188 * Return values from elevator merger
189 */
190#define ELEVATOR_NO_MERGE 0
191#define ELEVATOR_FRONT_MERGE 1
192#define ELEVATOR_BACK_MERGE 2
193
194/*
195 * Insertion selection 234 * Insertion selection
196 */ 235 */
197#define ELEVATOR_INSERT_FRONT 1 236#define ELEVATOR_INSERT_FRONT 1
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 6fec9e81bd70..c62b709b1ce0 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
54#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) 54#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
55#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) 55#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
56 56
57struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
58 unsigned int txqs,
59 unsigned int rxqs);
60#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
61
57struct sk_buff **eth_gro_receive(struct sk_buff **head, 62struct sk_buff **eth_gro_receive(struct sk_buff **head,
58 struct sk_buff *skb); 63 struct sk_buff *skb);
59int eth_gro_complete(struct sk_buff *skb, int nhoff); 64int eth_gro_complete(struct sk_buff *skb, int nhoff);
@@ -397,6 +402,66 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
397} 402}
398 403
399/** 404/**
405 * ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
406 * @addr: Pointer to a six-byte array containing the Ethernet address
407 *
408 * Return a u64 value of the address
409 */
410static inline u64 ether_addr_to_u64(const u8 *addr)
411{
412 u64 u = 0;
413 int i;
414
415 for (i = 0; i < ETH_ALEN; i++)
416 u = u << 8 | addr[i];
417
418 return u;
419}
420
421/**
422 * u64_to_ether_addr - Convert a u64 to an Ethernet address.
423 * @u: u64 to convert to an Ethernet MAC address
424 * @addr: Pointer to a six-byte array to contain the Ethernet address
425 */
426static inline void u64_to_ether_addr(u64 u, u8 *addr)
427{
428 int i;
429
430 for (i = ETH_ALEN - 1; i >= 0; i--) {
431 addr[i] = u & 0xff;
432 u = u >> 8;
433 }
434}
435
436/**
437 * eth_addr_dec - Decrement the given MAC address
438 *
439 * @addr: Pointer to a six-byte array containing Ethernet address to decrement
440 */
441static inline void eth_addr_dec(u8 *addr)
442{
443 u64 u = ether_addr_to_u64(addr);
444
445 u--;
446 u64_to_ether_addr(u, addr);
447}
448
449/**
450 * ether_addr_greater - Compare two Ethernet addresses
451 * @addr1: Pointer to a six-byte array containing the Ethernet address
452 * @addr2: Pointer other six-byte array containing the Ethernet address
453 *
454 * Compare two Ethernet addresses, returns true addr1 is greater than addr2
455 */
456static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
457{
458 u64 u1 = ether_addr_to_u64(addr1);
459 u64 u2 = ether_addr_to_u64(addr2);
460
461 return u1 > u2;
462}
463
464/**
400 * is_etherdev_addr - Tell if given Ethernet address belongs to the device. 465 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
401 * @dev: Pointer to a device structure 466 * @dev: Pointer to a device structure
402 * @addr: Pointer to a six-byte array containing the Ethernet address 467 * @addr: Pointer to a six-byte array containing the Ethernet address
diff --git a/include/linux/export.h b/include/linux/export.h
index 2a0f61fbc731..1a1dfdb2a5c6 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,12 +43,19 @@ extern struct module __this_module;
43#ifdef CONFIG_MODVERSIONS 43#ifdef CONFIG_MODVERSIONS
44/* Mark the CRC weak since genksyms apparently decides not to 44/* Mark the CRC weak since genksyms apparently decides not to
45 * generate a checksums for some symbols */ 45 * generate a checksums for some symbols */
46#if defined(CONFIG_MODULE_REL_CRCS)
46#define __CRC_SYMBOL(sym, sec) \ 47#define __CRC_SYMBOL(sym, sec) \
47 extern __visible void *__crc_##sym __attribute__((weak)); \ 48 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
48 static const unsigned long __kcrctab_##sym \ 49 " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
49 __used \ 50 " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
50 __attribute__((section("___kcrctab" sec "+" #sym), used)) \ 51 " .previous \n");
51 = (unsigned long) &__crc_##sym; 52#else
53#define __CRC_SYMBOL(sym, sec) \
54 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
55 " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
56 " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
57 " .previous \n");
58#endif
52#else 59#else
53#define __CRC_SYMBOL(sym, sec) 60#define __CRC_SYMBOL(sym, sec)
54#endif 61#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b871c0cb1f02..7010fb01a81a 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -46,7 +46,18 @@
46#define EXTCON_USB 1 46#define EXTCON_USB 1
47#define EXTCON_USB_HOST 2 47#define EXTCON_USB_HOST 2
48 48
49/* Charging external connector */ 49/*
50 * Charging external connector
51 *
52 * When one SDP charger connector was reported, we should also report
53 * the USB connector, which means EXTCON_CHG_USB_SDP should always
54 * appear together with EXTCON_USB. The same as ACA charger connector,
55 * EXTCON_CHG_USB_ACA would normally appear with EXTCON_USB_HOST.
56 *
57 * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of
58 * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at
59 * least 1A of current at 5V.
60 */
50#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ 61#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */
51#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ 62#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */
52#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ 63#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */
@@ -54,6 +65,7 @@
54#define EXTCON_CHG_USB_FAST 9 65#define EXTCON_CHG_USB_FAST 9
55#define EXTCON_CHG_USB_SLOW 10 66#define EXTCON_CHG_USB_SLOW 10
56#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */ 67#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */
68#define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */
57 69
58/* Jack external connector */ 70/* Jack external connector */
59#define EXTCON_JACK_MICROPHONE 20 71#define EXTCON_JACK_MICROPHONE 20
@@ -160,62 +172,7 @@ union extcon_property_value {
160}; 172};
161 173
162struct extcon_cable; 174struct extcon_cable;
163 175struct extcon_dev;
164/**
165 * struct extcon_dev - An extcon device represents one external connector.
166 * @name: The name of this extcon device. Parent device name is
167 * used if NULL.
168 * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
169 * If supported_cable is NULL, cable name related APIs
170 * are disabled.
171 * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
172 * be attached simultaneously. The array should be
173 * ending with NULL or be NULL (no mutually exclusive
174 * cables). For example, if it is { 0x7, 0x30, 0}, then,
175 * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
176 * be attached simulataneously. {0x7, 0} is equivalent to
177 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
178 * can be no simultaneous connections.
179 * @dev: Device of this extcon.
180 * @state: Attach/detach state of this extcon. Do not provide at
181 * register-time.
182 * @nh: Notifier for the state change events from this extcon
183 * @entry: To support list of extcon devices so that users can
184 * search for extcon devices based on the extcon name.
185 * @lock:
186 * @max_supported: Internal value to store the number of cables.
187 * @extcon_dev_type: Device_type struct to provide attribute_groups
188 * customized for each extcon device.
189 * @cables: Sysfs subdirectories. Each represents one cable.
190 *
191 * In most cases, users only need to provide "User initializing data" of
192 * this struct when registering an extcon. In some exceptional cases,
193 * optional callbacks may be needed. However, the values in "internal data"
194 * are overwritten by register function.
195 */
196struct extcon_dev {
197 /* Optional user initializing data */
198 const char *name;
199 const unsigned int *supported_cable;
200 const u32 *mutually_exclusive;
201
202 /* Internal data. Please do not set. */
203 struct device dev;
204 struct raw_notifier_head *nh;
205 struct list_head entry;
206 int max_supported;
207 spinlock_t lock; /* could be called by irq handler */
208 u32 state;
209
210 /* /sys/class/extcon/.../cable.n/... */
211 struct device_type extcon_dev_type;
212 struct extcon_cable *cables;
213
214 /* /sys/class/extcon/.../mutually_exclusive/... */
215 struct attribute_group attr_g_muex;
216 struct attribute **attrs_muex;
217 struct device_attribute *d_attrs_muex;
218};
219 176
220#if IS_ENABLED(CONFIG_EXTCON) 177#if IS_ENABLED(CONFIG_EXTCON)
221 178
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index a0e03b13b449..2aa32075bca1 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -59,7 +59,7 @@ struct adc_jack_pdata {
59 const char *name; 59 const char *name;
60 const char *consumer_channel; 60 const char *consumer_channel;
61 61
62 const enum extcon *cable_names; 62 const unsigned int *cable_names;
63 63
64 /* The last entry's state should be 0 */ 64 /* The last entry's state should be 0 */
65 struct adc_jack_cond *adc_conditions; 65 struct adc_jack_cond *adc_conditions;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a0934e6c9bab..0c167fdee5f7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -54,9 +54,17 @@ struct bpf_prog_aux;
54#define BPF_REG_AX MAX_BPF_REG 54#define BPF_REG_AX MAX_BPF_REG
55#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) 55#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
56 56
57/* As per nm, we expose JITed images as text (code) section for
58 * kallsyms. That way, tools like perf can find it to match
59 * addresses.
60 */
61#define BPF_SYM_ELF_TYPE 't'
62
57/* BPF program can access up to 512 bytes of stack space. */ 63/* BPF program can access up to 512 bytes of stack space. */
58#define MAX_BPF_STACK 512 64#define MAX_BPF_STACK 512
59 65
66#define BPF_TAG_SIZE 8
67
60/* Helper macros for filter block array initializers. */ 68/* Helper macros for filter block array initializers. */
61 69
62/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ 70/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +416,7 @@ struct bpf_prog {
408 kmemcheck_bitfield_end(meta); 416 kmemcheck_bitfield_end(meta);
409 enum bpf_prog_type type; /* Type of BPF program */ 417 enum bpf_prog_type type; /* Type of BPF program */
410 u32 len; /* Number of filter blocks */ 418 u32 len; /* Number of filter blocks */
411 u32 digest[SHA_DIGEST_WORDS]; /* Program digest */ 419 u8 tag[BPF_TAG_SIZE];
412 struct bpf_prog_aux *aux; /* Auxiliary fields */ 420 struct bpf_prog_aux *aux; /* Auxiliary fields */
413 struct sock_fprog_kern *orig_prog; /* Original BPF program */ 421 struct sock_fprog_kern *orig_prog; /* Original BPF program */
414 unsigned int (*bpf_func)(const void *ctx, 422 unsigned int (*bpf_func)(const void *ctx,
@@ -519,7 +527,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
519 return prog->len * sizeof(struct bpf_insn); 527 return prog->len * sizeof(struct bpf_insn);
520} 528}
521 529
522static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog) 530static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
523{ 531{
524 return round_up(bpf_prog_insn_size(prog) + 532 return round_up(bpf_prog_insn_size(prog) +
525 sizeof(__be64) + 1, SHA_MESSAGE_BYTES); 533 sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
@@ -543,7 +551,7 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
543 551
544#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) 552#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
545 553
546#ifdef CONFIG_DEBUG_SET_MODULE_RONX 554#ifdef CONFIG_ARCH_HAS_SET_MEMORY
547static inline void bpf_prog_lock_ro(struct bpf_prog *fp) 555static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
548{ 556{
549 set_memory_ro((unsigned long)fp, fp->pages); 557 set_memory_ro((unsigned long)fp, fp->pages);
@@ -553,6 +561,16 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
553{ 561{
554 set_memory_rw((unsigned long)fp, fp->pages); 562 set_memory_rw((unsigned long)fp, fp->pages);
555} 563}
564
565static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
566{
567 set_memory_ro((unsigned long)hdr, hdr->pages);
568}
569
570static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
571{
572 set_memory_rw((unsigned long)hdr, hdr->pages);
573}
556#else 574#else
557static inline void bpf_prog_lock_ro(struct bpf_prog *fp) 575static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
558{ 576{
@@ -561,7 +579,24 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
561static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) 579static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
562{ 580{
563} 581}
564#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ 582
583static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
584{
585}
586
587static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
588{
589}
590#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
591
592static inline struct bpf_binary_header *
593bpf_jit_binary_hdr(const struct bpf_prog *fp)
594{
595 unsigned long real_start = (unsigned long)fp->bpf_func;
596 unsigned long addr = real_start & PAGE_MASK;
597
598 return (void *)addr;
599}
565 600
566int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); 601int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
567static inline int sk_filter(struct sock *sk, struct sk_buff *skb) 602static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
@@ -605,6 +640,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
605u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 640u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
606 641
607struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); 642struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
643void bpf_jit_compile(struct bpf_prog *prog);
608bool bpf_helper_changes_pkt_data(void *func); 644bool bpf_helper_changes_pkt_data(void *func);
609 645
610struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 646struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -614,6 +650,7 @@ void bpf_warn_invalid_xdp_action(u32 act);
614#ifdef CONFIG_BPF_JIT 650#ifdef CONFIG_BPF_JIT
615extern int bpf_jit_enable; 651extern int bpf_jit_enable;
616extern int bpf_jit_harden; 652extern int bpf_jit_harden;
653extern int bpf_jit_kallsyms;
617 654
618typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); 655typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
619 656
@@ -623,7 +660,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
623 bpf_jit_fill_hole_t bpf_fill_ill_insns); 660 bpf_jit_fill_hole_t bpf_fill_ill_insns);
624void bpf_jit_binary_free(struct bpf_binary_header *hdr); 661void bpf_jit_binary_free(struct bpf_binary_header *hdr);
625 662
626void bpf_jit_compile(struct bpf_prog *fp);
627void bpf_jit_free(struct bpf_prog *fp); 663void bpf_jit_free(struct bpf_prog *fp);
628 664
629struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); 665struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
@@ -649,6 +685,11 @@ static inline bool bpf_jit_is_ebpf(void)
649# endif 685# endif
650} 686}
651 687
688static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
689{
690 return fp->jited && bpf_jit_is_ebpf();
691}
692
652static inline bool bpf_jit_blinding_enabled(void) 693static inline bool bpf_jit_blinding_enabled(void)
653{ 694{
654 /* These are the prerequisites, should someone ever have the 695 /* These are the prerequisites, should someone ever have the
@@ -666,15 +707,91 @@ static inline bool bpf_jit_blinding_enabled(void)
666 707
667 return true; 708 return true;
668} 709}
669#else 710
670static inline void bpf_jit_compile(struct bpf_prog *fp) 711static inline bool bpf_jit_kallsyms_enabled(void)
712{
713 /* There are a couple of corner cases where kallsyms should
714 * not be enabled f.e. on hardening.
715 */
716 if (bpf_jit_harden)
717 return false;
718 if (!bpf_jit_kallsyms)
719 return false;
720 if (bpf_jit_kallsyms == 1)
721 return true;
722
723 return false;
724}
725
726const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
727 unsigned long *off, char *sym);
728bool is_bpf_text_address(unsigned long addr);
729int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
730 char *sym);
731
732static inline const char *
733bpf_address_lookup(unsigned long addr, unsigned long *size,
734 unsigned long *off, char **modname, char *sym)
735{
736 const char *ret = __bpf_address_lookup(addr, size, off, sym);
737
738 if (ret && modname)
739 *modname = NULL;
740 return ret;
741}
742
743void bpf_prog_kallsyms_add(struct bpf_prog *fp);
744void bpf_prog_kallsyms_del(struct bpf_prog *fp);
745
746#else /* CONFIG_BPF_JIT */
747
748static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
671{ 749{
750 return false;
672} 751}
673 752
674static inline void bpf_jit_free(struct bpf_prog *fp) 753static inline void bpf_jit_free(struct bpf_prog *fp)
675{ 754{
676 bpf_prog_unlock_free(fp); 755 bpf_prog_unlock_free(fp);
677} 756}
757
758static inline bool bpf_jit_kallsyms_enabled(void)
759{
760 return false;
761}
762
763static inline const char *
764__bpf_address_lookup(unsigned long addr, unsigned long *size,
765 unsigned long *off, char *sym)
766{
767 return NULL;
768}
769
770static inline bool is_bpf_text_address(unsigned long addr)
771{
772 return false;
773}
774
775static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
776 char *type, char *sym)
777{
778 return -ERANGE;
779}
780
781static inline const char *
782bpf_address_lookup(unsigned long addr, unsigned long *size,
783 unsigned long *off, char **modname, char *sym)
784{
785 return NULL;
786}
787
788static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
789{
790}
791
792static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
793{
794}
678#endif /* CONFIG_BPF_JIT */ 795#endif /* CONFIG_BPF_JIT */
679 796
680#define BPF_ANC BIT(15) 797#define BPF_ANC BIT(15)
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 16551d5eac36..57beb5d09bfc 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -22,6 +22,7 @@
22#define _LINUX_FPGA_MGR_H 22#define _LINUX_FPGA_MGR_H
23 23
24struct fpga_manager; 24struct fpga_manager;
25struct sg_table;
25 26
26/** 27/**
27 * enum fpga_mgr_states - fpga framework states 28 * enum fpga_mgr_states - fpga framework states
@@ -88,6 +89,7 @@ struct fpga_image_info {
88 * @state: returns an enum value of the FPGA's state 89 * @state: returns an enum value of the FPGA's state
89 * @write_init: prepare the FPGA to receive confuration data 90 * @write_init: prepare the FPGA to receive confuration data
90 * @write: write count bytes of configuration data to the FPGA 91 * @write: write count bytes of configuration data to the FPGA
92 * @write_sg: write the scatter list of configuration data to the FPGA
91 * @write_complete: set FPGA to operating state after writing is done 93 * @write_complete: set FPGA to operating state after writing is done
92 * @fpga_remove: optional: Set FPGA into a specific state during driver remove 94 * @fpga_remove: optional: Set FPGA into a specific state during driver remove
93 * 95 *
@@ -102,6 +104,7 @@ struct fpga_manager_ops {
102 struct fpga_image_info *info, 104 struct fpga_image_info *info,
103 const char *buf, size_t count); 105 const char *buf, size_t count);
104 int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); 106 int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
107 int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
105 int (*write_complete)(struct fpga_manager *mgr, 108 int (*write_complete)(struct fpga_manager *mgr,
106 struct fpga_image_info *info); 109 struct fpga_image_info *info);
107 void (*fpga_remove)(struct fpga_manager *mgr); 110 void (*fpga_remove)(struct fpga_manager *mgr);
@@ -129,6 +132,8 @@ struct fpga_manager {
129 132
130int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, 133int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
131 const char *buf, size_t count); 134 const char *buf, size_t count);
135int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
136 struct sg_table *sgt);
132 137
133int fpga_mgr_firmware_load(struct fpga_manager *mgr, 138int fpga_mgr_firmware_load(struct fpga_manager *mgr,
134 struct fpga_image_info *info, 139 struct fpga_image_info *info,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2ba074328894..c930cbc19342 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -423,6 +423,7 @@ struct block_device {
423 int bd_invalidated; 423 int bd_invalidated;
424 struct gendisk * bd_disk; 424 struct gendisk * bd_disk;
425 struct request_queue * bd_queue; 425 struct request_queue * bd_queue;
426 struct backing_dev_info *bd_bdi;
426 struct list_head bd_list; 427 struct list_head bd_list;
427 /* 428 /*
428 * Private data. You must have bd_claim'ed the block_device 429 * Private data. You must have bd_claim'ed the block_device
@@ -2342,6 +2343,7 @@ extern struct kmem_cache *names_cachep;
2342#ifdef CONFIG_BLOCK 2343#ifdef CONFIG_BLOCK
2343extern int register_blkdev(unsigned int, const char *); 2344extern int register_blkdev(unsigned int, const char *);
2344extern void unregister_blkdev(unsigned int, const char *); 2345extern void unregister_blkdev(unsigned int, const char *);
2346extern void bdev_unhash_inode(dev_t dev);
2345extern struct block_device *bdget(dev_t); 2347extern struct block_device *bdget(dev_t);
2346extern struct block_device *bdgrab(struct block_device *bdev); 2348extern struct block_device *bdgrab(struct block_device *bdev);
2347extern void bd_set_size(struct block_device *, loff_t size); 2349extern void bd_set_size(struct block_device *, loff_t size);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552e6c09..4c467ef50159 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@ struct fscache_object {
360#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ 360#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
361#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ 361#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
362#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ 362#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
363#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
363 364
364 struct list_head cache_link; /* link in cache->object_list */ 365 struct list_head cache_link; /* link in cache->object_list */
365 struct hlist_node cookie_link; /* link in cookie->backing_objects */ 366 struct hlist_node cookie_link; /* link in cookie->backing_objects */
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
new file mode 100644
index 000000000000..547f81592ba1
--- /dev/null
+++ b/include/linux/fscrypt_common.h
@@ -0,0 +1,146 @@
1/*
2 * fscrypt_common.h: common declarations for per-file encryption
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * Written by Michael Halcrow, 2015.
7 * Modified by Jaegeuk Kim, 2015.
8 */
9
10#ifndef _LINUX_FSCRYPT_COMMON_H
11#define _LINUX_FSCRYPT_COMMON_H
12
13#include <linux/key.h>
14#include <linux/fs.h>
15#include <linux/mm.h>
16#include <linux/bio.h>
17#include <linux/dcache.h>
18#include <crypto/skcipher.h>
19#include <uapi/linux/fs.h>
20
21#define FS_CRYPTO_BLOCK_SIZE 16
22
23struct fscrypt_info;
24
25struct fscrypt_ctx {
26 union {
27 struct {
28 struct page *bounce_page; /* Ciphertext page */
29 struct page *control_page; /* Original page */
30 } w;
31 struct {
32 struct bio *bio;
33 struct work_struct work;
34 } r;
35 struct list_head free_list; /* Free list */
36 };
37 u8 flags; /* Flags */
38};
39
40/**
41 * For encrypted symlinks, the ciphertext length is stored at the beginning
42 * of the string in little-endian format.
43 */
44struct fscrypt_symlink_data {
45 __le16 len;
46 char encrypted_path[1];
47} __packed;
48
49/**
50 * This function is used to calculate the disk space required to
51 * store a filename of length l in encrypted symlink format.
52 */
53static inline u32 fscrypt_symlink_data_len(u32 l)
54{
55 if (l < FS_CRYPTO_BLOCK_SIZE)
56 l = FS_CRYPTO_BLOCK_SIZE;
57 return (l + sizeof(struct fscrypt_symlink_data) - 1);
58}
59
60struct fscrypt_str {
61 unsigned char *name;
62 u32 len;
63};
64
65struct fscrypt_name {
66 const struct qstr *usr_fname;
67 struct fscrypt_str disk_name;
68 u32 hash;
69 u32 minor_hash;
70 struct fscrypt_str crypto_buf;
71};
72
73#define FSTR_INIT(n, l) { .name = n, .len = l }
74#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
75#define fname_name(p) ((p)->disk_name.name)
76#define fname_len(p) ((p)->disk_name.len)
77
78/*
79 * fscrypt superblock flags
80 */
81#define FS_CFLG_OWN_PAGES (1U << 1)
82
83/*
 84 * crypto operations for filesystems
85 */
86struct fscrypt_operations {
87 unsigned int flags;
88 const char *key_prefix;
89 int (*get_context)(struct inode *, void *, size_t);
90 int (*prepare_context)(struct inode *);
91 int (*set_context)(struct inode *, const void *, size_t, void *);
92 int (*dummy_context)(struct inode *);
93 bool (*is_encrypted)(struct inode *);
94 bool (*empty_dir)(struct inode *);
95 unsigned (*max_namelen)(struct inode *);
96};
97
98static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
99{
100 if (inode->i_sb->s_cop->dummy_context &&
101 inode->i_sb->s_cop->dummy_context(inode))
102 return true;
103 return false;
104}
105
106static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
107{
108 return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
109}
110
111static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
112{
113 return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
114}
115
116static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
117{
118 if (str->len == 1 && str->name[0] == '.')
119 return true;
120
121 if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
122 return true;
123
124 return false;
125}
126
127static inline struct page *fscrypt_control_page(struct page *page)
128{
129#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
130 return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
131#else
132 WARN_ON_ONCE(1);
133 return ERR_PTR(-EINVAL);
134#endif
135}
136
137static inline int fscrypt_has_encryption_key(const struct inode *inode)
138{
139#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
140 return (inode->i_crypt_info != NULL);
141#else
142 return 0;
143#endif
144}
145
146#endif /* _LINUX_FSCRYPT_COMMON_H */
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
new file mode 100644
index 000000000000..3511ca798804
--- /dev/null
+++ b/include/linux/fscrypt_notsupp.h
@@ -0,0 +1,168 @@
1/*
2 * fscrypt_notsupp.h
3 *
4 * This stubs out the fscrypt functions for filesystems configured without
5 * encryption support.
6 */
7
8#ifndef _LINUX_FSCRYPT_NOTSUPP_H
9#define _LINUX_FSCRYPT_NOTSUPP_H
10
11#include <linux/fscrypt_common.h>
12
13/* crypto.c */
14static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
15 gfp_t gfp_flags)
16{
17 return ERR_PTR(-EOPNOTSUPP);
18}
19
20static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
21{
22 return;
23}
24
25static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
26 struct page *page,
27 unsigned int len,
28 unsigned int offs,
29 u64 lblk_num, gfp_t gfp_flags)
30{
31 return ERR_PTR(-EOPNOTSUPP);
32}
33
34static inline int fscrypt_decrypt_page(const struct inode *inode,
35 struct page *page,
36 unsigned int len, unsigned int offs,
37 u64 lblk_num)
38{
39 return -EOPNOTSUPP;
40}
41
42
43static inline void fscrypt_restore_control_page(struct page *page)
44{
45 return;
46}
47
48static inline void fscrypt_set_d_op(struct dentry *dentry)
49{
50 return;
51}
52
53static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
54{
55 return;
56}
57
58/* policy.c */
59static inline int fscrypt_ioctl_set_policy(struct file *filp,
60 const void __user *arg)
61{
62 return -EOPNOTSUPP;
63}
64
65static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
66{
67 return -EOPNOTSUPP;
68}
69
70static inline int fscrypt_has_permitted_context(struct inode *parent,
71 struct inode *child)
72{
73 return 0;
74}
75
76static inline int fscrypt_inherit_context(struct inode *parent,
77 struct inode *child,
78 void *fs_data, bool preload)
79{
80 return -EOPNOTSUPP;
81}
82
83/* keyinfo.c */
84static inline int fscrypt_get_encryption_info(struct inode *inode)
85{
86 return -EOPNOTSUPP;
87}
88
89static inline void fscrypt_put_encryption_info(struct inode *inode,
90 struct fscrypt_info *ci)
91{
92 return;
93}
94
95 /* fname.c */
96static inline int fscrypt_setup_filename(struct inode *dir,
97 const struct qstr *iname,
98 int lookup, struct fscrypt_name *fname)
99{
100 if (dir->i_sb->s_cop->is_encrypted(dir))
101 return -EOPNOTSUPP;
102
103 memset(fname, 0, sizeof(struct fscrypt_name));
104 fname->usr_fname = iname;
105 fname->disk_name.name = (unsigned char *)iname->name;
106 fname->disk_name.len = iname->len;
107 return 0;
108}
109
110static inline void fscrypt_free_filename(struct fscrypt_name *fname)
111{
112 return;
113}
114
115static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
116 u32 ilen)
117{
118 /* never happens */
119 WARN_ON(1);
120 return 0;
121}
122
123static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
124 u32 ilen,
125 struct fscrypt_str *crypto_str)
126{
127 return -EOPNOTSUPP;
128}
129
130static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
131{
132 return;
133}
134
135static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
136 u32 hash, u32 minor_hash,
137 const struct fscrypt_str *iname,
138 struct fscrypt_str *oname)
139{
140 return -EOPNOTSUPP;
141}
142
143static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
144 const struct qstr *iname,
145 struct fscrypt_str *oname)
146{
147 return -EOPNOTSUPP;
148}
149
150/* bio.c */
151static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
152 struct bio *bio)
153{
154 return;
155}
156
157static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
158{
159 return;
160}
161
162static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
163 sector_t pblk, unsigned int len)
164{
165 return -EOPNOTSUPP;
166}
167
168#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
new file mode 100644
index 000000000000..a140f47e9b27
--- /dev/null
+++ b/include/linux/fscrypt_supp.h
@@ -0,0 +1,66 @@
1/*
2 * fscrypt_supp.h
3 *
4 * This is included by filesystems configured with encryption support.
5 */
6
7#ifndef _LINUX_FSCRYPT_SUPP_H
8#define _LINUX_FSCRYPT_SUPP_H
9
10#include <linux/fscrypt_common.h>
11
12/* crypto.c */
13extern struct kmem_cache *fscrypt_info_cachep;
14extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
15extern void fscrypt_release_ctx(struct fscrypt_ctx *);
16extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
17 unsigned int, unsigned int,
18 u64, gfp_t);
19extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
20 unsigned int, u64);
21extern void fscrypt_restore_control_page(struct page *);
22
23extern const struct dentry_operations fscrypt_d_ops;
24
25static inline void fscrypt_set_d_op(struct dentry *dentry)
26{
27 d_set_d_op(dentry, &fscrypt_d_ops);
28}
29
30static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
31{
32 spin_lock(&dentry->d_lock);
33 dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
34 spin_unlock(&dentry->d_lock);
35}
36
37/* policy.c */
38extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
39extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
40extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
41extern int fscrypt_inherit_context(struct inode *, struct inode *,
42 void *, bool);
43/* keyinfo.c */
44extern int fscrypt_get_encryption_info(struct inode *);
45extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
46
47/* fname.c */
48extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
49 int lookup, struct fscrypt_name *);
50extern void fscrypt_free_filename(struct fscrypt_name *);
51extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
52extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
53 struct fscrypt_str *);
54extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
55extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
56 const struct fscrypt_str *, struct fscrypt_str *);
57extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
58 struct fscrypt_str *);
59
60/* bio.c */
61extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
62extern void fscrypt_pullback_bio_page(struct page **, bool);
63extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
64 unsigned int);
65
66#endif /* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
deleted file mode 100644
index c074b670aa99..000000000000
--- a/include/linux/fscrypto.h
+++ /dev/null
@@ -1,345 +0,0 @@
1/*
2 * General per-file encryption definition
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * Written by Michael Halcrow, 2015.
7 * Modified by Jaegeuk Kim, 2015.
8 */
9
10#ifndef _LINUX_FSCRYPTO_H
11#define _LINUX_FSCRYPTO_H
12
13#include <linux/key.h>
14#include <linux/fs.h>
15#include <linux/mm.h>
16#include <linux/bio.h>
17#include <linux/dcache.h>
18#include <crypto/skcipher.h>
19#include <uapi/linux/fs.h>
20
21#define FS_CRYPTO_BLOCK_SIZE 16
22
23struct fscrypt_info;
24
25struct fscrypt_ctx {
26 union {
27 struct {
28 struct page *bounce_page; /* Ciphertext page */
29 struct page *control_page; /* Original page */
30 } w;
31 struct {
32 struct bio *bio;
33 struct work_struct work;
34 } r;
35 struct list_head free_list; /* Free list */
36 };
37 u8 flags; /* Flags */
38 u8 mode; /* Encryption mode for tfm */
39};
40
41/**
42 * For encrypted symlinks, the ciphertext length is stored at the beginning
43 * of the string in little-endian format.
44 */
45struct fscrypt_symlink_data {
46 __le16 len;
47 char encrypted_path[1];
48} __packed;
49
50/**
51 * This function is used to calculate the disk space required to
52 * store a filename of length l in encrypted symlink format.
53 */
54static inline u32 fscrypt_symlink_data_len(u32 l)
55{
56 if (l < FS_CRYPTO_BLOCK_SIZE)
57 l = FS_CRYPTO_BLOCK_SIZE;
58 return (l + sizeof(struct fscrypt_symlink_data) - 1);
59}
60
61struct fscrypt_str {
62 unsigned char *name;
63 u32 len;
64};
65
66struct fscrypt_name {
67 const struct qstr *usr_fname;
68 struct fscrypt_str disk_name;
69 u32 hash;
70 u32 minor_hash;
71 struct fscrypt_str crypto_buf;
72};
73
74#define FSTR_INIT(n, l) { .name = n, .len = l }
75#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
76#define fname_name(p) ((p)->disk_name.name)
77#define fname_len(p) ((p)->disk_name.len)
78
79/*
80 * fscrypt superblock flags
81 */
82#define FS_CFLG_OWN_PAGES (1U << 1)
83
84/*
 85 * crypto operations for filesystems
86 */
87struct fscrypt_operations {
88 unsigned int flags;
89 int (*get_context)(struct inode *, void *, size_t);
90 int (*key_prefix)(struct inode *, u8 **);
91 int (*prepare_context)(struct inode *);
92 int (*set_context)(struct inode *, const void *, size_t, void *);
93 int (*dummy_context)(struct inode *);
94 bool (*is_encrypted)(struct inode *);
95 bool (*empty_dir)(struct inode *);
96 unsigned (*max_namelen)(struct inode *);
97};
98
99static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
100{
101 if (inode->i_sb->s_cop->dummy_context &&
102 inode->i_sb->s_cop->dummy_context(inode))
103 return true;
104 return false;
105}
106
107static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
108{
109 return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
110}
111
112static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
113{
114 return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
115}
116
117static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
118{
119 if (str->len == 1 && str->name[0] == '.')
120 return true;
121
122 if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
123 return true;
124
125 return false;
126}
127
128static inline struct page *fscrypt_control_page(struct page *page)
129{
130#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
131 return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
132#else
133 WARN_ON_ONCE(1);
134 return ERR_PTR(-EINVAL);
135#endif
136}
137
138static inline int fscrypt_has_encryption_key(const struct inode *inode)
139{
140#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
141 return (inode->i_crypt_info != NULL);
142#else
143 return 0;
144#endif
145}
146
147static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
148{
149#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
150 spin_lock(&dentry->d_lock);
151 dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
152 spin_unlock(&dentry->d_lock);
153#endif
154}
155
156#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
157extern const struct dentry_operations fscrypt_d_ops;
158#endif
159
160static inline void fscrypt_set_d_op(struct dentry *dentry)
161{
162#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
163 d_set_d_op(dentry, &fscrypt_d_ops);
164#endif
165}
166
167#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
168/* crypto.c */
169extern struct kmem_cache *fscrypt_info_cachep;
170extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
171extern void fscrypt_release_ctx(struct fscrypt_ctx *);
172extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
173 unsigned int, unsigned int,
174 u64, gfp_t);
175extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
176 unsigned int, u64);
177extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
178extern void fscrypt_pullback_bio_page(struct page **, bool);
179extern void fscrypt_restore_control_page(struct page *);
180extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
181 unsigned int);
182/* policy.c */
183extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
184extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
185extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
186extern int fscrypt_inherit_context(struct inode *, struct inode *,
187 void *, bool);
188/* keyinfo.c */
189extern int fscrypt_get_encryption_info(struct inode *);
190extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
191
192/* fname.c */
193extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
194 int lookup, struct fscrypt_name *);
195extern void fscrypt_free_filename(struct fscrypt_name *);
196extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
197extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
198 struct fscrypt_str *);
199extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
200extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
201 const struct fscrypt_str *, struct fscrypt_str *);
202extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
203 struct fscrypt_str *);
204#endif
205
206/* crypto.c */
207static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i,
208 gfp_t f)
209{
210 return ERR_PTR(-EOPNOTSUPP);
211}
212
213static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
214{
215 return;
216}
217
218static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i,
219 struct page *p,
220 unsigned int len,
221 unsigned int offs,
222 u64 lblk_num, gfp_t f)
223{
224 return ERR_PTR(-EOPNOTSUPP);
225}
226
227static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p,
228 unsigned int len, unsigned int offs,
229 u64 lblk_num)
230{
231 return -EOPNOTSUPP;
232}
233
234static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
235 struct bio *b)
236{
237 return;
238}
239
240static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
241{
242 return;
243}
244
245static inline void fscrypt_notsupp_restore_control_page(struct page *p)
246{
247 return;
248}
249
250static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p,
251 sector_t s, unsigned int f)
252{
253 return -EOPNOTSUPP;
254}
255
256/* policy.c */
257static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f,
258 const void __user *arg)
259{
260 return -EOPNOTSUPP;
261}
262
263static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f,
264 void __user *arg)
265{
266 return -EOPNOTSUPP;
267}
268
269static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
270 struct inode *i)
271{
272 return 0;
273}
274
275static inline int fscrypt_notsupp_inherit_context(struct inode *p,
276 struct inode *i, void *v, bool b)
277{
278 return -EOPNOTSUPP;
279}
280
281/* keyinfo.c */
282static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
283{
284 return -EOPNOTSUPP;
285}
286
287static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
288 struct fscrypt_info *f)
289{
290 return;
291}
292
293 /* fname.c */
294static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
295 const struct qstr *iname,
296 int lookup, struct fscrypt_name *fname)
297{
298 if (dir->i_sb->s_cop->is_encrypted(dir))
299 return -EOPNOTSUPP;
300
301 memset(fname, 0, sizeof(struct fscrypt_name));
302 fname->usr_fname = iname;
303 fname->disk_name.name = (unsigned char *)iname->name;
304 fname->disk_name.len = iname->len;
305 return 0;
306}
307
308static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
309{
310 return;
311}
312
313static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
314{
315 /* never happens */
316 WARN_ON(1);
317 return 0;
318}
319
320static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
321 u32 ilen, struct fscrypt_str *crypto_str)
322{
323 return -EOPNOTSUPP;
324}
325
326static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
327{
328 return;
329}
330
331static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
332 u32 hash, u32 minor_hash,
333 const struct fscrypt_str *iname,
334 struct fscrypt_str *oname)
335{
336 return -EOPNOTSUPP;
337}
338
339static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
340 const struct qstr *iname,
341 struct fscrypt_str *oname)
342{
343 return -EOPNOTSUPP;
344}
345#endif /* _LINUX_FSCRYPTO_H */
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
new file mode 100644
index 000000000000..273cbf6400ea
--- /dev/null
+++ b/include/linux/fsi.h
@@ -0,0 +1,50 @@
1/* FSI device & driver interfaces
2 *
3 * Copyright (C) IBM Corporation 2016
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef LINUX_FSI_H
16#define LINUX_FSI_H
17
18#include <linux/device.h>
19
20struct fsi_device {
21 struct device dev;
22 u8 engine_type;
23 u8 version;
24};
25
26struct fsi_device_id {
27 u8 engine_type;
28 u8 version;
29};
30
31#define FSI_VERSION_ANY 0
32
33#define FSI_DEVICE(t) \
34 .engine_type = (t), .version = FSI_VERSION_ANY,
35
36#define FSI_DEVICE_VERSIONED(t, v) \
37 .engine_type = (t), .version = (v),
38
39
40struct fsi_driver {
41 struct device_driver drv;
42 const struct fsi_device_id *id_table;
43};
44
45#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
46#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
47
48extern struct bus_type fsi_bus_type;
49
50#endif /* LINUX_FSI_H */
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 3f9778cbc79d..c332f0a45607 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -733,8 +733,12 @@ struct fsl_ifc_nand {
733 __be32 nand_erattr1; 733 __be32 nand_erattr1;
734 u32 res19[0x10]; 734 u32 res19[0x10];
735 __be32 nand_fsr; 735 __be32 nand_fsr;
736 u32 res20[0x3]; 736 u32 res20;
737 __be32 nand_eccstat[6]; 737 /* The V1 nand_eccstat is actually 4 words that overlaps the
738 * V2 nand_eccstat.
739 */
740 __be32 v1_nand_eccstat[2];
741 __be32 v2_nand_eccstat[6];
738 u32 res21[0x1c]; 742 u32 res21[0x1c];
739 __be32 nanndcr; 743 __be32 nanndcr;
740 u32 res22[0x2]; 744 u32 res22[0x2];
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 76f39754e7b0..a999d281a2f1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -167,6 +167,13 @@ struct blk_integrity {
167}; 167};
168 168
169#endif /* CONFIG_BLK_DEV_INTEGRITY */ 169#endif /* CONFIG_BLK_DEV_INTEGRITY */
170struct disk_devt {
171 atomic_t count;
172 void (*release)(struct disk_devt *disk_devt);
173};
174
175void put_disk_devt(struct disk_devt *disk_devt);
176void get_disk_devt(struct disk_devt *disk_devt);
170 177
171struct gendisk { 178struct gendisk {
172 /* major, first_minor and minors are input parameters only, 179 /* major, first_minor and minors are input parameters only,
@@ -176,6 +183,7 @@ struct gendisk {
176 int first_minor; 183 int first_minor;
177 int minors; /* maximum number of minors, =1 for 184 int minors; /* maximum number of minors, =1 for
178 * disks that can't be partitioned. */ 185 * disks that can't be partitioned. */
186 struct disk_devt *disk_devt;
179 187
180 char disk_name[DISK_NAME_LEN]; /* name of major driver */ 188 char disk_name[DISK_NAME_LEN]; /* name of major driver */
181 char *(*devnode)(struct gendisk *gd, umode_t *mode); 189 char *(*devnode)(struct gendisk *gd, umode_t *mode);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c2748accea71..846f3b989480 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -8,6 +8,7 @@
8#include <linux/irqdomain.h> 8#include <linux/irqdomain.h>
9#include <linux/lockdep.h> 9#include <linux/lockdep.h>
10#include <linux/pinctrl/pinctrl.h> 10#include <linux/pinctrl/pinctrl.h>
11#include <linux/pinctrl/pinconf-generic.h>
11 12
12struct gpio_desc; 13struct gpio_desc;
13struct of_phandle_args; 14struct of_phandle_args;
@@ -19,18 +20,6 @@ struct module;
19#ifdef CONFIG_GPIOLIB 20#ifdef CONFIG_GPIOLIB
20 21
21/** 22/**
22 * enum single_ended_mode - mode for single ended operation
23 * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low
24 * @LINE_MODE_OPEN_DRAIN: set line to be open drain
25 * @LINE_MODE_OPEN_SOURCE: set line to be open source
26 */
27enum single_ended_mode {
28 LINE_MODE_PUSH_PULL,
29 LINE_MODE_OPEN_DRAIN,
30 LINE_MODE_OPEN_SOURCE,
31};
32
33/**
34 * struct gpio_chip - abstract a GPIO controller 23 * struct gpio_chip - abstract a GPIO controller
35 * @label: a functional name for the GPIO device, such as a part 24 * @label: a functional name for the GPIO device, such as a part
36 * number or the name of the SoC IP-block implementing it. 25 * number or the name of the SoC IP-block implementing it.
@@ -48,16 +37,8 @@ enum single_ended_mode {
48 * @get: returns value for signal "offset", 0=low, 1=high, or negative error 37 * @get: returns value for signal "offset", 0=low, 1=high, or negative error
49 * @set: assigns output value for signal "offset" 38 * @set: assigns output value for signal "offset"
50 * @set_multiple: assigns output values for multiple signals defined by "mask" 39 * @set_multiple: assigns output values for multiple signals defined by "mask"
51 * @set_debounce: optional hook for setting debounce time for specified gpio in 40 * @set_config: optional hook for all kinds of settings. Uses the same
52 * interrupt triggered gpio chips 41 * packed config format as generic pinconf.
53 * @set_single_ended: optional hook for setting a line as open drain, open
54 * source, or non-single ended (restore from open drain/source to normal
55 * push-pull mode) this should be implemented if the hardware supports
56 * open drain or open source settings. The GPIOlib will otherwise try
57 * to emulate open drain/source by not actively driving lines high/low
58 * if a consumer request this. The driver may return -ENOTSUPP if e.g.
59 * it supports just open drain but not open source and is called
60 * with LINE_MODE_OPEN_SOURCE as mode argument.
61 * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; 42 * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
62 * implementation may not sleep 43 * implementation may not sleep
63 * @dbg_show: optional routine to show contents in debugfs; default code 44 * @dbg_show: optional routine to show contents in debugfs; default code
@@ -150,13 +131,9 @@ struct gpio_chip {
150 void (*set_multiple)(struct gpio_chip *chip, 131 void (*set_multiple)(struct gpio_chip *chip,
151 unsigned long *mask, 132 unsigned long *mask,
152 unsigned long *bits); 133 unsigned long *bits);
153 int (*set_debounce)(struct gpio_chip *chip, 134 int (*set_config)(struct gpio_chip *chip,
154 unsigned offset, 135 unsigned offset,
155 unsigned debounce); 136 unsigned long config);
156 int (*set_single_ended)(struct gpio_chip *chip,
157 unsigned offset,
158 enum single_ended_mode mode);
159
160 int (*to_irq)(struct gpio_chip *chip, 137 int (*to_irq)(struct gpio_chip *chip,
161 unsigned offset); 138 unsigned offset);
162 139
@@ -274,42 +251,74 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
274 struct irq_chip *irqchip, 251 struct irq_chip *irqchip,
275 int parent_irq); 252 int parent_irq);
276 253
277int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, 254int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
255 struct irq_chip *irqchip,
256 unsigned int first_irq,
257 irq_flow_handler_t handler,
258 unsigned int type,
259 bool nested,
260 struct lock_class_key *lock_key);
261
262#ifdef CONFIG_LOCKDEP
263
264/*
265 * Lockdep requires that each irqchip instance be created with a
266 * unique key so as to avoid unnecessary warnings. This upfront
267 * boilerplate static inlines provides such a key for each
268 * unique instance.
269 */
270static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
271 struct irq_chip *irqchip,
272 unsigned int first_irq,
273 irq_flow_handler_t handler,
274 unsigned int type)
275{
276 static struct lock_class_key key;
277
278 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
279 handler, type, false, &key);
280}
281
282static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
278 struct irq_chip *irqchip, 283 struct irq_chip *irqchip,
279 unsigned int first_irq, 284 unsigned int first_irq,
280 irq_flow_handler_t handler, 285 irq_flow_handler_t handler,
281 unsigned int type, 286 unsigned int type)
282 bool nested, 287{
283 struct lock_class_key *lock_key); 288
289 static struct lock_class_key key;
290
291 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
292 handler, type, true, &key);
293}
294#else
295static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
296 struct irq_chip *irqchip,
297 unsigned int first_irq,
298 irq_flow_handler_t handler,
299 unsigned int type)
300{
301 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
302 handler, type, false, NULL);
303}
284 304
285/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
286static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 305static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
287 struct irq_chip *irqchip, 306 struct irq_chip *irqchip,
288 unsigned int first_irq, 307 unsigned int first_irq,
289 irq_flow_handler_t handler, 308 irq_flow_handler_t handler,
290 unsigned int type) 309 unsigned int type)
291{ 310{
292 return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, 311 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
293 handler, type, true, NULL); 312 handler, type, true, NULL);
294} 313}
295 314#endif /* CONFIG_LOCKDEP */
296#ifdef CONFIG_LOCKDEP
297#define gpiochip_irqchip_add(...) \
298( \
299 ({ \
300 static struct lock_class_key _key; \
301 _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
302 }) \
303)
304#else
305#define gpiochip_irqchip_add(...) \
306 _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
307#endif
308 315
309#endif /* CONFIG_GPIOLIB_IRQCHIP */ 316#endif /* CONFIG_GPIOLIB_IRQCHIP */
310 317
311int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); 318int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
312void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); 319void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
320int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
321 unsigned long config);
313 322
314#ifdef CONFIG_PINCTRL 323#ifdef CONFIG_PINCTRL
315 324
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index dd85f3503410..7ef111d3ecc5 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -232,6 +232,7 @@ struct hid_sensor_common {
232 atomic_t data_ready; 232 atomic_t data_ready;
233 atomic_t user_requested_state; 233 atomic_t user_requested_state;
234 struct iio_trigger *trigger; 234 struct iio_trigger *trigger;
235 int timestamp_ns_scale;
235 struct hid_sensor_hub_attribute_info poll; 236 struct hid_sensor_hub_attribute_info poll;
236 struct hid_sensor_hub_attribute_info report_state; 237 struct hid_sensor_hub_attribute_info report_state;
237 struct hid_sensor_hub_attribute_info power_state; 238 struct hid_sensor_hub_attribute_info power_state;
@@ -271,4 +272,7 @@ int hid_sensor_format_scale(u32 usage_id,
271 272
272s32 hid_sensor_read_poll_value(struct hid_sensor_common *st); 273s32 hid_sensor_read_poll_value(struct hid_sensor_common *st);
273 274
275int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st,
276 int64_t raw_value);
277
274#endif 278#endif
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index f2ee90aed0c2..30c7dc45e45f 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -52,6 +52,9 @@
52#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458 52#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458
53#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459 53#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459
54 54
55/* Gravity vector */
56#define HID_USAGE_SENSOR_GRAVITY_VECTOR 0x20007B
57
55/* ORIENTATION: Compass 3D: (200083) */ 58/* ORIENTATION: Compass 3D: (200083) */
56#define HID_USAGE_SENSOR_COMPASS_3D 0x200083 59#define HID_USAGE_SENSOR_COMPASS_3D 0x200083
57#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470 60#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470
@@ -95,6 +98,7 @@
95#define HID_USAGE_SENSOR_TIME_HOUR 0x200525 98#define HID_USAGE_SENSOR_TIME_HOUR 0x200525
96#define HID_USAGE_SENSOR_TIME_MINUTE 0x200526 99#define HID_USAGE_SENSOR_TIME_MINUTE 0x200526
97#define HID_USAGE_SENSOR_TIME_SECOND 0x200527 100#define HID_USAGE_SENSOR_TIME_SECOND 0x200527
101#define HID_USAGE_SENSOR_TIME_TIMESTAMP 0x200529
98 102
99/* Units */ 103/* Units */
100#define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00 104#define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index cdab81ba29f8..e52b427223ba 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -88,12 +88,6 @@ enum hrtimer_restart {
88 * @base: pointer to the timer base (per cpu and per clock) 88 * @base: pointer to the timer base (per cpu and per clock)
89 * @state: state information (See bit values above) 89 * @state: state information (See bit values above)
90 * @is_rel: Set if the timer was armed relative 90 * @is_rel: Set if the timer was armed relative
91 * @start_pid: timer statistics field to store the pid of the task which
92 * started the timer
93 * @start_site: timer statistics field to store the site where the timer
94 * was started
95 * @start_comm: timer statistics field to store the name of the process which
96 * started the timer
97 * 91 *
98 * The hrtimer structure must be initialized by hrtimer_init() 92 * The hrtimer structure must be initialized by hrtimer_init()
99 */ 93 */
@@ -104,11 +98,6 @@ struct hrtimer {
104 struct hrtimer_clock_base *base; 98 struct hrtimer_clock_base *base;
105 u8 state; 99 u8 state;
106 u8 is_rel; 100 u8 is_rel;
107#ifdef CONFIG_TIMER_STATS
108 int start_pid;
109 void *start_site;
110 char start_comm[16];
111#endif
112}; 101};
113 102
114/** 103/**
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 42fe43fb0c80..62bbf3c1aa4a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -32,11 +32,10 @@
32#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <linux/workqueue.h>
36#include <linux/completion.h> 35#include <linux/completion.h>
37#include <linux/device.h> 36#include <linux/device.h>
38#include <linux/mod_devicetable.h> 37#include <linux/mod_devicetable.h>
39 38#include <linux/interrupt.h>
40 39
41#define MAX_PAGE_BUFFER_COUNT 32 40#define MAX_PAGE_BUFFER_COUNT 32
42#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ 41#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -128,6 +127,7 @@ struct hv_ring_buffer_info {
128 u32 ring_data_startoffset; 127 u32 ring_data_startoffset;
129 u32 priv_write_index; 128 u32 priv_write_index;
130 u32 priv_read_index; 129 u32 priv_read_index;
130 u32 cached_read_index;
131}; 131};
132 132
133/* 133/*
@@ -138,8 +138,8 @@ struct hv_ring_buffer_info {
138 * for the specified ring buffer 138 * for the specified ring buffer
139 */ 139 */
140static inline void 140static inline void
141hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, 141hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
142 u32 *read, u32 *write) 142 u32 *read, u32 *write)
143{ 143{
144 u32 read_loc, write_loc, dsize; 144 u32 read_loc, write_loc, dsize;
145 145
@@ -153,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
153 *read = dsize - *write; 153 *read = dsize - *write;
154} 154}
155 155
156static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) 156static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
157{ 157{
158 u32 read_loc, write_loc, dsize, read; 158 u32 read_loc, write_loc, dsize, read;
159 159
@@ -167,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
167 return read; 167 return read;
168} 168}
169 169
170static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) 170static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
171{ 171{
172 u32 read_loc, write_loc, dsize, write; 172 u32 read_loc, write_loc, dsize, write;
173 173
@@ -180,6 +180,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
180 return write; 180 return write;
181} 181}
182 182
183static inline u32 hv_get_cached_bytes_to_write(
184 const struct hv_ring_buffer_info *rbi)
185{
186 u32 read_loc, write_loc, dsize, write;
187
188 dsize = rbi->ring_datasize;
189 read_loc = rbi->cached_read_index;
190 write_loc = rbi->ring_buffer->write_index;
191
192 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
193 read_loc - write_loc;
194 return write;
195}
183/* 196/*
184 * VMBUS version is 32 bit entity broken up into 197 * VMBUS version is 32 bit entity broken up into
185 * two 16 bit quantities: major_number. minor_number. 198 * two 16 bit quantities: major_number. minor_number.
@@ -627,6 +640,7 @@ struct vmbus_channel_msginfo {
627 640
628 /* Synchronize the request/response if needed */ 641 /* Synchronize the request/response if needed */
629 struct completion waitevent; 642 struct completion waitevent;
643 struct vmbus_channel *waiting_channel;
630 union { 644 union {
631 struct vmbus_channel_version_supported version_supported; 645 struct vmbus_channel_version_supported version_supported;
632 struct vmbus_channel_open_result open_result; 646 struct vmbus_channel_open_result open_result;
@@ -669,11 +683,6 @@ struct hv_input_signal_event_buffer {
669 struct hv_input_signal_event event; 683 struct hv_input_signal_event event;
670}; 684};
671 685
672enum hv_signal_policy {
673 HV_SIGNAL_POLICY_DEFAULT = 0,
674 HV_SIGNAL_POLICY_EXPLICIT,
675};
676
677enum hv_numa_policy { 686enum hv_numa_policy {
678 HV_BALANCED = 0, 687 HV_BALANCED = 0,
679 HV_LOCALIZED, 688 HV_LOCALIZED,
@@ -733,26 +742,27 @@ struct vmbus_channel {
733 742
734 struct vmbus_close_msg close_msg; 743 struct vmbus_close_msg close_msg;
735 744
736 /* Channel callback are invoked in this workqueue context */ 745 /* Channel callback's invoked in softirq context */
737 /* HANDLE dataWorkQueue; */ 746 struct tasklet_struct callback_event;
738
739 void (*onchannel_callback)(void *context); 747 void (*onchannel_callback)(void *context);
740 void *channel_callback_context; 748 void *channel_callback_context;
741 749
742 /* 750 /*
743 * A channel can be marked for efficient (batched) 751 * A channel can be marked for one of three modes of reading:
744 * reading: 752 * BATCHED - callback called from taslket and should read
745 * If batched_reading is set to "true", we read until the 753 * channel until empty. Interrupts from the host
746 * channel is empty and hold off interrupts from the host 754 * are masked while read is in process (default).
747 * during the entire read process. 755 * DIRECT - callback called from tasklet (softirq).
748 * If batched_reading is set to "false", the client is not 756 * ISR - callback called in interrupt context and must
749 * going to perform batched reading. 757 * invoke its own deferred processing.
750 * 758 * Host interrupts are disabled and must be re-enabled
751 * By default we will enable batched reading; specific 759 * when ring is empty.
752 * drivers that don't want this behavior can turn it off.
753 */ 760 */
754 761 enum hv_callback_mode {
755 bool batched_reading; 762 HV_CALL_BATCHED,
763 HV_CALL_DIRECT,
764 HV_CALL_ISR
765 } callback_mode;
756 766
757 bool is_dedicated_interrupt; 767 bool is_dedicated_interrupt;
758 struct hv_input_signal_event_buffer sig_buf; 768 struct hv_input_signal_event_buffer sig_buf;
@@ -836,23 +846,6 @@ struct vmbus_channel {
836 */ 846 */
837 struct list_head percpu_list; 847 struct list_head percpu_list;
838 /* 848 /*
839 * Host signaling policy: The default policy will be
840 * based on the ring buffer state. We will also support
841 * a policy where the client driver can have explicit
842 * signaling control.
843 */
844 enum hv_signal_policy signal_policy;
845 /*
846 * On the channel send side, many of the VMBUS
847 * device drivers explicity serialize access to the
848 * outgoing ring buffer. Give more control to the
849 * VMBUS device drivers in terms how to serialize
850 * accesss to the outgoing ring buffer.
851 * The default behavior will be to aquire the
852 * ring lock to preserve the current behavior.
853 */
854 bool acquire_ring_lock;
855 /*
856 * For performance critical channels (storage, networking 849 * For performance critical channels (storage, networking
857 * etc,), Hyper-V has a mechanism to enhance the throughput 850 * etc,), Hyper-V has a mechanism to enhance the throughput
858 * at the expense of latency: 851 * at the expense of latency:
@@ -892,32 +885,22 @@ struct vmbus_channel {
892 885
893}; 886};
894 887
895static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
896{
897 c->acquire_ring_lock = state;
898}
899
900static inline bool is_hvsock_channel(const struct vmbus_channel *c) 888static inline bool is_hvsock_channel(const struct vmbus_channel *c)
901{ 889{
902 return !!(c->offermsg.offer.chn_flags & 890 return !!(c->offermsg.offer.chn_flags &
903 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); 891 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
904} 892}
905 893
906static inline void set_channel_signal_state(struct vmbus_channel *c,
907 enum hv_signal_policy policy)
908{
909 c->signal_policy = policy;
910}
911
912static inline void set_channel_affinity_state(struct vmbus_channel *c, 894static inline void set_channel_affinity_state(struct vmbus_channel *c,
913 enum hv_numa_policy policy) 895 enum hv_numa_policy policy)
914{ 896{
915 c->affinity_policy = policy; 897 c->affinity_policy = policy;
916} 898}
917 899
918static inline void set_channel_read_state(struct vmbus_channel *c, bool state) 900static inline void set_channel_read_mode(struct vmbus_channel *c,
901 enum hv_callback_mode mode)
919{ 902{
920 c->batched_reading = state; 903 c->callback_mode = mode;
921} 904}
922 905
923static inline void set_per_channel_state(struct vmbus_channel *c, void *s) 906static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
@@ -1040,8 +1023,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
1040 u32 bufferLen, 1023 u32 bufferLen,
1041 u64 requestid, 1024 u64 requestid,
1042 enum vmbus_packet_type type, 1025 enum vmbus_packet_type type,
1043 u32 flags, 1026 u32 flags);
1044 bool kick_q);
1045 1027
1046extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, 1028extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1047 struct hv_page_buffer pagebuffers[], 1029 struct hv_page_buffer pagebuffers[],
@@ -1056,8 +1038,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
1056 void *buffer, 1038 void *buffer,
1057 u32 bufferlen, 1039 u32 bufferlen,
1058 u64 requestid, 1040 u64 requestid,
1059 u32 flags, 1041 u32 flags);
1060 bool kick_q);
1061 1042
1062extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, 1043extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
1063 struct hv_multipage_buffer *mpb, 1044 struct hv_multipage_buffer *mpb,
@@ -1444,9 +1425,10 @@ struct hyperv_service_callback {
1444}; 1425};
1445 1426
1446#define MAX_SRV_VER 0x7ffffff 1427#define MAX_SRV_VER 0x7ffffff
1447extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, 1428extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1448 struct icmsg_negotiate *, u8 *, int, 1429 const int *fw_version, int fw_vercnt,
1449 int); 1430 const int *srv_version, int srv_vercnt,
1431 int *nego_fw_version, int *nego_srv_version);
1450 1432
1451void hv_event_tasklet_disable(struct vmbus_channel *channel); 1433void hv_event_tasklet_disable(struct vmbus_channel *channel);
1452void hv_event_tasklet_enable(struct vmbus_channel *channel); 1434void hv_event_tasklet_enable(struct vmbus_channel *channel);
@@ -1466,9 +1448,9 @@ void vmbus_set_event(struct vmbus_channel *channel);
1466 1448
1467/* Get the start of the ring buffer. */ 1449/* Get the start of the ring buffer. */
1468static inline void * 1450static inline void *
1469hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) 1451hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
1470{ 1452{
1471 return (void *)ring_info->ring_buffer->buffer; 1453 return ring_info->ring_buffer->buffer;
1472} 1454}
1473 1455
1474/* 1456/*
@@ -1488,7 +1470,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
1488 1470
1489static inline void hv_signal_on_read(struct vmbus_channel *channel) 1471static inline void hv_signal_on_read(struct vmbus_channel *channel)
1490{ 1472{
1491 u32 cur_write_sz; 1473 u32 cur_write_sz, cached_write_sz;
1492 u32 pending_sz; 1474 u32 pending_sz;
1493 struct hv_ring_buffer_info *rbi = &channel->inbound; 1475 struct hv_ring_buffer_info *rbi = &channel->inbound;
1494 1476
@@ -1512,12 +1494,54 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
1512 1494
1513 cur_write_sz = hv_get_bytes_to_write(rbi); 1495 cur_write_sz = hv_get_bytes_to_write(rbi);
1514 1496
1515 if (cur_write_sz >= pending_sz) 1497 if (cur_write_sz < pending_sz)
1498 return;
1499
1500 cached_write_sz = hv_get_cached_bytes_to_write(rbi);
1501 if (cached_write_sz < pending_sz)
1516 vmbus_setevent(channel); 1502 vmbus_setevent(channel);
1517 1503
1518 return; 1504 return;
1519} 1505}
1520 1506
1507static inline void
1508init_cached_read_index(struct vmbus_channel *channel)
1509{
1510 struct hv_ring_buffer_info *rbi = &channel->inbound;
1511
1512 rbi->cached_read_index = rbi->ring_buffer->read_index;
1513}
1514
1515/*
1516 * Mask off host interrupt callback notifications
1517 */
1518static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
1519{
1520 rbi->ring_buffer->interrupt_mask = 1;
1521
1522 /* make sure mask update is not reordered */
1523 virt_mb();
1524}
1525
1526/*
1527 * Re-enable host callback and return number of outstanding bytes
1528 */
1529static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
1530{
1531
1532 rbi->ring_buffer->interrupt_mask = 0;
1533
1534 /* make sure mask update is not reordered */
1535 virt_mb();
1536
1537 /*
1538 * Now check to see if the ring buffer is still empty.
1539 * If it is not, we raced and we need to process new
1540 * incoming messages.
1541 */
1542 return hv_get_bytes_to_read(rbi);
1543}
1544
1521/* 1545/*
1522 * An API to support in-place processing of incoming VMBUS packets. 1546 * An API to support in-place processing of incoming VMBUS packets.
1523 */ 1547 */
@@ -1569,6 +1593,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
1569 * This call commits the read index and potentially signals the host. 1593 * This call commits the read index and potentially signals the host.
1570 * Here is the pattern for using the "in-place" consumption APIs: 1594 * Here is the pattern for using the "in-place" consumption APIs:
1571 * 1595 *
1596 * init_cached_read_index();
1597 *
1572 * while (get_next_pkt_raw() { 1598 * while (get_next_pkt_raw() {
1573 * process the packet "in-place"; 1599 * process the packet "in-place";
1574 * put_pkt_raw(); 1600 * put_pkt_raw();
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 4b45ec46161f..7b23a3316dcb 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -51,6 +51,7 @@ enum i2c_slave_event;
51typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); 51typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
52 52
53struct module; 53struct module;
54struct property_entry;
54 55
55#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 56#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
56/* 57/*
@@ -299,6 +300,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
299 * @archdata: copied into i2c_client.dev.archdata 300 * @archdata: copied into i2c_client.dev.archdata
300 * @of_node: pointer to OpenFirmware device node 301 * @of_node: pointer to OpenFirmware device node
301 * @fwnode: device node supplied by the platform firmware 302 * @fwnode: device node supplied by the platform firmware
303 * @properties: additional device properties for the device
302 * @irq: stored in i2c_client.irq 304 * @irq: stored in i2c_client.irq
303 * 305 *
304 * I2C doesn't actually support hardware probing, although controllers and 306 * I2C doesn't actually support hardware probing, although controllers and
@@ -320,6 +322,7 @@ struct i2c_board_info {
320 struct dev_archdata *archdata; 322 struct dev_archdata *archdata;
321 struct device_node *of_node; 323 struct device_node *of_node;
322 struct fwnode_handle *fwnode; 324 struct fwnode_handle *fwnode;
325 const struct property_entry *properties;
323 int irq; 326 int irq;
324}; 327};
325 328
diff --git a/include/linux/ide.h b/include/linux/ide.h
index a633898f36ac..2f51c1724b5a 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -20,6 +20,7 @@
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21/* for request_sense */ 21/* for request_sense */
22#include <linux/cdrom.h> 22#include <linux/cdrom.h>
23#include <scsi/scsi_cmnd.h>
23#include <asm/byteorder.h> 24#include <asm/byteorder.h>
24#include <asm/io.h> 25#include <asm/io.h>
25 26
@@ -39,18 +40,53 @@
39 40
40struct device; 41struct device;
41 42
42/* IDE-specific values for req->cmd_type */ 43/* values for ide_request.type */
43enum ata_cmd_type_bits { 44enum ata_priv_type {
44 REQ_TYPE_ATA_TASKFILE = REQ_TYPE_DRV_PRIV + 1, 45 ATA_PRIV_MISC,
45 REQ_TYPE_ATA_PC, 46 ATA_PRIV_TASKFILE,
46 REQ_TYPE_ATA_SENSE, /* sense request */ 47 ATA_PRIV_PC,
47 REQ_TYPE_ATA_PM_SUSPEND,/* suspend request */ 48 ATA_PRIV_SENSE, /* sense request */
48 REQ_TYPE_ATA_PM_RESUME, /* resume request */ 49 ATA_PRIV_PM_SUSPEND, /* suspend request */
50 ATA_PRIV_PM_RESUME, /* resume request */
49}; 51};
50 52
51#define ata_pm_request(rq) \ 53struct ide_request {
52 ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \ 54 struct scsi_request sreq;
53 (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME) 55 u8 sense[SCSI_SENSE_BUFFERSIZE];
56 u8 type;
57};
58
59static inline struct ide_request *ide_req(struct request *rq)
60{
61 return blk_mq_rq_to_pdu(rq);
62}
63
64static inline bool ata_misc_request(struct request *rq)
65{
66 return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
67}
68
69static inline bool ata_taskfile_request(struct request *rq)
70{
71 return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
72}
73
74static inline bool ata_pc_request(struct request *rq)
75{
76 return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
77}
78
79static inline bool ata_sense_request(struct request *rq)
80{
81 return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
82}
83
84static inline bool ata_pm_request(struct request *rq)
85{
86 return blk_rq_is_private(rq) &&
87 (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
88 ide_req(rq)->type == ATA_PRIV_PM_RESUME);
89}
54 90
55/* Error codes returned in rq->errors to the higher part of the driver. */ 91/* Error codes returned in rq->errors to the higher part of the driver. */
56enum { 92enum {
@@ -579,7 +615,7 @@ struct ide_drive_s {
579 615
580 /* current sense rq and buffer */ 616 /* current sense rq and buffer */
581 bool sense_rq_armed; 617 bool sense_rq_armed;
582 struct request sense_rq; 618 struct request *sense_rq;
583 struct request_sense sense_data; 619 struct request_sense sense_data;
584}; 620};
585 621
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index fe849329511a..0dd9498c694f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
185 185
186/* number of user priorities 802.11 uses */ 186/* number of user priorities 802.11 uses */
187#define IEEE80211_NUM_UPS 8 187#define IEEE80211_NUM_UPS 8
188/* number of ACs */
189#define IEEE80211_NUM_ACS 4
188 190
189#define IEEE80211_QOS_CTL_LEN 2 191#define IEEE80211_QOS_CTL_LEN 2
190/* 1d tag mask */ 192/* 1d tag mask */
@@ -1041,8 +1043,9 @@ struct ieee80211_mgmt {
1041 } u; 1043 } u;
1042} __packed __aligned(2); 1044} __packed __aligned(2);
1043 1045
1044/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ 1046/* Supported rates membership selectors */
1045#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 1047#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
1048#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
1046 1049
1047/* mgmt header + 1 byte category code */ 1050/* mgmt header + 1 byte category code */
1048#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) 1051#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -2322,31 +2325,33 @@ enum ieee80211_sa_query_action {
2322}; 2325};
2323 2326
2324 2327
2328#define SUITE(oui, id) (((oui) << 8) | (id))
2329
2325/* cipher suite selectors */ 2330/* cipher suite selectors */
2326#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 2331#define WLAN_CIPHER_SUITE_USE_GROUP SUITE(0x000FAC, 0)
2327#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 2332#define WLAN_CIPHER_SUITE_WEP40 SUITE(0x000FAC, 1)
2328#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02 2333#define WLAN_CIPHER_SUITE_TKIP SUITE(0x000FAC, 2)
2329/* reserved: 0x000FAC03 */ 2334/* reserved: SUITE(0x000FAC, 3) */
2330#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 2335#define WLAN_CIPHER_SUITE_CCMP SUITE(0x000FAC, 4)
2331#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 2336#define WLAN_CIPHER_SUITE_WEP104 SUITE(0x000FAC, 5)
2332#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 2337#define WLAN_CIPHER_SUITE_AES_CMAC SUITE(0x000FAC, 6)
2333#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08 2338#define WLAN_CIPHER_SUITE_GCMP SUITE(0x000FAC, 8)
2334#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09 2339#define WLAN_CIPHER_SUITE_GCMP_256 SUITE(0x000FAC, 9)
2335#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A 2340#define WLAN_CIPHER_SUITE_CCMP_256 SUITE(0x000FAC, 10)
2336#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B 2341#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11)
2337#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C 2342#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12)
2338#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D 2343#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13)
2339 2344
2340#define WLAN_CIPHER_SUITE_SMS4 0x00147201 2345#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1)
2341 2346
2342/* AKM suite selectors */ 2347/* AKM suite selectors */
2343#define WLAN_AKM_SUITE_8021X 0x000FAC01 2348#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
2344#define WLAN_AKM_SUITE_PSK 0x000FAC02 2349#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
2345#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05 2350#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
2346#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06 2351#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
2347#define WLAN_AKM_SUITE_TDLS 0x000FAC07 2352#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
2348#define WLAN_AKM_SUITE_SAE 0x000FAC08 2353#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
2349#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09 2354#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
2350 2355
2351#define WLAN_MAX_KEY_LEN 32 2356#define WLAN_MAX_KEY_LEN 32
2352 2357
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c6587c01d951..c5847dc75a93 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,6 +46,8 @@ struct br_ip_list {
46#define BR_LEARNING_SYNC BIT(9) 46#define BR_LEARNING_SYNC BIT(9)
47#define BR_PROXYARP_WIFI BIT(10) 47#define BR_PROXYARP_WIFI BIT(10)
48#define BR_MCAST_FLOOD BIT(11) 48#define BR_MCAST_FLOOD BIT(11)
49#define BR_MULTICAST_TO_UNICAST BIT(12)
50#define BR_VLAN_TUNNEL BIT(13)
49 51
50#define BR_DEFAULT_AGEING_TIME (300 * HZ) 52#define BR_DEFAULT_AGEING_TIME (300 * HZ)
51 53
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 4316aa173dde..46df7e565d6f 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -66,8 +66,6 @@ struct dlci_local
66 66
67struct frad_local 67struct frad_local
68{ 68{
69 struct net_device_stats stats;
70
71 /* devices which this FRAD is slaved to */ 69 /* devices which this FRAD is slaved to */
72 struct net_device *master[CONFIG_DLCI_MAX]; 70 struct net_device *master[CONFIG_DLCI_MAX];
73 short dlci[CONFIG_DLCI_MAX]; 71 short dlci[CONFIG_DLCI_MAX];
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index a4ccc3122f93..c9ec1343d187 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -9,19 +9,6 @@
9#include <net/netlink.h> 9#include <net/netlink.h>
10#include <linux/u64_stats_sync.h> 10#include <linux/u64_stats_sync.h>
11 11
12#if IS_ENABLED(CONFIG_MACVTAP)
13struct socket *macvtap_get_socket(struct file *);
14#else
15#include <linux/err.h>
16#include <linux/errno.h>
17struct file;
18struct socket;
19static inline struct socket *macvtap_get_socket(struct file *f)
20{
21 return ERR_PTR(-EINVAL);
22}
23#endif /* CONFIG_MACVTAP */
24
25struct macvlan_port; 12struct macvlan_port;
26struct macvtap_queue; 13struct macvtap_queue;
27 14
@@ -29,7 +16,7 @@ struct macvtap_queue;
29 * Maximum times a macvtap device can be opened. This can be used to 16 * Maximum times a macvtap device can be opened. This can be used to
30 * configure the number of receive queue, e.g. for multiqueue virtio. 17 * configure the number of receive queue, e.g. for multiqueue virtio.
31 */ 18 */
32#define MAX_MACVTAP_QUEUES 256 19#define MAX_TAP_QUEUES 256
33 20
34#define MACVLAN_MC_FILTER_BITS 8 21#define MACVLAN_MC_FILTER_BITS 8
35#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) 22#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
@@ -49,7 +36,7 @@ struct macvlan_dev {
49 enum macvlan_mode mode; 36 enum macvlan_mode mode;
50 u16 flags; 37 u16 flags;
51 /* This array tracks active taps. */ 38 /* This array tracks active taps. */
52 struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES]; 39 struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
53 /* This list tracks all taps (both enabled and disabled) */ 40 /* This list tracks all taps (both enabled and disabled) */
54 struct list_head queue_list; 41 struct list_head queue_list;
55 int numvtaps; 42 int numvtaps;
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
new file mode 100644
index 000000000000..3482c3c2037d
--- /dev/null
+++ b/include/linux/if_tap.h
@@ -0,0 +1,75 @@
1#ifndef _LINUX_IF_TAP_H_
2#define _LINUX_IF_TAP_H_
3
4#if IS_ENABLED(CONFIG_TAP)
5struct socket *tap_get_socket(struct file *);
6#else
7#include <linux/err.h>
8#include <linux/errno.h>
9struct file;
10struct socket;
11static inline struct socket *tap_get_socket(struct file *f)
12{
13 return ERR_PTR(-EINVAL);
14}
15#endif /* CONFIG_TAP */
16
17#include <net/sock.h>
18#include <linux/skb_array.h>
19
20#define MAX_TAP_QUEUES 256
21
22struct tap_queue;
23
24struct tap_dev {
25 struct net_device *dev;
26 u16 flags;
27 /* This array tracks active taps. */
28 struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
29 /* This list tracks all taps (both enabled and disabled) */
30 struct list_head queue_list;
31 int numvtaps;
32 int numqueues;
33 netdev_features_t tap_features;
34 int minor;
35
36 void (*update_features)(struct tap_dev *tap, netdev_features_t features);
37 void (*count_tx_dropped)(struct tap_dev *tap);
38 void (*count_rx_dropped)(struct tap_dev *tap);
39};
40
41/*
42 * A tap queue is the central object of tap module, it connects
43 * an open character device to virtual interface. There can be
44 * multiple queues on one interface, which map back to queues
45 * implemented in hardware on the underlying device.
46 *
47 * tap_proto is used to allocate queues through the sock allocation
48 * mechanism.
49 *
50 */
51
52struct tap_queue {
53 struct sock sk;
54 struct socket sock;
55 struct socket_wq wq;
56 int vnet_hdr_sz;
57 struct tap_dev __rcu *tap;
58 struct file *file;
59 unsigned int flags;
60 u16 queue_index;
61 bool enabled;
62 struct list_head next;
63 struct skb_array skb_array;
64};
65
66rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
67void tap_del_queues(struct tap_dev *tap);
68int tap_get_minor(dev_t major, struct tap_dev *tap);
69void tap_free_minor(dev_t major, struct tap_dev *tap);
70int tap_queue_resize(struct tap_dev *tap);
71int tap_create_cdev(struct cdev *tap_cdev,
72 dev_t *tap_major, const char *device_name);
73void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
74
75#endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 70a5164f4728..48767c776119 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,139 +11,15 @@
11#define _IIO_BUFFER_GENERIC_H_ 11#define _IIO_BUFFER_GENERIC_H_
12#include <linux/sysfs.h> 12#include <linux/sysfs.h>
13#include <linux/iio/iio.h> 13#include <linux/iio/iio.h>
14#include <linux/kref.h>
15
16#ifdef CONFIG_IIO_BUFFER
17 14
18struct iio_buffer; 15struct iio_buffer;
19 16
20/** 17void iio_buffer_set_attrs(struct iio_buffer *buffer,
21 * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be 18 const struct attribute **attrs);
22 * configured. It has a fixed value which will be buffer specific.
23 */
24#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0)
25
26/**
27 * struct iio_buffer_access_funcs - access functions for buffers.
28 * @store_to: actually store stuff to the buffer
29 * @read_first_n: try to get a specified number of bytes (must exist)
30 * @data_available: indicates how much data is available for reading from
31 * the buffer.
32 * @request_update: if a parameter change has been marked, update underlying
33 * storage.
34 * @set_bytes_per_datum:set number of bytes per datum
35 * @set_length: set number of datums in buffer
36 * @enable: called if the buffer is attached to a device and the
37 * device starts sampling. Calls are balanced with
38 * @disable.
39 * @disable: called if the buffer is attached to a device and the
40 * device stops sampling. Calles are balanced with @enable.
41 * @release: called when the last reference to the buffer is dropped,
42 * should free all resources allocated by the buffer.
43 * @modes: Supported operating modes by this buffer type
44 * @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
45 *
46 * The purpose of this structure is to make the buffer element
47 * modular as event for a given driver, different usecases may require
48 * different buffer designs (space efficiency vs speed for example).
49 *
50 * It is worth noting that a given buffer implementation may only support a
51 * small proportion of these functions. The core code 'should' cope fine with
52 * any of them not existing.
53 **/
54struct iio_buffer_access_funcs {
55 int (*store_to)(struct iio_buffer *buffer, const void *data);
56 int (*read_first_n)(struct iio_buffer *buffer,
57 size_t n,
58 char __user *buf);
59 size_t (*data_available)(struct iio_buffer *buffer);
60
61 int (*request_update)(struct iio_buffer *buffer);
62
63 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
64 int (*set_length)(struct iio_buffer *buffer, int length);
65
66 int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
67 int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
68
69 void (*release)(struct iio_buffer *buffer);
70
71 unsigned int modes;
72 unsigned int flags;
73};
74
75/**
76 * struct iio_buffer - general buffer structure
77 * @length: [DEVICE] number of datums in buffer
78 * @bytes_per_datum: [DEVICE] size of individual datum including timestamp
79 * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
80 * control method is used
81 * @scan_mask: [INTERN] bitmask used in masking scan mode elements
82 * @scan_timestamp: [INTERN] does the scan mode include a timestamp
83 * @access: [DRIVER] buffer access functions associated with the
84 * implementation.
85 * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
86 * @buffer_group: [INTERN] attributes of the buffer group
87 * @scan_el_group: [DRIVER] attribute group for those attributes not
88 * created from the iio_chan_info array.
89 * @pollq: [INTERN] wait queue to allow for polling on the buffer.
90 * @stufftoread: [INTERN] flag to indicate new data.
91 * @attrs: [INTERN] standard attributes of the buffer
92 * @demux_list: [INTERN] list of operations required to demux the scan.
93 * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
94 * @buffer_list: [INTERN] entry in the devices list of current buffers.
95 * @ref: [INTERN] reference count of the buffer.
96 * @watermark: [INTERN] number of datums to wait for poll/read.
97 */
98struct iio_buffer {
99 int length;
100 int bytes_per_datum;
101 struct attribute_group *scan_el_attrs;
102 long *scan_mask;
103 bool scan_timestamp;
104 const struct iio_buffer_access_funcs *access;
105 struct list_head scan_el_dev_attr_list;
106 struct attribute_group buffer_group;
107 struct attribute_group scan_el_group;
108 wait_queue_head_t pollq;
109 bool stufftoread;
110 const struct attribute **attrs;
111 struct list_head demux_list;
112 void *demux_bounce;
113 struct list_head buffer_list;
114 struct kref ref;
115 unsigned int watermark;
116};
117
118/**
119 * iio_update_buffers() - add or remove buffer from active list
120 * @indio_dev: device to add buffer to
121 * @insert_buffer: buffer to insert
122 * @remove_buffer: buffer_to_remove
123 *
124 * Note this will tear down the all buffering and build it up again
125 */
126int iio_update_buffers(struct iio_dev *indio_dev,
127 struct iio_buffer *insert_buffer,
128 struct iio_buffer *remove_buffer);
129
130/**
131 * iio_buffer_init() - Initialize the buffer structure
132 * @buffer: buffer to be initialized
133 **/
134void iio_buffer_init(struct iio_buffer *buffer);
135 19
136int iio_scan_mask_query(struct iio_dev *indio_dev,
137 struct iio_buffer *buffer, int bit);
138
139/**
140 * iio_push_to_buffers() - push to a registered buffer.
141 * @indio_dev: iio_dev structure for device.
142 * @data: Full scan.
143 */
144int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); 20int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
145 21
146/* 22/**
147 * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers 23 * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
148 * @indio_dev: iio_dev structure for device. 24 * @indio_dev: iio_dev structure for device.
149 * @data: sample data 25 * @data: sample data
@@ -168,34 +44,10 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
168 return iio_push_to_buffers(indio_dev, data); 44 return iio_push_to_buffers(indio_dev, data);
169} 45}
170 46
171int iio_update_demux(struct iio_dev *indio_dev);
172
173bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, 47bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
174 const unsigned long *mask); 48 const unsigned long *mask);
175
176struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
177void iio_buffer_put(struct iio_buffer *buffer);
178
179/**
180 * iio_device_attach_buffer - Attach a buffer to a IIO device
181 * @indio_dev: The device the buffer should be attached to
182 * @buffer: The buffer to attach to the device
183 *
184 * This function attaches a buffer to a IIO device. The buffer stays attached to
185 * the device until the device is freed. The function should only be called at
186 * most once per device.
187 */
188static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
189 struct iio_buffer *buffer)
190{
191 indio_dev->buffer = iio_buffer_get(buffer);
192}
193
194#else /* CONFIG_IIO_BUFFER */
195
196static inline void iio_buffer_get(struct iio_buffer *buffer) {}
197static inline void iio_buffer_put(struct iio_buffer *buffer) {}
198 49
199#endif /* CONFIG_IIO_BUFFER */ 50void iio_device_attach_buffer(struct iio_dev *indio_dev,
51 struct iio_buffer *buffer);
200 52
201#endif /* _IIO_BUFFER_GENERIC_H_ */ 53#endif /* _IIO_BUFFER_GENERIC_H_ */
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
new file mode 100644
index 000000000000..8daba198fafa
--- /dev/null
+++ b/include/linux/iio/buffer_impl.h
@@ -0,0 +1,162 @@
1#ifndef _IIO_BUFFER_GENERIC_IMPL_H_
2#define _IIO_BUFFER_GENERIC_IMPL_H_
3#include <linux/sysfs.h>
4#include <linux/kref.h>
5
6#ifdef CONFIG_IIO_BUFFER
7
8struct iio_dev;
9struct iio_buffer;
10
11/**
12 * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
13 * configured. It has a fixed value which will be buffer specific.
14 */
15#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0)
16
17/**
18 * struct iio_buffer_access_funcs - access functions for buffers.
19 * @store_to: actually store stuff to the buffer
20 * @read_first_n: try to get a specified number of bytes (must exist)
21 * @data_available: indicates how much data is available for reading from
22 * the buffer.
23 * @request_update: if a parameter change has been marked, update underlying
24 * storage.
25 * @set_bytes_per_datum:set number of bytes per datum
26 * @set_length: set number of datums in buffer
27 * @enable: called if the buffer is attached to a device and the
28 * device starts sampling. Calls are balanced with
29 * @disable.
30 * @disable: called if the buffer is attached to a device and the
31 * device stops sampling. Calles are balanced with @enable.
32 * @release: called when the last reference to the buffer is dropped,
33 * should free all resources allocated by the buffer.
34 * @modes: Supported operating modes by this buffer type
35 * @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
36 *
37 * The purpose of this structure is to make the buffer element
38 * modular as event for a given driver, different usecases may require
39 * different buffer designs (space efficiency vs speed for example).
40 *
41 * It is worth noting that a given buffer implementation may only support a
42 * small proportion of these functions. The core code 'should' cope fine with
43 * any of them not existing.
44 **/
45struct iio_buffer_access_funcs {
46 int (*store_to)(struct iio_buffer *buffer, const void *data);
47 int (*read_first_n)(struct iio_buffer *buffer,
48 size_t n,
49 char __user *buf);
50 size_t (*data_available)(struct iio_buffer *buffer);
51
52 int (*request_update)(struct iio_buffer *buffer);
53
54 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
55 int (*set_length)(struct iio_buffer *buffer, int length);
56
57 int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
58 int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
59
60 void (*release)(struct iio_buffer *buffer);
61
62 unsigned int modes;
63 unsigned int flags;
64};
65
66/**
67 * struct iio_buffer - general buffer structure
68 *
69 * Note that the internals of this structure should only be of interest to
70 * those writing new buffer implementations.
71 */
72struct iio_buffer {
73 /** @length: Number of datums in buffer. */
74 int length;
75
76 /** @bytes_per_datum: Size of individual datum including timestamp. */
77 int bytes_per_datum;
78
79 /**
80 * @access: Buffer access functions associated with the
81 * implementation.
82 */
83 const struct iio_buffer_access_funcs *access;
84
85 /** @scan_mask: Bitmask used in masking scan mode elements. */
86 long *scan_mask;
87
88 /** @demux_list: List of operations required to demux the scan. */
89 struct list_head demux_list;
90
91 /** @pollq: Wait queue to allow for polling on the buffer. */
92 wait_queue_head_t pollq;
93
94 /** @watermark: Number of datums to wait for poll/read. */
95 unsigned int watermark;
96
97 /* private: */
98 /*
99 * @scan_el_attrs: Control of scan elements if that scan mode
100 * control method is used.
101 */
102 struct attribute_group *scan_el_attrs;
103
104 /* @scan_timestamp: Does the scan mode include a timestamp. */
105 bool scan_timestamp;
106
107 /* @scan_el_dev_attr_list: List of scan element related attributes. */
108 struct list_head scan_el_dev_attr_list;
109
110 /* @buffer_group: Attributes of the buffer group. */
111 struct attribute_group buffer_group;
112
113 /*
114 * @scan_el_group: Attribute group for those attributes not
115 * created from the iio_chan_info array.
116 */
117 struct attribute_group scan_el_group;
118
119 /* @stufftoread: Flag to indicate new data. */
120 bool stufftoread;
121
122 /* @attrs: Standard attributes of the buffer. */
123 const struct attribute **attrs;
124
125 /* @demux_bounce: Buffer for doing gather from incoming scan. */
126 void *demux_bounce;
127
128 /* @buffer_list: Entry in the devices list of current buffers. */
129 struct list_head buffer_list;
130
131 /* @ref: Reference count of the buffer. */
132 struct kref ref;
133};
134
135/**
136 * iio_update_buffers() - add or remove buffer from active list
137 * @indio_dev: device to add buffer to
138 * @insert_buffer: buffer to insert
139 * @remove_buffer: buffer_to_remove
140 *
141 * Note this will tear down the all buffering and build it up again
142 */
143int iio_update_buffers(struct iio_dev *indio_dev,
144 struct iio_buffer *insert_buffer,
145 struct iio_buffer *remove_buffer);
146
147/**
148 * iio_buffer_init() - Initialize the buffer structure
149 * @buffer: buffer to be initialized
150 **/
151void iio_buffer_init(struct iio_buffer *buffer);
152
153struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
154void iio_buffer_put(struct iio_buffer *buffer);
155
156#else /* CONFIG_IIO_BUFFER */
157
158static inline void iio_buffer_get(struct iio_buffer *buffer) {}
159static inline void iio_buffer_put(struct iio_buffer *buffer) {}
160
161#endif /* CONFIG_IIO_BUFFER */
162#endif /* _IIO_BUFFER_GENERIC_IMPL_H_ */
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 1796af093368..254de3c7dde8 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -28,4 +28,13 @@ static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
28} 28}
29#endif 29#endif
30 30
31#ifdef CONFIG_ACPI
32int st_sensors_match_acpi_device(struct device *dev);
33#else
34static inline int st_sensors_match_acpi_device(struct device *dev)
35{
36 return -ENODEV;
37}
38#endif
39
31#endif /* ST_SENSORS_I2C_H */ 40#endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 1683bc710d14..027cfa9c3703 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -1,9 +1,8 @@
1#ifndef __LINUX_IIO_KFIFO_BUF_H__ 1#ifndef __LINUX_IIO_KFIFO_BUF_H__
2#define __LINUX_IIO_KFIFO_BUF_H__ 2#define __LINUX_IIO_KFIFO_BUF_H__
3 3
4#include <linux/kfifo.h> 4struct iio_buffer;
5#include <linux/iio/iio.h> 5struct device;
6#include <linux/iio/buffer.h>
7 6
8struct iio_buffer *iio_kfifo_allocate(void); 7struct iio_buffer *iio_kfifo_allocate(void);
9void iio_kfifo_free(struct iio_buffer *r); 8void iio_kfifo_free(struct iio_buffer *r);
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
new file mode 100644
index 000000000000..55535aef2e6c
--- /dev/null
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright (C) STMicroelectronics 2016
3 *
4 * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
5 *
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STM32_TIMER_TRIGGER_H_
10#define _STM32_TIMER_TRIGGER_H_
11
12#define TIM1_TRGO "tim1_trgo"
13#define TIM1_CH1 "tim1_ch1"
14#define TIM1_CH2 "tim1_ch2"
15#define TIM1_CH3 "tim1_ch3"
16#define TIM1_CH4 "tim1_ch4"
17
18#define TIM2_TRGO "tim2_trgo"
19#define TIM2_CH1 "tim2_ch1"
20#define TIM2_CH2 "tim2_ch2"
21#define TIM2_CH3 "tim2_ch3"
22#define TIM2_CH4 "tim2_ch4"
23
24#define TIM3_TRGO "tim3_trgo"
25#define TIM3_CH1 "tim3_ch1"
26#define TIM3_CH2 "tim3_ch2"
27#define TIM3_CH3 "tim3_ch3"
28#define TIM3_CH4 "tim3_ch4"
29
30#define TIM4_TRGO "tim4_trgo"
31#define TIM4_CH1 "tim4_ch1"
32#define TIM4_CH2 "tim4_ch2"
33#define TIM4_CH3 "tim4_ch3"
34#define TIM4_CH4 "tim4_ch4"
35
36#define TIM5_TRGO "tim5_trgo"
37#define TIM5_CH1 "tim5_ch1"
38#define TIM5_CH2 "tim5_ch2"
39#define TIM5_CH3 "tim5_ch3"
40#define TIM5_CH4 "tim5_ch4"
41
42#define TIM6_TRGO "tim6_trgo"
43
44#define TIM7_TRGO "tim7_trgo"
45
46#define TIM8_TRGO "tim8_trgo"
47#define TIM8_CH1 "tim8_ch1"
48#define TIM8_CH2 "tim8_ch2"
49#define TIM8_CH3 "tim8_ch3"
50#define TIM8_CH4 "tim8_ch4"
51
52#define TIM9_TRGO "tim9_trgo"
53#define TIM9_CH1 "tim9_ch1"
54#define TIM9_CH2 "tim9_ch2"
55
56#define TIM12_TRGO "tim12_trgo"
57#define TIM12_CH1 "tim12_ch1"
58#define TIM12_CH2 "tim12_ch2"
59
60bool is_stm32_timer_trigger(struct iio_trigger *trig);
61
62#endif
diff --git a/include/linux/init.h b/include/linux/init.h
index 885c3e6d0f9d..79af0962fd52 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -126,10 +126,10 @@ void prepare_namespace(void);
126void __init load_default_modules(void); 126void __init load_default_modules(void);
127int __init init_rootfs(void); 127int __init init_rootfs(void);
128 128
129#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX) 129#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
130extern bool rodata_enabled; 130extern bool rodata_enabled;
131#endif 131#endif
132#ifdef CONFIG_DEBUG_RODATA 132#ifdef CONFIG_STRICT_KERNEL_RWX
133void mark_rodata_ro(void); 133void mark_rodata_ro(void);
134#endif 134#endif
135 135
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 325f649d77ff..3a85d61f7614 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -42,6 +42,27 @@ extern struct fs_struct init_fs;
42#define INIT_PREV_CPUTIME(x) 42#define INIT_PREV_CPUTIME(x)
43#endif 43#endif
44 44
45#ifdef CONFIG_POSIX_TIMERS
46#define INIT_POSIX_TIMERS(s) \
47 .posix_timers = LIST_HEAD_INIT(s.posix_timers),
48#define INIT_CPU_TIMERS(s) \
49 .cpu_timers = { \
50 LIST_HEAD_INIT(s.cpu_timers[0]), \
51 LIST_HEAD_INIT(s.cpu_timers[1]), \
52 LIST_HEAD_INIT(s.cpu_timers[2]), \
53 },
54#define INIT_CPUTIMER(s) \
55 .cputimer = { \
56 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
57 .running = false, \
58 .checking_timer = false, \
59 },
60#else
61#define INIT_POSIX_TIMERS(s)
62#define INIT_CPU_TIMERS(s)
63#define INIT_CPUTIMER(s)
64#endif
65
45#define INIT_SIGNALS(sig) { \ 66#define INIT_SIGNALS(sig) { \
46 .nr_threads = 1, \ 67 .nr_threads = 1, \
47 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \ 68 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -49,14 +70,10 @@ extern struct fs_struct init_fs;
49 .shared_pending = { \ 70 .shared_pending = { \
50 .list = LIST_HEAD_INIT(sig.shared_pending.list), \ 71 .list = LIST_HEAD_INIT(sig.shared_pending.list), \
51 .signal = {{0}}}, \ 72 .signal = {{0}}}, \
52 .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ 73 INIT_POSIX_TIMERS(sig) \
53 .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ 74 INIT_CPU_TIMERS(sig) \
54 .rlim = INIT_RLIMITS, \ 75 .rlim = INIT_RLIMITS, \
55 .cputimer = { \ 76 INIT_CPUTIMER(sig) \
56 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
57 .running = false, \
58 .checking_timer = false, \
59 }, \
60 INIT_PREV_CPUTIME(sig) \ 77 INIT_PREV_CPUTIME(sig) \
61 .cred_guard_mutex = \ 78 .cred_guard_mutex = \
62 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 79 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
@@ -247,7 +264,7 @@ extern struct task_group root_task_group;
247 .blocked = {{0}}, \ 264 .blocked = {{0}}, \
248 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ 265 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
249 .journal_info = NULL, \ 266 .journal_info = NULL, \
250 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 267 INIT_CPU_TIMERS(tsk) \
251 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ 268 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
252 .timer_slack_ns = 50000, /* 50 usec default slack */ \ 269 .timer_slack_ns = 50000, /* 50 usec default slack */ \
253 .pids = { \ 270 .pids = { \
@@ -274,13 +291,6 @@ extern struct task_group root_task_group;
274} 291}
275 292
276 293
277#define INIT_CPU_TIMERS(cpu_timers) \
278{ \
279 LIST_HEAD_INIT(cpu_timers[0]), \
280 LIST_HEAD_INIT(cpu_timers[1]), \
281 LIST_HEAD_INIT(cpu_timers[2]), \
282}
283
284/* Attach to the init_task data structure for proper alignment */ 294/* Attach to the init_task data structure for proper alignment */
285#define __init_task_data __attribute__((__section__(".data..init_task"))) 295#define __init_task_data __attribute__((__section__(".data..init_task")))
286 296
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d49e26c6cdc7..c573a52ae440 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
29#include <linux/dma_remapping.h> 29#include <linux/dma_remapping.h>
30#include <linux/mmu_notifier.h> 30#include <linux/mmu_notifier.h>
31#include <linux/list.h> 31#include <linux/list.h>
32#include <linux/iommu.h>
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/iommu.h> 34#include <asm/iommu.h>
34 35
@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
153#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) 154#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
154#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) 155#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
155#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) 156#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
156#define DMA_TLB_IIRG(type) ((type >> 60) & 7) 157#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
157#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) 158#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
158#define DMA_TLB_READ_DRAIN (((u64)1) << 49) 159#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
159#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) 160#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
160#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) 161#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
164 165
165/* INVALID_DESC */ 166/* INVALID_DESC */
166#define DMA_CCMD_INVL_GRANU_OFFSET 61 167#define DMA_CCMD_INVL_GRANU_OFFSET 61
167#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) 168#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
168#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) 169#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
169#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) 170#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
170#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) 171#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
171#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) 172#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
172#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) 173#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
@@ -316,8 +317,8 @@ enum {
316#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) 317#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
317#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) 318#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
318#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) 319#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
319#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) 320#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
320#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) 321#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
321#define QI_DEV_EIOTLB_MAX_INVS 32 322#define QI_DEV_EIOTLB_MAX_INVS 32
322 323
323#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) 324#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@@ -439,7 +440,7 @@ struct intel_iommu {
439 struct irq_domain *ir_domain; 440 struct irq_domain *ir_domain;
440 struct irq_domain *ir_msi_domain; 441 struct irq_domain *ir_msi_domain;
441#endif 442#endif
442 struct device *iommu_dev; /* IOMMU-sysfs device */ 443 struct iommu_device iommu; /* IOMMU core code handle */
443 int node; 444 int node;
444 u32 flags; /* Software defined flags */ 445 u32 flags; /* Software defined flags */
445}; 446};
diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h
deleted file mode 100644
index 920109a29191..000000000000
--- a/include/linux/intel_pmic_gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
1#ifndef LINUX_INTEL_PMIC_H
2#define LINUX_INTEL_PMIC_H
3
4struct intel_pmic_gpio_platform_data {
5 /* the first IRQ of the chip */
6 unsigned irq_base;
7 /* number assigned to the first GPIO */
8 unsigned gpio_base;
9 /* sram address for gpiointr register, the langwell chip will map
10 * the PMIC spi GPIO expander's GPIOINTR register in sram.
11 */
12 unsigned gpiointr;
13};
14
15#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0ff5111f6959..6a6de187ddc0 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,6 +31,13 @@
31#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ 31#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
32#define IOMMU_NOEXEC (1 << 3) 32#define IOMMU_NOEXEC (1 << 3)
33#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */ 33#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
34/*
35 * This is to make the IOMMU API setup privileged
36 * mapppings accessible by the master only at higher
37 * privileged execution level and inaccessible at
38 * less privileged levels.
39 */
40#define IOMMU_PRIV (1 << 5)
34 41
35struct iommu_ops; 42struct iommu_ops;
36struct iommu_group; 43struct iommu_group;
@@ -117,18 +124,25 @@ enum iommu_attr {
117 DOMAIN_ATTR_MAX, 124 DOMAIN_ATTR_MAX,
118}; 125};
119 126
127/* These are the possible reserved region types */
128#define IOMMU_RESV_DIRECT (1 << 0)
129#define IOMMU_RESV_RESERVED (1 << 1)
130#define IOMMU_RESV_MSI (1 << 2)
131
120/** 132/**
121 * struct iommu_dm_region - descriptor for a direct mapped memory region 133 * struct iommu_resv_region - descriptor for a reserved memory region
122 * @list: Linked list pointers 134 * @list: Linked list pointers
123 * @start: System physical start address of the region 135 * @start: System physical start address of the region
124 * @length: Length of the region in bytes 136 * @length: Length of the region in bytes
125 * @prot: IOMMU Protection flags (READ/WRITE/...) 137 * @prot: IOMMU Protection flags (READ/WRITE/...)
138 * @type: Type of the reserved region
126 */ 139 */
127struct iommu_dm_region { 140struct iommu_resv_region {
128 struct list_head list; 141 struct list_head list;
129 phys_addr_t start; 142 phys_addr_t start;
130 size_t length; 143 size_t length;
131 int prot; 144 int prot;
145 int type;
132}; 146};
133 147
134#ifdef CONFIG_IOMMU_API 148#ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
150 * @device_group: find iommu group for a particular device 164 * @device_group: find iommu group for a particular device
151 * @domain_get_attr: Query domain attributes 165 * @domain_get_attr: Query domain attributes
152 * @domain_set_attr: Change domain attributes 166 * @domain_set_attr: Change domain attributes
153 * @get_dm_regions: Request list of direct mapping requirements for a device 167 * @get_resv_regions: Request list of reserved regions for a device
154 * @put_dm_regions: Free list of direct mapping requirements for a device 168 * @put_resv_regions: Free list of reserved regions for a device
155 * @apply_dm_region: Temporary helper call-back for iova reserved ranges 169 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
156 * @domain_window_enable: Configure and enable a particular window for a domain 170 * @domain_window_enable: Configure and enable a particular window for a domain
157 * @domain_window_disable: Disable a particular window for a domain 171 * @domain_window_disable: Disable a particular window for a domain
158 * @domain_set_windows: Set the number of windows for a domain 172 * @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
184 int (*domain_set_attr)(struct iommu_domain *domain, 198 int (*domain_set_attr)(struct iommu_domain *domain,
185 enum iommu_attr attr, void *data); 199 enum iommu_attr attr, void *data);
186 200
187 /* Request/Free a list of direct mapping requirements for a device */ 201 /* Request/Free a list of reserved regions for a device */
188 void (*get_dm_regions)(struct device *dev, struct list_head *list); 202 void (*get_resv_regions)(struct device *dev, struct list_head *list);
189 void (*put_dm_regions)(struct device *dev, struct list_head *list); 203 void (*put_resv_regions)(struct device *dev, struct list_head *list);
190 void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain, 204 void (*apply_resv_region)(struct device *dev,
191 struct iommu_dm_region *region); 205 struct iommu_domain *domain,
206 struct iommu_resv_region *region);
192 207
193 /* Window handling functions */ 208 /* Window handling functions */
194 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, 209 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -204,6 +219,42 @@ struct iommu_ops {
204 unsigned long pgsize_bitmap; 219 unsigned long pgsize_bitmap;
205}; 220};
206 221
222/**
223 * struct iommu_device - IOMMU core representation of one IOMMU hardware
224 * instance
225 * @list: Used by the iommu-core to keep a list of registered iommus
226 * @ops: iommu-ops for talking to this iommu
227 * @dev: struct device for sysfs handling
228 */
229struct iommu_device {
230 struct list_head list;
231 const struct iommu_ops *ops;
232 struct fwnode_handle *fwnode;
233 struct device dev;
234};
235
236int iommu_device_register(struct iommu_device *iommu);
237void iommu_device_unregister(struct iommu_device *iommu);
238int iommu_device_sysfs_add(struct iommu_device *iommu,
239 struct device *parent,
240 const struct attribute_group **groups,
241 const char *fmt, ...) __printf(4, 5);
242void iommu_device_sysfs_remove(struct iommu_device *iommu);
243int iommu_device_link(struct iommu_device *iommu, struct device *link);
244void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
245
246static inline void iommu_device_set_ops(struct iommu_device *iommu,
247 const struct iommu_ops *ops)
248{
249 iommu->ops = ops;
250}
251
252static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
253 struct fwnode_handle *fwnode)
254{
255 iommu->fwnode = fwnode;
256}
257
207#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ 258#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
208#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ 259#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
209#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ 260#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
233extern void iommu_set_fault_handler(struct iommu_domain *domain, 284extern void iommu_set_fault_handler(struct iommu_domain *domain,
234 iommu_fault_handler_t handler, void *token); 285 iommu_fault_handler_t handler, void *token);
235 286
236extern void iommu_get_dm_regions(struct device *dev, struct list_head *list); 287extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
237extern void iommu_put_dm_regions(struct device *dev, struct list_head *list); 288extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
238extern int iommu_request_dm_for_dev(struct device *dev); 289extern int iommu_request_dm_for_dev(struct device *dev);
290extern struct iommu_resv_region *
291iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
292extern int iommu_get_group_resv_regions(struct iommu_group *group,
293 struct list_head *head);
239 294
240extern int iommu_attach_group(struct iommu_domain *domain, 295extern int iommu_attach_group(struct iommu_domain *domain,
241 struct iommu_group *group); 296 struct iommu_group *group);
@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
267 void *data); 322 void *data);
268extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, 323extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
269 void *data); 324 void *data);
270struct device *iommu_device_create(struct device *parent, void *drvdata,
271 const struct attribute_group **groups,
272 const char *fmt, ...) __printf(4, 5);
273void iommu_device_destroy(struct device *dev);
274int iommu_device_link(struct device *dev, struct device *link);
275void iommu_device_unlink(struct device *dev, struct device *link);
276 325
277/* Window handling function prototypes */ 326/* Window handling function prototypes */
278extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, 327extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
352 const struct iommu_ops *ops); 401 const struct iommu_ops *ops);
353void iommu_fwspec_free(struct device *dev); 402void iommu_fwspec_free(struct device *dev);
354int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); 403int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
355void iommu_register_instance(struct fwnode_handle *fwnode, 404const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
356 const struct iommu_ops *ops);
357const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
358 405
359#else /* CONFIG_IOMMU_API */ 406#else /* CONFIG_IOMMU_API */
360 407
361struct iommu_ops {}; 408struct iommu_ops {};
362struct iommu_group {}; 409struct iommu_group {};
363struct iommu_fwspec {}; 410struct iommu_fwspec {};
411struct iommu_device {};
364 412
365static inline bool iommu_present(struct bus_type *bus) 413static inline bool iommu_present(struct bus_type *bus)
366{ 414{
@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
443{ 491{
444} 492}
445 493
446static inline void iommu_get_dm_regions(struct device *dev, 494static inline void iommu_get_resv_regions(struct device *dev,
447 struct list_head *list) 495 struct list_head *list)
448{ 496{
449} 497}
450 498
451static inline void iommu_put_dm_regions(struct device *dev, 499static inline void iommu_put_resv_regions(struct device *dev,
452 struct list_head *list) 500 struct list_head *list)
453{ 501{
454} 502}
455 503
504static inline int iommu_get_group_resv_regions(struct iommu_group *group,
505 struct list_head *head)
506{
507 return -ENODEV;
508}
509
456static inline int iommu_request_dm_for_dev(struct device *dev) 510static inline int iommu_request_dm_for_dev(struct device *dev)
457{ 511{
458 return -ENODEV; 512 return -ENODEV;
@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
546 return -EINVAL; 600 return -EINVAL;
547} 601}
548 602
549static inline struct device *iommu_device_create(struct device *parent, 603static inline int iommu_device_register(struct iommu_device *iommu)
550 void *drvdata, 604{
551 const struct attribute_group **groups, 605 return -ENODEV;
552 const char *fmt, ...) 606}
607
608static inline void iommu_device_set_ops(struct iommu_device *iommu,
609 const struct iommu_ops *ops)
610{
611}
612
613static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
614 struct fwnode_handle *fwnode)
615{
616}
617
618static inline void iommu_device_unregister(struct iommu_device *iommu)
553{ 619{
554 return ERR_PTR(-ENODEV);
555} 620}
556 621
557static inline void iommu_device_destroy(struct device *dev) 622static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
623 struct device *parent,
624 const struct attribute_group **groups,
625 const char *fmt, ...)
626{
627 return -ENODEV;
628}
629
630static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
558{ 631{
559} 632}
560 633
@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
584 return -ENODEV; 657 return -ENODEV;
585} 658}
586 659
587static inline void iommu_register_instance(struct fwnode_handle *fwnode,
588 const struct iommu_ops *ops)
589{
590}
591
592static inline 660static inline
593const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) 661const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
594{ 662{
595 return NULL; 663 return NULL;
596} 664}
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 671d014e6429..71be5b330d21 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -69,6 +69,7 @@ struct ipv6_devconf {
69 __s32 seg6_require_hmac; 69 __s32 seg6_require_hmac;
70#endif 70#endif
71 __u32 enhanced_dad; 71 __u32 enhanced_dad;
72 __u32 addr_gen_mode;
72 73
73 struct ctl_table_header *sysctl_header; 74 struct ctl_table_header *sysctl_header;
74}; 75};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e79875574b39..f887351aa80e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -184,6 +184,7 @@ struct irq_data {
184 * 184 *
185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits 185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits
186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending 186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
187 * IRQD_ACTIVATED - Interrupt has already been activated
187 * IRQD_NO_BALANCING - Balancing disabled for this IRQ 188 * IRQD_NO_BALANCING - Balancing disabled for this IRQ
188 * IRQD_PER_CPU - Interrupt is per cpu 189 * IRQD_PER_CPU - Interrupt is per cpu
189 * IRQD_AFFINITY_SET - Interrupt affinity was set 190 * IRQD_AFFINITY_SET - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
202enum { 203enum {
203 IRQD_TRIGGER_MASK = 0xf, 204 IRQD_TRIGGER_MASK = 0xf,
204 IRQD_SETAFFINITY_PENDING = (1 << 8), 205 IRQD_SETAFFINITY_PENDING = (1 << 8),
206 IRQD_ACTIVATED = (1 << 9),
205 IRQD_NO_BALANCING = (1 << 10), 207 IRQD_NO_BALANCING = (1 << 10),
206 IRQD_PER_CPU = (1 << 11), 208 IRQD_PER_CPU = (1 << 11),
207 IRQD_AFFINITY_SET = (1 << 12), 209 IRQD_AFFINITY_SET = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
312 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; 314 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
313} 315}
314 316
317static inline bool irqd_is_activated(struct irq_data *d)
318{
319 return __irqd_to_state(d) & IRQD_ACTIVATED;
320}
321
322static inline void irqd_set_activated(struct irq_data *d)
323{
324 __irqd_to_state(d) |= IRQD_ACTIVATED;
325}
326
327static inline void irqd_clr_activated(struct irq_data *d)
328{
329 __irqd_to_state(d) &= ~IRQD_ACTIVATED;
330}
331
315#undef __irqd_to_state 332#undef __irqd_to_state
316 333
317static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 334static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -715,6 +732,10 @@ unsigned int arch_dynirq_lower_bound(unsigned int from);
715int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 732int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
716 struct module *owner, const struct cpumask *affinity); 733 struct module *owner, const struct cpumask *affinity);
717 734
735int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
736 unsigned int cnt, int node, struct module *owner,
737 const struct cpumask *affinity);
738
718/* use macros to avoid needing export.h for THIS_MODULE */ 739/* use macros to avoid needing export.h for THIS_MODULE */
719#define irq_alloc_descs(irq, from, cnt, node) \ 740#define irq_alloc_descs(irq, from, cnt, node) \
720 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) 741 __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
@@ -731,6 +752,21 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
731#define irq_alloc_descs_from(from, cnt, node) \ 752#define irq_alloc_descs_from(from, cnt, node) \
732 irq_alloc_descs(-1, from, cnt, node) 753 irq_alloc_descs(-1, from, cnt, node)
733 754
755#define devm_irq_alloc_descs(dev, irq, from, cnt, node) \
756 __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
757
758#define devm_irq_alloc_desc(dev, node) \
759 devm_irq_alloc_descs(dev, -1, 0, 1, node)
760
761#define devm_irq_alloc_desc_at(dev, at, node) \
762 devm_irq_alloc_descs(dev, at, at, 1, node)
763
764#define devm_irq_alloc_desc_from(dev, from, node) \
765 devm_irq_alloc_descs(dev, -1, from, 1, node)
766
767#define devm_irq_alloc_descs_from(dev, from, cnt, node) \
768 devm_irq_alloc_descs(dev, -1, from, cnt, node)
769
734void irq_free_descs(unsigned int irq, unsigned int cnt); 770void irq_free_descs(unsigned int irq, unsigned int cnt);
735static inline void irq_free_desc(unsigned int irq) 771static inline void irq_free_desc(unsigned int irq)
736{ 772{
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index e808f8ae6f14..725e86b506f3 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,7 +73,6 @@
73 73
74#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) 74#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
75#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) 75#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
76#define GICD_TYPER_LPIS (1U << 17)
77 76
78#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) 77#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
79#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) 78#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -306,7 +305,7 @@
306#define GITS_BASER_TYPE_NONE 0 305#define GITS_BASER_TYPE_NONE 0
307#define GITS_BASER_TYPE_DEVICE 1 306#define GITS_BASER_TYPE_DEVICE 1
308#define GITS_BASER_TYPE_VCPU 2 307#define GITS_BASER_TYPE_VCPU 2
309#define GITS_BASER_TYPE_CPU 3 308#define GITS_BASER_TYPE_RESERVED3 3
310#define GITS_BASER_TYPE_COLLECTION 4 309#define GITS_BASER_TYPE_COLLECTION 4
311#define GITS_BASER_TYPE_RESERVED5 5 310#define GITS_BASER_TYPE_RESERVED5 5
312#define GITS_BASER_TYPE_RESERVED6 6 311#define GITS_BASER_TYPE_RESERVED6 6
@@ -320,8 +319,6 @@
320#define GITS_CMD_MAPD 0x08 319#define GITS_CMD_MAPD 0x08
321#define GITS_CMD_MAPC 0x09 320#define GITS_CMD_MAPC 0x09
322#define GITS_CMD_MAPTI 0x0a 321#define GITS_CMD_MAPTI 0x0a
323/* older GIC documentation used MAPVI for this command */
324#define GITS_CMD_MAPVI GITS_CMD_MAPTI
325#define GITS_CMD_MAPI 0x0b 322#define GITS_CMD_MAPI 0x0b
326#define GITS_CMD_MOVI 0x01 323#define GITS_CMD_MOVI 0x01
327#define GITS_CMD_DISCARD 0x0f 324#define GITS_CMD_DISCARD 0x0f
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ffb84604c1de..188eced6813e 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -183,6 +183,12 @@ enum {
183 /* Irq domain is an IPI domain with single virq */ 183 /* Irq domain is an IPI domain with single virq */
184 IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), 184 IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
185 185
186 /* Irq domain implements MSIs */
187 IRQ_DOMAIN_FLAG_MSI = (1 << 4),
188
189 /* Irq domain implements MSI remapping */
190 IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
191
186 /* 192 /*
187 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved 193 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
188 * for implementation specific purposes and ignored by the 194 * for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
216 void *host_data); 222 void *host_data);
217extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, 223extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
218 enum irq_domain_bus_token bus_token); 224 enum irq_domain_bus_token bus_token);
225extern bool irq_domain_check_msi_remap(void);
219extern void irq_set_default_host(struct irq_domain *host); 226extern void irq_set_default_host(struct irq_domain *host);
220extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, 227extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
221 irq_hw_number_t hwirq, int node, 228 irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
446{ 453{
447 return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; 454 return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
448} 455}
456
457static inline bool irq_domain_is_msi(struct irq_domain *domain)
458{
459 return domain->flags & IRQ_DOMAIN_FLAG_MSI;
460}
461
462static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
463{
464 return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
465}
466
467extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
468
449#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 469#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
450static inline void irq_domain_activate_irq(struct irq_data *data) { } 470static inline void irq_domain_activate_irq(struct irq_data *data) { }
451static inline void irq_domain_deactivate_irq(struct irq_data *data) { } 471static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
477{ 497{
478 return false; 498 return false;
479} 499}
500
501static inline bool irq_domain_is_msi(struct irq_domain *domain)
502{
503 return false;
504}
505
506static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
507{
508 return false;
509}
510
511static inline bool
512irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
513{
514 return false;
515}
480#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 516#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
481 517
482#else /* CONFIG_IRQ_DOMAIN */ 518#else /* CONFIG_IRQ_DOMAIN */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 589d14e970ad..624215cebee5 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -293,6 +293,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
293 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; 293 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
294} 294}
295 295
296extern u64 jiffies64_to_nsecs(u64 j);
297
296extern unsigned long __msecs_to_jiffies(const unsigned int m); 298extern unsigned long __msecs_to_jiffies(const unsigned int m);
297#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) 299#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
298/* 300/*
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a0547c571800..b63d6b7b0db0 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -402,6 +402,6 @@ extern bool ____wrong_branch_error(void);
402#define static_branch_enable(x) static_key_enable(&(x)->key) 402#define static_branch_enable(x) static_key_enable(&(x)->key)
403#define static_branch_disable(x) static_key_disable(&(x)->key) 403#define static_branch_disable(x) static_key_disable(&(x)->key)
404 404
405#endif /* _LINUX_JUMP_LABEL_H */
406
407#endif /* __ASSEMBLY__ */ 405#endif /* __ASSEMBLY__ */
406
407#endif /* _LINUX_JUMP_LABEL_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 56aec84237ad..cb09238f6d32 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
514#define TAINT_FLAGS_COUNT 16 514#define TAINT_FLAGS_COUNT 16
515 515
516struct taint_flag { 516struct taint_flag {
517 char true; /* character printed when tainted */ 517 char c_true; /* character printed when tainted */
518 char false; /* character printed when not tainted */ 518 char c_false; /* character printed when not tainted */
519 bool module; /* also show as a per-module taint flag */ 519 bool module; /* also show as a per-module taint flag */
520}; 520};
521 521
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 00f776816aa3..66be8b6beceb 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -9,7 +9,6 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/vtime.h> 10#include <linux/vtime.h>
11#include <asm/irq.h> 11#include <asm/irq.h>
12#include <linux/cputime.h>
13 12
14/* 13/*
15 * 'kernel_stat.h' contains the definitions needed for doing 14 * 'kernel_stat.h' contains the definitions needed for doing
@@ -78,15 +77,18 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
78 return kstat_cpu(cpu).irqs_sum; 77 return kstat_cpu(cpu).irqs_sum;
79} 78}
80 79
81extern void account_user_time(struct task_struct *, cputime_t); 80extern void account_user_time(struct task_struct *, u64);
82extern void account_system_time(struct task_struct *, int, cputime_t); 81extern void account_guest_time(struct task_struct *, u64);
83extern void account_steal_time(cputime_t); 82extern void account_system_time(struct task_struct *, int, u64);
84extern void account_idle_time(cputime_t); 83extern void account_system_index_time(struct task_struct *, u64,
84 enum cpu_usage_stat);
85extern void account_steal_time(u64);
86extern void account_idle_time(u64);
85 87
86#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 88#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
87static inline void account_process_tick(struct task_struct *tsk, int user) 89static inline void account_process_tick(struct task_struct *tsk, int user)
88{ 90{
89 vtime_account_user(tsk); 91 vtime_flush(tsk);
90} 92}
91#else 93#else
92extern void account_process_tick(struct task_struct *, int user); 94extern void account_process_tick(struct task_struct *, int user);
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index fcfd2bf14d3f..c4e441e00db5 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -56,7 +56,7 @@ struct file;
56struct subprocess_info { 56struct subprocess_info {
57 struct work_struct work; 57 struct work_struct work;
58 struct completion *complete; 58 struct completion *complete;
59 char *path; 59 const char *path;
60 char **argv; 60 char **argv;
61 char **envp; 61 char **envp;
62 int wait; 62 int wait;
@@ -67,10 +67,11 @@ struct subprocess_info {
67}; 67};
68 68
69extern int 69extern int
70call_usermodehelper(char *path, char **argv, char **envp, int wait); 70call_usermodehelper(const char *path, char **argv, char **envp, int wait);
71 71
72extern struct subprocess_info * 72extern struct subprocess_info *
73call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask, 73call_usermodehelper_setup(const char *path, char **argv, char **envp,
74 gfp_t gfp_mask,
74 int (*init)(struct subprocess_info *info, struct cred *new), 75 int (*init)(struct subprocess_info *info, struct cred *new),
75 void (*cleanup)(struct subprocess_info *), void *data); 76 void (*cleanup)(struct subprocess_info *), void *data);
76 77
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8f6849084248..16ddfb8b304a 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -278,9 +278,13 @@ struct kprobe_insn_cache {
278 int nr_garbage; 278 int nr_garbage;
279}; 279};
280 280
281#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
281extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c); 282extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
282extern void __free_insn_slot(struct kprobe_insn_cache *c, 283extern void __free_insn_slot(struct kprobe_insn_cache *c,
283 kprobe_opcode_t *slot, int dirty); 284 kprobe_opcode_t *slot, int dirty);
285/* sleep-less address checking routine */
286extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
287 unsigned long addr);
284 288
285#define DEFINE_INSN_CACHE_OPS(__name) \ 289#define DEFINE_INSN_CACHE_OPS(__name) \
286extern struct kprobe_insn_cache kprobe_##__name##_slots; \ 290extern struct kprobe_insn_cache kprobe_##__name##_slots; \
@@ -294,6 +298,18 @@ static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
294{ \ 298{ \
295 __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \ 299 __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \
296} \ 300} \
301 \
302static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
303{ \
304 return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \
305}
306#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
307#define DEFINE_INSN_CACHE_OPS(__name) \
308static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
309{ \
310 return 0; \
311}
312#endif
297 313
298DEFINE_INSN_CACHE_OPS(insn); 314DEFINE_INSN_CACHE_OPS(insn);
299 315
@@ -330,7 +346,6 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
330 int write, void __user *buffer, 346 int write, void __user *buffer,
331 size_t *length, loff_t *ppos); 347 size_t *length, loff_t *ppos);
332#endif 348#endif
333
334#endif /* CONFIG_OPTPROBES */ 349#endif /* CONFIG_OPTPROBES */
335#ifdef CONFIG_KPROBES_ON_FTRACE 350#ifdef CONFIG_KPROBES_ON_FTRACE
336extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 351extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
@@ -481,6 +496,19 @@ static inline int enable_jprobe(struct jprobe *jp)
481 return enable_kprobe(&jp->kp); 496 return enable_kprobe(&jp->kp);
482} 497}
483 498
499#ifndef CONFIG_KPROBES
500static inline bool is_kprobe_insn_slot(unsigned long addr)
501{
502 return false;
503}
504#endif
505#ifndef CONFIG_OPTPROBES
506static inline bool is_kprobe_optinsn_slot(unsigned long addr)
507{
508 return false;
509}
510#endif
511
484#ifdef CONFIG_KPROBES 512#ifdef CONFIG_KPROBES
485/* 513/*
486 * Blacklist ganerating macro. Specify functions which is not probed 514 * Blacklist ganerating macro. Specify functions which is not probed
diff --git a/include/linux/kref.h b/include/linux/kref.h
index e15828fd71f1..f4156f88f557 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -15,22 +15,27 @@
15#ifndef _KREF_H_ 15#ifndef _KREF_H_
16#define _KREF_H_ 16#define _KREF_H_
17 17
18#include <linux/bug.h> 18#include <linux/spinlock.h>
19#include <linux/atomic.h> 19#include <linux/refcount.h>
20#include <linux/kernel.h>
21#include <linux/mutex.h>
22 20
23struct kref { 21struct kref {
24 atomic_t refcount; 22 refcount_t refcount;
25}; 23};
26 24
25#define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
26
27/** 27/**
28 * kref_init - initialize object. 28 * kref_init - initialize object.
29 * @kref: object in question. 29 * @kref: object in question.
30 */ 30 */
31static inline void kref_init(struct kref *kref) 31static inline void kref_init(struct kref *kref)
32{ 32{
33 atomic_set(&kref->refcount, 1); 33 refcount_set(&kref->refcount, 1);
34}
35
36static inline unsigned int kref_read(const struct kref *kref)
37{
38 return refcount_read(&kref->refcount);
34} 39}
35 40
36/** 41/**
@@ -39,17 +44,12 @@ static inline void kref_init(struct kref *kref)
39 */ 44 */
40static inline void kref_get(struct kref *kref) 45static inline void kref_get(struct kref *kref)
41{ 46{
42 /* If refcount was 0 before incrementing then we have a race 47 refcount_inc(&kref->refcount);
43 * condition when this kref is freeing by some other thread right now.
44 * In this case one should use kref_get_unless_zero()
45 */
46 WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
47} 48}
48 49
49/** 50/**
50 * kref_sub - subtract a number of refcounts for object. 51 * kref_put - decrement refcount for object.
51 * @kref: object. 52 * @kref: object.
52 * @count: Number of recounts to subtract.
53 * @release: pointer to the function that will clean up the object when the 53 * @release: pointer to the function that will clean up the object when the
54 * last reference to the object is released. 54 * last reference to the object is released.
55 * This pointer is required, and it is not acceptable to pass kfree 55 * This pointer is required, and it is not acceptable to pass kfree
@@ -58,57 +58,43 @@ static inline void kref_get(struct kref *kref)
58 * maintainer, and anyone else who happens to notice it. You have 58 * maintainer, and anyone else who happens to notice it. You have
59 * been warned. 59 * been warned.
60 * 60 *
61 * Subtract @count from the refcount, and if 0, call release(). 61 * Decrement the refcount, and if 0, call release().
62 * Return 1 if the object was removed, otherwise return 0. Beware, if this 62 * Return 1 if the object was removed, otherwise return 0. Beware, if this
63 * function returns 0, you still can not count on the kref from remaining in 63 * function returns 0, you still can not count on the kref from remaining in
64 * memory. Only use the return value if you want to see if the kref is now 64 * memory. Only use the return value if you want to see if the kref is now
65 * gone, not present. 65 * gone, not present.
66 */ 66 */
67static inline int kref_sub(struct kref *kref, unsigned int count, 67static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
68 void (*release)(struct kref *kref))
69{ 68{
70 WARN_ON(release == NULL); 69 WARN_ON(release == NULL);
71 70
72 if (atomic_sub_and_test((int) count, &kref->refcount)) { 71 if (refcount_dec_and_test(&kref->refcount)) {
73 release(kref); 72 release(kref);
74 return 1; 73 return 1;
75 } 74 }
76 return 0; 75 return 0;
77} 76}
78 77
79/**
80 * kref_put - decrement refcount for object.
81 * @kref: object.
82 * @release: pointer to the function that will clean up the object when the
83 * last reference to the object is released.
84 * This pointer is required, and it is not acceptable to pass kfree
85 * in as this function. If the caller does pass kfree to this
86 * function, you will be publicly mocked mercilessly by the kref
87 * maintainer, and anyone else who happens to notice it. You have
88 * been warned.
89 *
90 * Decrement the refcount, and if 0, call release().
91 * Return 1 if the object was removed, otherwise return 0. Beware, if this
92 * function returns 0, you still can not count on the kref from remaining in
93 * memory. Only use the return value if you want to see if the kref is now
94 * gone, not present.
95 */
96static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
97{
98 return kref_sub(kref, 1, release);
99}
100
101static inline int kref_put_mutex(struct kref *kref, 78static inline int kref_put_mutex(struct kref *kref,
102 void (*release)(struct kref *kref), 79 void (*release)(struct kref *kref),
103 struct mutex *lock) 80 struct mutex *lock)
104{ 81{
105 WARN_ON(release == NULL); 82 WARN_ON(release == NULL);
106 if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) { 83
107 mutex_lock(lock); 84 if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
108 if (unlikely(!atomic_dec_and_test(&kref->refcount))) { 85 release(kref);
109 mutex_unlock(lock); 86 return 1;
110 return 0; 87 }
111 } 88 return 0;
89}
90
91static inline int kref_put_lock(struct kref *kref,
92 void (*release)(struct kref *kref),
93 spinlock_t *lock)
94{
95 WARN_ON(release == NULL);
96
97 if (refcount_dec_and_lock(&kref->refcount, lock)) {
112 release(kref); 98 release(kref);
113 return 1; 99 return 1;
114 } 100 }
@@ -133,6 +119,6 @@ static inline int kref_put_mutex(struct kref *kref,
133 */ 119 */
134static inline int __must_check kref_get_unless_zero(struct kref *kref) 120static inline int __must_check kref_get_unless_zero(struct kref *kref)
135{ 121{
136 return atomic_add_unless(&kref->refcount, 1, 0); 122 return refcount_inc_not_zero(&kref->refcount);
137} 123}
138#endif /* _KREF_H_ */ 124#endif /* _KREF_H_ */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 569cb531094c..38c0bd7ca107 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,6 +13,7 @@
13#define __LINUX_LEDS_H_INCLUDED 13#define __LINUX_LEDS_H_INCLUDED
14 14
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/kernfs.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/mutex.h> 18#include <linux/mutex.h>
18#include <linux/rwsem.h> 19#include <linux/rwsem.h>
@@ -27,6 +28,7 @@ struct device;
27 28
28enum led_brightness { 29enum led_brightness {
29 LED_OFF = 0, 30 LED_OFF = 0,
31 LED_ON = 1,
30 LED_HALF = 127, 32 LED_HALF = 127,
31 LED_FULL = 255, 33 LED_FULL = 255,
32}; 34};
@@ -46,6 +48,7 @@ struct led_classdev {
46#define LED_DEV_CAP_FLASH (1 << 18) 48#define LED_DEV_CAP_FLASH (1 << 18)
47#define LED_HW_PLUGGABLE (1 << 19) 49#define LED_HW_PLUGGABLE (1 << 19)
48#define LED_PANIC_INDICATOR (1 << 20) 50#define LED_PANIC_INDICATOR (1 << 20)
51#define LED_BRIGHT_HW_CHANGED (1 << 21)
49 52
50 /* set_brightness_work / blink_timer flags, atomic, private. */ 53 /* set_brightness_work / blink_timer flags, atomic, private. */
51 unsigned long work_flags; 54 unsigned long work_flags;
@@ -110,6 +113,11 @@ struct led_classdev {
110 bool activated; 113 bool activated;
111#endif 114#endif
112 115
116#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
117 int brightness_hw_changed;
118 struct kernfs_node *brightness_hw_changed_kn;
119#endif
120
113 /* Ensures consistent access to the LED Flash Class device */ 121 /* Ensures consistent access to the LED Flash Class device */
114 struct mutex led_access; 122 struct mutex led_access;
115}; 123};
@@ -422,4 +430,12 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
422} 430}
423#endif 431#endif
424 432
433#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
434extern void led_classdev_notify_brightness_hw_changed(
435 struct led_classdev *led_cdev, enum led_brightness brightness);
436#else
437static inline void led_classdev_notify_brightness_hw_changed(
438 struct led_classdev *led_cdev, enum led_brightness brightness) { }
439#endif
440
425#endif /* __LINUX_LEDS_H_INCLUDED */ 441#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c170be548b7f..c9a69fc8821e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -968,7 +968,7 @@ struct ata_port_operations {
968 void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); 968 void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
969 void (*sff_exec_command)(struct ata_port *ap, 969 void (*sff_exec_command)(struct ata_port *ap,
970 const struct ata_taskfile *tf); 970 const struct ata_taskfile *tf);
971 unsigned int (*sff_data_xfer)(struct ata_device *dev, 971 unsigned int (*sff_data_xfer)(struct ata_queued_cmd *qc,
972 unsigned char *buf, unsigned int buflen, int rw); 972 unsigned char *buf, unsigned int buflen, int rw);
973 void (*sff_irq_on)(struct ata_port *); 973 void (*sff_irq_on)(struct ata_port *);
974 bool (*sff_irq_check)(struct ata_port *); 974 bool (*sff_irq_check)(struct ata_port *);
@@ -1130,6 +1130,7 @@ extern int ata_sas_port_start(struct ata_port *ap);
1130extern void ata_sas_port_stop(struct ata_port *ap); 1130extern void ata_sas_port_stop(struct ata_port *ap);
1131extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); 1131extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
1132extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); 1132extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
1133extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
1133extern int sata_scr_valid(struct ata_link *link); 1134extern int sata_scr_valid(struct ata_link *link);
1134extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); 1135extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
1135extern int sata_scr_write(struct ata_link *link, int reg, u32 val); 1136extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -1355,6 +1356,7 @@ extern struct device_attribute *ata_common_sdev_attrs[];
1355 .proc_name = drv_name, \ 1356 .proc_name = drv_name, \
1356 .slave_configure = ata_scsi_slave_config, \ 1357 .slave_configure = ata_scsi_slave_config, \
1357 .slave_destroy = ata_scsi_slave_destroy, \ 1358 .slave_destroy = ata_scsi_slave_destroy, \
1359 .eh_timed_out = ata_scsi_timed_out, \
1358 .bios_param = ata_std_bios_param, \ 1360 .bios_param = ata_std_bios_param, \
1359 .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ 1361 .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
1360 .sdev_attrs = ata_common_sdev_attrs 1362 .sdev_attrs = ata_common_sdev_attrs
@@ -1823,11 +1825,11 @@ extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
1823extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 1825extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
1824extern void ata_sff_exec_command(struct ata_port *ap, 1826extern void ata_sff_exec_command(struct ata_port *ap,
1825 const struct ata_taskfile *tf); 1827 const struct ata_taskfile *tf);
1826extern unsigned int ata_sff_data_xfer(struct ata_device *dev, 1828extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
1827 unsigned char *buf, unsigned int buflen, int rw); 1829 unsigned char *buf, unsigned int buflen, int rw);
1828extern unsigned int ata_sff_data_xfer32(struct ata_device *dev, 1830extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
1829 unsigned char *buf, unsigned int buflen, int rw); 1831 unsigned char *buf, unsigned int buflen, int rw);
1830extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, 1832extern unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc,
1831 unsigned char *buf, unsigned int buflen, int rw); 1833 unsigned char *buf, unsigned int buflen, int rw);
1832extern void ata_sff_irq_on(struct ata_port *ap); 1834extern void ata_sff_irq_on(struct ata_port *ap);
1833extern void ata_sff_irq_clear(struct ata_port *ap); 1835extern void ata_sff_irq_clear(struct ata_port *ap);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 7c273bbc5351..ca45e4a088a9 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -80,8 +80,6 @@ struct nvm_dev_ops {
80 unsigned int max_phys_sect; 80 unsigned int max_phys_sect;
81}; 81};
82 82
83
84
85#ifdef CONFIG_NVM 83#ifdef CONFIG_NVM
86 84
87#include <linux/blkdev.h> 85#include <linux/blkdev.h>
@@ -109,6 +107,7 @@ enum {
109 NVM_RSP_ERR_FAILWRITE = 0x40ff, 107 NVM_RSP_ERR_FAILWRITE = 0x40ff,
110 NVM_RSP_ERR_EMPTYPAGE = 0x42ff, 108 NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
111 NVM_RSP_ERR_FAILECC = 0x4281, 109 NVM_RSP_ERR_FAILECC = 0x4281,
110 NVM_RSP_ERR_FAILCRC = 0x4004,
112 NVM_RSP_WARN_HIGHECC = 0x4700, 111 NVM_RSP_WARN_HIGHECC = 0x4700,
113 112
114 /* Device opcodes */ 113 /* Device opcodes */
@@ -202,11 +201,10 @@ struct nvm_addr_format {
202struct nvm_id { 201struct nvm_id {
203 u8 ver_id; 202 u8 ver_id;
204 u8 vmnt; 203 u8 vmnt;
205 u8 cgrps;
206 u32 cap; 204 u32 cap;
207 u32 dom; 205 u32 dom;
208 struct nvm_addr_format ppaf; 206 struct nvm_addr_format ppaf;
209 struct nvm_id_group groups[4]; 207 struct nvm_id_group grp;
210} __packed; 208} __packed;
211 209
212struct nvm_target { 210struct nvm_target {
@@ -216,10 +214,6 @@ struct nvm_target {
216 struct gendisk *disk; 214 struct gendisk *disk;
217}; 215};
218 216
219struct nvm_tgt_instance {
220 struct nvm_tgt_type *tt;
221};
222
223#define ADDR_EMPTY (~0ULL) 217#define ADDR_EMPTY (~0ULL)
224 218
225#define NVM_VERSION_MAJOR 1 219#define NVM_VERSION_MAJOR 1
@@ -230,7 +224,6 @@ struct nvm_rq;
230typedef void (nvm_end_io_fn)(struct nvm_rq *); 224typedef void (nvm_end_io_fn)(struct nvm_rq *);
231 225
232struct nvm_rq { 226struct nvm_rq {
233 struct nvm_tgt_instance *ins;
234 struct nvm_tgt_dev *dev; 227 struct nvm_tgt_dev *dev;
235 228
236 struct bio *bio; 229 struct bio *bio;
@@ -254,6 +247,8 @@ struct nvm_rq {
254 247
255 u64 ppa_status; /* ppa media status */ 248 u64 ppa_status; /* ppa media status */
256 int error; 249 int error;
250
251 void *private;
257}; 252};
258 253
259static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) 254static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
@@ -272,15 +267,6 @@ enum {
272 NVM_BLK_ST_BAD = 0x8, /* Bad block */ 267 NVM_BLK_ST_BAD = 0x8, /* Bad block */
273}; 268};
274 269
275/* system block cpu representation */
276struct nvm_sb_info {
277 unsigned long seqnr;
278 unsigned long erase_cnt;
279 unsigned int version;
280 char mmtype[NVM_MMTYPE_LEN];
281 struct ppa_addr fs_ppa;
282};
283
284/* Device generic information */ 270/* Device generic information */
285struct nvm_geo { 271struct nvm_geo {
286 int nr_chnls; 272 int nr_chnls;
@@ -308,6 +294,7 @@ struct nvm_geo {
308 int sec_per_lun; 294 int sec_per_lun;
309}; 295};
310 296
297/* sub-device structure */
311struct nvm_tgt_dev { 298struct nvm_tgt_dev {
312 /* Device information */ 299 /* Device information */
313 struct nvm_geo geo; 300 struct nvm_geo geo;
@@ -329,17 +316,10 @@ struct nvm_dev {
329 316
330 struct list_head devices; 317 struct list_head devices;
331 318
332 /* Media manager */
333 struct nvmm_type *mt;
334 void *mp;
335
336 /* System blocks */
337 struct nvm_sb_info sb;
338
339 /* Device information */ 319 /* Device information */
340 struct nvm_geo geo; 320 struct nvm_geo geo;
341 321
342 /* lower page table */ 322 /* lower page table */
343 int lps_per_blk; 323 int lps_per_blk;
344 int *lptbl; 324 int *lptbl;
345 325
@@ -359,6 +339,10 @@ struct nvm_dev {
359 339
360 struct mutex mlock; 340 struct mutex mlock;
361 spinlock_t lock; 341 spinlock_t lock;
342
343 /* target management */
344 struct list_head area_list;
345 struct list_head targets;
362}; 346};
363 347
364static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, 348static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
@@ -391,10 +375,10 @@ static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
391 return l; 375 return l;
392} 376}
393 377
394static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, 378static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
395 struct ppa_addr r) 379 struct ppa_addr r)
396{ 380{
397 struct nvm_geo *geo = &dev->geo; 381 struct nvm_geo *geo = &tgt_dev->geo;
398 struct ppa_addr l; 382 struct ppa_addr l;
399 383
400 l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; 384 l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
@@ -407,10 +391,10 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
407 return l; 391 return l;
408} 392}
409 393
410static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, 394static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
411 struct ppa_addr r) 395 struct ppa_addr r)
412{ 396{
413 struct nvm_geo *geo = &dev->geo; 397 struct nvm_geo *geo = &tgt_dev->geo;
414 struct ppa_addr l; 398 struct ppa_addr l;
415 399
416 l.ppa = 0; 400 l.ppa = 0;
@@ -452,15 +436,12 @@ static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
452 (ppa1.g.blk == ppa2.g.blk)); 436 (ppa1.g.blk == ppa2.g.blk));
453} 437}
454 438
455static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
456{
457 return dev->lptbl[slc_pg];
458}
459
460typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); 439typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
461typedef sector_t (nvm_tgt_capacity_fn)(void *); 440typedef sector_t (nvm_tgt_capacity_fn)(void *);
462typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *); 441typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
463typedef void (nvm_tgt_exit_fn)(void *); 442typedef void (nvm_tgt_exit_fn)(void *);
443typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
444typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
464 445
465struct nvm_tgt_type { 446struct nvm_tgt_type {
466 const char *name; 447 const char *name;
@@ -469,12 +450,15 @@ struct nvm_tgt_type {
469 /* target entry points */ 450 /* target entry points */
470 nvm_tgt_make_rq_fn *make_rq; 451 nvm_tgt_make_rq_fn *make_rq;
471 nvm_tgt_capacity_fn *capacity; 452 nvm_tgt_capacity_fn *capacity;
472 nvm_end_io_fn *end_io;
473 453
474 /* module-specific init/teardown */ 454 /* module-specific init/teardown */
475 nvm_tgt_init_fn *init; 455 nvm_tgt_init_fn *init;
476 nvm_tgt_exit_fn *exit; 456 nvm_tgt_exit_fn *exit;
477 457
458 /* sysfs */
459 nvm_tgt_sysfs_init_fn *sysfs_init;
460 nvm_tgt_sysfs_exit_fn *sysfs_exit;
461
478 /* For internal use */ 462 /* For internal use */
479 struct list_head list; 463 struct list_head list;
480}; 464};
@@ -487,103 +471,29 @@ extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
487extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); 471extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
488extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); 472extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
489 473
490typedef int (nvmm_register_fn)(struct nvm_dev *);
491typedef void (nvmm_unregister_fn)(struct nvm_dev *);
492
493typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
494typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
495typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *);
496typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int);
497typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
498typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
499typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *,
500 struct ppa_addr, int);
501typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int);
502
503enum {
504 TRANS_TGT_TO_DEV = 0x0,
505 TRANS_DEV_TO_TGT = 0x1,
506};
507
508struct nvmm_type {
509 const char *name;
510 unsigned int version[3];
511
512 nvmm_register_fn *register_mgr;
513 nvmm_unregister_fn *unregister_mgr;
514
515 nvmm_create_tgt_fn *create_tgt;
516 nvmm_remove_tgt_fn *remove_tgt;
517
518 nvmm_submit_io_fn *submit_io;
519 nvmm_erase_blk_fn *erase_blk;
520
521 nvmm_get_area_fn *get_area;
522 nvmm_put_area_fn *put_area;
523
524 nvmm_trans_ppa_fn *trans_ppa;
525 nvmm_part_to_tgt_fn *part_to_tgt;
526
527 struct list_head list;
528};
529
530extern int nvm_register_mgr(struct nvmm_type *);
531extern void nvm_unregister_mgr(struct nvmm_type *);
532
533extern struct nvm_dev *nvm_alloc_dev(int); 474extern struct nvm_dev *nvm_alloc_dev(int);
534extern int nvm_register(struct nvm_dev *); 475extern int nvm_register(struct nvm_dev *);
535extern void nvm_unregister(struct nvm_dev *); 476extern void nvm_unregister(struct nvm_dev *);
536 477
537extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int);
538extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, 478extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
539 int, int); 479 int, int);
540extern int nvm_max_phys_sects(struct nvm_tgt_dev *); 480extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
541extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); 481extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
542extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
543extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
544extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, 482extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
545 const struct ppa_addr *, int, int); 483 const struct ppa_addr *, int, int);
546extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); 484extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
547extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int);
548extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int); 485extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
549extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, 486extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
550 void *); 487 void *);
551extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); 488extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
552extern void nvm_put_area(struct nvm_tgt_dev *, sector_t); 489extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
553extern void nvm_end_io(struct nvm_rq *, int); 490extern void nvm_end_io(struct nvm_rq *);
554extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
555 void *, int);
556extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
557 int, void *, int);
558extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); 491extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
559extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
560extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); 492extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
561 493
562/* sysblk.c */
563#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
564
565/* system block on disk representation */
566struct nvm_system_block {
567 __be32 magic; /* magic signature */
568 __be32 seqnr; /* sequence number */
569 __be32 erase_cnt; /* erase count */
570 __be16 version; /* version number */
571 u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */
572 __be64 fs_ppa; /* PPA for media manager
573 * superblock */
574};
575
576extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
577extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
578extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
579
580extern int nvm_dev_factory(struct nvm_dev *, int flags); 494extern int nvm_dev_factory(struct nvm_dev *, int flags);
581 495
582#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid) \ 496extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
583 for ((chid) = 0, (ppa).ppa = 0; (chid) < (geo)->nr_chnls; \
584 (chid)++, (ppa).g.ch = (chid)) \
585 for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl; \
586 (lunid)++, (ppa).g.lun = (lunid))
587 497
588#else /* CONFIG_NVM */ 498#else /* CONFIG_NVM */
589struct nvm_dev_ops; 499struct nvm_dev_ops;
diff --git a/include/linux/list.h b/include/linux/list.h
index d1039ecaf94f..ae537fa46216 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -527,6 +527,19 @@ static inline void list_splice_tail_init(struct list_head *list,
527 pos = list_next_entry(pos, member)) 527 pos = list_next_entry(pos, member))
528 528
529/** 529/**
530 * list_for_each_entry_from_reverse - iterate backwards over list of given type
531 * from the current point
532 * @pos: the type * to use as a loop cursor.
533 * @head: the head for your list.
534 * @member: the name of the list_head within the struct.
535 *
536 * Iterate backwards over list of given type, continuing from current position.
537 */
538#define list_for_each_entry_from_reverse(pos, head, member) \
539 for (; &pos->member != (head); \
540 pos = list_prev_entry(pos, member))
541
542/**
530 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 543 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
531 * @pos: the type * to use as a loop cursor. 544 * @pos: the type * to use as a loop cursor.
532 * @n: another type * to use as temporary storage 545 * @n: another type * to use as temporary storage
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fd4ca0b4fe0f..171baa90f6f6 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -3,28 +3,33 @@
3/* 3/*
4 * Lock-less NULL terminated single linked list 4 * Lock-less NULL terminated single linked list
5 * 5 *
6 * If there are multiple producers and multiple consumers, llist_add 6 * Cases where locking is not needed:
7 * can be used in producers and llist_del_all can be used in 7 * If there are multiple producers and multiple consumers, llist_add can be
8 * consumers. They can work simultaneously without lock. But 8 * used in producers and llist_del_all can be used in consumers simultaneously
9 * llist_del_first can not be used here. Because llist_del_first 9 * without locking. Also a single consumer can use llist_del_first while
10 * depends on list->first->next does not changed if list->first is not 10 * multiple producers simultaneously use llist_add, without any locking.
11 * changed during its operation, but llist_del_first, llist_add, 11 *
12 * llist_add (or llist_del_all, llist_add, llist_add) sequence in 12 * Cases where locking is needed:
13 * another consumer may violate that. 13 * If we have multiple consumers with llist_del_first used in one consumer, and
14 * 14 * llist_del_first or llist_del_all used in other consumers, then a lock is
15 * If there are multiple producers and one consumer, llist_add can be 15 * needed. This is because llist_del_first depends on list->first->next not
16 * used in producers and llist_del_all or llist_del_first can be used 16 * changing, but without lock protection, there's no way to be sure about that
17 * in the consumer. 17 * if a preemption happens in the middle of the delete operation and on being
18 * 18 * preempted back, the list->first is the same as before causing the cmpxchg in
19 * This can be summarized as follow: 19 * llist_del_first to succeed. For example, while a llist_del_first operation
20 * is in progress in one consumer, then a llist_del_first, llist_add,
21 * llist_add (or llist_del_all, llist_add, llist_add) sequence in another
22 * consumer may cause violations.
23 *
24 * This can be summarized as follows:
20 * 25 *
21 * | add | del_first | del_all 26 * | add | del_first | del_all
22 * add | - | - | - 27 * add | - | - | -
23 * del_first | | L | L 28 * del_first | | L | L
24 * del_all | | | - 29 * del_all | | | -
25 * 30 *
26 * Where "-" stands for no lock is needed, while "L" stands for lock 31 * Where, a particular row's operation can happen concurrently with a column's
27 * is needed. 32 * operation, with "-" being no lock needed, while "L" being lock is needed.
28 * 33 *
29 * The list entries deleted via llist_del_all can be traversed with 34 * The list entries deleted via llist_del_all can be traversed with
30 * traversing function such as llist_for_each etc. But the list 35 * traversing function such as llist_for_each etc. But the list
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d91e6a..ef3d4f67118c 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
203 * ... and so on. 203 * ... and so on.
204 */ 204 */
205 205
206#define order_base_2(n) ilog2(roundup_pow_of_two(n)) 206static inline __attribute_const__
207int __order_base_2(unsigned long n)
208{
209 return n > 1 ? ilog2(n - 1) + 1 : 0;
210}
207 211
212#define order_base_2(n) \
213( \
214 __builtin_constant_p(n) ? ( \
215 ((n) == 0 || (n) == 1) ? 0 : \
216 ilog2((n) - 1) + 1) : \
217 __order_base_2(n) \
218)
208#endif /* _LINUX_LOG2_H */ 219#endif /* _LINUX_LOG2_H */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 558adfa5c8a8..e29d4c62a3c8 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -352,8 +352,7 @@
352 * Return 0 if permission is granted. 352 * Return 0 if permission is granted.
353 * @inode_getattr: 353 * @inode_getattr:
354 * Check permission before obtaining file attributes. 354 * Check permission before obtaining file attributes.
355 * @mnt is the vfsmount where the dentry was looked up 355 * @path contains the path structure for the file.
356 * @dentry contains the dentry structure for the file.
357 * Return 0 if permission is granted. 356 * Return 0 if permission is granted.
358 * @inode_setxattr: 357 * @inode_setxattr:
359 * Check permission before setting the extended attributes 358 * Check permission before setting the extended attributes
@@ -666,11 +665,6 @@
666 * @sig contains the signal value. 665 * @sig contains the signal value.
667 * @secid contains the sid of the process where the signal originated 666 * @secid contains the sid of the process where the signal originated
668 * Return 0 if permission is granted. 667 * Return 0 if permission is granted.
669 * @task_wait:
670 * Check permission before allowing a process to reap a child process @p
671 * and collect its status information.
672 * @p contains the task_struct for process.
673 * Return 0 if permission is granted.
674 * @task_prctl: 668 * @task_prctl:
675 * Check permission before performing a process control operation on the 669 * Check permission before performing a process control operation on the
676 * current process. 670 * current process.
@@ -1507,7 +1501,6 @@ union security_list_options {
1507 int (*task_movememory)(struct task_struct *p); 1501 int (*task_movememory)(struct task_struct *p);
1508 int (*task_kill)(struct task_struct *p, struct siginfo *info, 1502 int (*task_kill)(struct task_struct *p, struct siginfo *info,
1509 int sig, u32 secid); 1503 int sig, u32 secid);
1510 int (*task_wait)(struct task_struct *p);
1511 int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, 1504 int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
1512 unsigned long arg4, unsigned long arg5); 1505 unsigned long arg4, unsigned long arg5);
1513 void (*task_to_inode)(struct task_struct *p, struct inode *inode); 1506 void (*task_to_inode)(struct task_struct *p, struct inode *inode);
@@ -1547,8 +1540,7 @@ union security_list_options {
1547 void (*d_instantiate)(struct dentry *dentry, struct inode *inode); 1540 void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
1548 1541
1549 int (*getprocattr)(struct task_struct *p, char *name, char **value); 1542 int (*getprocattr)(struct task_struct *p, char *name, char **value);
1550 int (*setprocattr)(struct task_struct *p, char *name, void *value, 1543 int (*setprocattr)(const char *name, void *value, size_t size);
1551 size_t size);
1552 int (*ismaclabel)(const char *name); 1544 int (*ismaclabel)(const char *name);
1553 int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); 1545 int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
1554 int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); 1546 int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
@@ -1768,7 +1760,6 @@ struct security_hook_heads {
1768 struct list_head task_getscheduler; 1760 struct list_head task_getscheduler;
1769 struct list_head task_movememory; 1761 struct list_head task_movememory;
1770 struct list_head task_kill; 1762 struct list_head task_kill;
1771 struct list_head task_wait;
1772 struct list_head task_prctl; 1763 struct list_head task_prctl;
1773 struct list_head task_to_inode; 1764 struct list_head task_to_inode;
1774 struct list_head ipc_permission; 1765 struct list_head ipc_permission;
@@ -1876,6 +1867,7 @@ struct security_hook_list {
1876 struct list_head list; 1867 struct list_head list;
1877 struct list_head *head; 1868 struct list_head *head;
1878 union security_list_options hook; 1869 union security_list_options hook;
1870 char *lsm;
1879}; 1871};
1880 1872
1881/* 1873/*
@@ -1888,15 +1880,10 @@ struct security_hook_list {
1888 { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } } 1880 { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
1889 1881
1890extern struct security_hook_heads security_hook_heads; 1882extern struct security_hook_heads security_hook_heads;
1883extern char *lsm_names;
1891 1884
1892static inline void security_add_hooks(struct security_hook_list *hooks, 1885extern void security_add_hooks(struct security_hook_list *hooks, int count,
1893 int count) 1886 char *lsm);
1894{
1895 int i;
1896
1897 for (i = 0; i < count; i++)
1898 list_add_tail_rcu(&hooks[i].list, hooks[i].head);
1899}
1900 1887
1901#ifdef CONFIG_SECURITY_SELINUX_DISABLE 1888#ifdef CONFIG_SECURITY_SELINUX_DISABLE
1902/* 1889/*
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index a57f0dfb6db7..4055cf8cc978 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -17,8 +17,15 @@
17#define MARVELL_PHY_ID_88E1116R 0x01410e40 17#define MARVELL_PHY_ID_88E1116R 0x01410e40
18#define MARVELL_PHY_ID_88E1510 0x01410dd0 18#define MARVELL_PHY_ID_88E1510 0x01410dd0
19#define MARVELL_PHY_ID_88E1540 0x01410eb0 19#define MARVELL_PHY_ID_88E1540 0x01410eb0
20#define MARVELL_PHY_ID_88E1545 0x01410ea0
20#define MARVELL_PHY_ID_88E3016 0x01410e60 21#define MARVELL_PHY_ID_88E3016 0x01410e60
21 22
23/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
24 * not have a model ID. So the switch driver traps reads to the ID2
25 * register and returns the switch family ID
26 */
27#define MARVELL_PHY_ID_88E6390 0x01410f90
28
22/* struct phy_device dev_flags definitions */ 29/* struct phy_device dev_flags definitions */
23#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 30#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
24#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 31#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6e8b5b270ffe..80690c96c734 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,6 +133,16 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
133 return ret; 133 return ret;
134} 134}
135 135
136#ifndef mul_u32_u32
137/*
138 * Many a GCC version messes this up and generates a 64x64 mult :-(
139 */
140static inline u64 mul_u32_u32(u32 a, u32 b)
141{
142 return (u64)a * b;
143}
144#endif
145
136#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) 146#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
137 147
138#ifndef mul_u64_u32_shr 148#ifndef mul_u64_u32_shr
@@ -160,9 +170,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
160 al = a; 170 al = a;
161 ah = a >> 32; 171 ah = a >> 32;
162 172
163 ret = ((u64)al * mul) >> shift; 173 ret = mul_u32_u32(al, mul) >> shift;
164 if (ah) 174 if (ah)
165 ret += ((u64)ah * mul) << (32 - shift); 175 ret += mul_u32_u32(ah, mul) << (32 - shift);
166 176
167 return ret; 177 return ret;
168} 178}
@@ -186,10 +196,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
186 a0.ll = a; 196 a0.ll = a;
187 b0.ll = b; 197 b0.ll = b;
188 198
189 rl.ll = (u64)a0.l.low * b0.l.low; 199 rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
190 rm.ll = (u64)a0.l.low * b0.l.high; 200 rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
191 rn.ll = (u64)a0.l.high * b0.l.low; 201 rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
192 rh.ll = (u64)a0.l.high * b0.l.high; 202 rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
193 203
194 /* 204 /*
195 * Each of these lines computes a 64-bit intermediate result into "c", 205 * Each of these lines computes a 64-bit intermediate result into "c",
@@ -229,8 +239,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
229 } u, rl, rh; 239 } u, rl, rh;
230 240
231 u.ll = a; 241 u.ll = a;
232 rl.ll = (u64)u.l.low * mul; 242 rl.ll = mul_u32_u32(u.l.low, mul);
233 rh.ll = (u64)u.l.high * mul + rl.l.high; 243 rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
234 244
235 /* Bits 32-63 of the result will be in rh.l.low. */ 245 /* Bits 32-63 of the result will be in rh.l.low. */
236 rl.l.high = do_div(rh.ll, divisor); 246 rl.l.high = do_div(rh.ll, divisor);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index bf9d1d750693..ca08ab16ecdc 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -10,6 +10,7 @@
10#define __LINUX_MDIO_H__ 10#define __LINUX_MDIO_H__
11 11
12#include <uapi/linux/mdio.h> 12#include <uapi/linux/mdio.h>
13#include <linux/mod_devicetable.h>
13 14
14struct mii_bus; 15struct mii_bus;
15 16
@@ -29,6 +30,7 @@ struct mdio_device {
29 30
30 const struct dev_pm_ops *pm_ops; 31 const struct dev_pm_ops *pm_ops;
31 struct mii_bus *bus; 32 struct mii_bus *bus;
33 char modalias[MDIO_NAME_SIZE];
32 34
33 int (*bus_match)(struct device *dev, struct device_driver *drv); 35 int (*bus_match)(struct device *dev, struct device_driver *drv);
34 void (*device_free)(struct mdio_device *mdiodev); 36 void (*device_free)(struct mdio_device *mdiodev);
@@ -71,6 +73,7 @@ int mdio_device_register(struct mdio_device *mdiodev);
71void mdio_device_remove(struct mdio_device *mdiodev); 73void mdio_device_remove(struct mdio_device *mdiodev);
72int mdio_driver_register(struct mdio_driver *drv); 74int mdio_driver_register(struct mdio_driver *drv);
73void mdio_driver_unregister(struct mdio_driver *drv); 75void mdio_driver_unregister(struct mdio_driver *drv);
76int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
74 77
75static inline bool mdio_phy_id_is_c45(int phy_id) 78static inline bool mdio_phy_id_is_c45(int phy_id)
76{ 79{
@@ -130,6 +133,10 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
130extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, 133extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
131 struct ethtool_cmd *ecmd, 134 struct ethtool_cmd *ecmd,
132 u32 npage_adv, u32 npage_lpa); 135 u32 npage_adv, u32 npage_lpa);
136extern void
137mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
138 struct ethtool_link_ksettings *cmd,
139 u32 npage_adv, u32 npage_lpa);
133 140
134/** 141/**
135 * mdio45_ethtool_gset - get settings for ETHTOOL_GSET 142 * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
@@ -147,6 +154,23 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
147 mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); 154 mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
148} 155}
149 156
157/**
158 * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
159 * @mdio: MDIO interface
160 * @cmd: Ethtool request structure
161 *
162 * Since the CSRs for auto-negotiation using next pages are not fully
163 * standardised, this function does not attempt to decode them. Use
164 * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits
165 * from next pages.
166 */
167static inline void
168mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio,
169 struct ethtool_link_ksettings *cmd)
170{
171 mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0);
172}
173
150extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, 174extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
151 struct mii_ioctl_data *mii_data, int cmd); 175 struct mii_ioctl_data *mii_data, int cmd);
152 176
@@ -244,7 +268,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
244struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); 268struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
245 269
246/** 270/**
247 * module_mdio_driver() - Helper macro for registering mdio drivers 271 * mdio_module_driver() - Helper macro for registering mdio drivers
248 * 272 *
249 * Helper macro for MDIO drivers which do not do anything special in module 273 * Helper macro for MDIO drivers which do not do anything special in module
250 * init/exit. Each module may only use this macro once, and calling it 274 * init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fadea47..134a2f69c21a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
85extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); 85extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
86/* VM interface that may be used by firmware interface */ 86/* VM interface that may be used by firmware interface */
87extern int online_pages(unsigned long, unsigned long, int); 87extern int online_pages(unsigned long, unsigned long, int);
88extern int test_pages_in_a_zone(unsigned long, unsigned long); 88extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
89 unsigned long *valid_start, unsigned long *valid_end);
89extern void __offline_isolated_pages(unsigned long, unsigned long); 90extern void __offline_isolated_pages(unsigned long, unsigned long);
90 91
91typedef void (*online_page_callback_t)(struct page *page); 92typedef void (*online_page_callback_t)(struct page *page);
@@ -284,7 +285,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
284 unsigned long map_offset); 285 unsigned long map_offset);
285extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, 286extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
286 unsigned long pnum); 287 unsigned long pnum);
287extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, 288extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
288 enum zone_type target); 289 enum zone_type target, int *zone_shift);
289 290
290#endif /* __LINUX_MEMORY_HOTPLUG_H */ 291#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index a4860bc9b73d..f848ee86a339 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -13,7 +13,7 @@
13 13
14#include <linux/regmap.h> 14#include <linux/regmap.h>
15 15
16enum { 16enum axp20x_variants {
17 AXP152_ID = 0, 17 AXP152_ID = 0,
18 AXP202_ID, 18 AXP202_ID,
19 AXP209_ID, 19 AXP209_ID,
@@ -532,35 +532,6 @@ struct axp20x_dev {
532 const struct regmap_irq_chip *regmap_irq_chip; 532 const struct regmap_irq_chip *regmap_irq_chip;
533}; 533};
534 534
535#define BATTID_LEN 64
536#define OCV_CURVE_SIZE 32
537#define MAX_THERM_CURVE_SIZE 25
538#define PD_DEF_MIN_TEMP 0
539#define PD_DEF_MAX_TEMP 55
540
541struct axp20x_fg_pdata {
542 char battid[BATTID_LEN + 1];
543 int design_cap;
544 int min_volt;
545 int max_volt;
546 int max_temp;
547 int min_temp;
548 int cap1;
549 int cap0;
550 int rdc1;
551 int rdc0;
552 int ocv_curve[OCV_CURVE_SIZE];
553 int tcsz;
554 int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
555};
556
557struct axp20x_chrg_pdata {
558 int max_cc;
559 int max_cv;
560 int def_cc;
561 int def_cv;
562};
563
564struct axp288_extcon_pdata { 535struct axp288_extcon_pdata {
565 /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */ 536 /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
566 struct gpio_desc *gpio_mux_cntl; 537 struct gpio_desc *gpio_mux_cntl;
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 1683003603f3..098c3501ad2c 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1441,7 +1441,8 @@ enum motionsensor_type {
1441 MOTIONSENSE_TYPE_PROX = 3, 1441 MOTIONSENSE_TYPE_PROX = 3,
1442 MOTIONSENSE_TYPE_LIGHT = 4, 1442 MOTIONSENSE_TYPE_LIGHT = 4,
1443 MOTIONSENSE_TYPE_ACTIVITY = 5, 1443 MOTIONSENSE_TYPE_ACTIVITY = 5,
1444 MOTIONSENSE_TYPE_MAX 1444 MOTIONSENSE_TYPE_BARO = 6,
1445 MOTIONSENSE_TYPE_MAX,
1445}; 1446};
1446 1447
1447/* List of motion sensor locations. */ 1448/* List of motion sensor locations. */
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 2b300b44f994..fba8fcb54f8c 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,6 +20,8 @@
20#ifndef LPC_ICH_H 20#ifndef LPC_ICH_H
21#define LPC_ICH_H 21#define LPC_ICH_H
22 22
23#include <linux/platform_data/intel-spi.h>
24
23/* GPIO resources */ 25/* GPIO resources */
24#define ICH_RES_GPIO 0 26#define ICH_RES_GPIO 0
25#define ICH_RES_GPE0 1 27#define ICH_RES_GPE0 1
@@ -40,6 +42,7 @@ struct lpc_ich_info {
40 char name[32]; 42 char name[32];
41 unsigned int iTCO_version; 43 unsigned int iTCO_version;
42 unsigned int gpio_version; 44 unsigned int gpio_version;
45 enum intel_spi_type spi_type;
43 u8 use_gpio; 46 u8 use_gpio;
44}; 47};
45 48
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
new file mode 100644
index 000000000000..d0300045f04a
--- /dev/null
+++ b/include/linux/mfd/stm32-timers.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright (C) STMicroelectronics 2016
3 *
4 * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
5 *
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _LINUX_STM32_GPTIMER_H_
10#define _LINUX_STM32_GPTIMER_H_
11
12#include <linux/clk.h>
13#include <linux/regmap.h>
14
15#define TIM_CR1 0x00 /* Control Register 1 */
16#define TIM_CR2 0x04 /* Control Register 2 */
17#define TIM_SMCR 0x08 /* Slave mode control reg */
18#define TIM_DIER 0x0C /* DMA/interrupt register */
19#define TIM_SR 0x10 /* Status register */
20#define TIM_EGR 0x14 /* Event Generation Reg */
21#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
22#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
23#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
24#define TIM_PSC 0x28 /* Prescaler */
25#define TIM_ARR 0x2c /* Auto-Reload Register */
26#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
27#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */
28#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
29#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
30#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
31
32#define TIM_CR1_CEN BIT(0) /* Counter Enable */
33#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
34#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
35#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
36#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
37#define TIM_DIER_UIE BIT(0) /* Update interrupt */
38#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
39#define TIM_EGR_UG BIT(0) /* Update Generation */
40#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
41#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
42#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
43#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
44#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
45#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
46#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
47#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
48#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
49#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
50#define TIM_BDTR_BKE BIT(12) /* Break input enable */
51#define TIM_BDTR_BKP BIT(13) /* Break input polarity */
52#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
53#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
54#define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19))
55#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23))
56#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */
57#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */
58
59#define MAX_TIM_PSC 0xFFFF
60#define TIM_CR2_MMS_SHIFT 4
61#define TIM_SMCR_TS_SHIFT 4
62#define TIM_BDTR_BKF_MASK 0xF
63#define TIM_BDTR_BKF_SHIFT 16
64#define TIM_BDTR_BK2F_SHIFT 20
65
66struct stm32_timers {
67 struct clk *clk;
68 struct regmap *regmap;
69 u32 max_arr;
70};
71#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index fba44abd05ba..a1520d88ebf3 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -94,10 +94,8 @@
94 */ 94 */
95#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7) 95#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
96 96
97/* 97/* Controller has some SDIO status bits which must be 1 */
98 * Some controllers needs to set 1 on SDIO status reserved bits 98#define TMIO_MMC_SDIO_STATUS_SETBITS (1 << 8)
99 */
100#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
101 99
102/* 100/*
103 * Some controllers have a 32-bit wide data port register 101 * Some controllers have a 32-bit wide data port register
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e0095e..f541da68d1e7 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
35#define PHY_ID_KSZ886X 0x00221430 35#define PHY_ID_KSZ886X 0x00221430
36#define PHY_ID_KSZ8863 0x00221435 36#define PHY_ID_KSZ8863 0x00221435
37 37
38#define PHY_ID_KSZ8795 0x00221550
39
38/* struct phy_device dev_flags definitions */ 40/* struct phy_device dev_flags definitions */
39#define MICREL_PHY_50MHZ_CLK 0x00000001 41#define MICREL_PHY_50MHZ_CLK 0x00000001
40#define MICREL_PHY_FXEN 0x00000002 42#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index ed30d5d713e3..0590263c462c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -22,6 +22,7 @@
22/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ 22/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
23#define WATCHDOG_MINOR 130 /* Watchdog timer */ 23#define WATCHDOG_MINOR 130 /* Watchdog timer */
24#define TEMP_MINOR 131 /* Temperature Sensor */ 24#define TEMP_MINOR 131 /* Temperature Sensor */
25#define APM_MINOR_DEV 134
25#define RTC_MINOR 135 26#define RTC_MINOR 135
26#define EFI_RTC_MINOR 136 /* EFI Time services */ 27#define EFI_RTC_MINOR 136 /* EFI Time services */
27#define VHCI_MINOR 137 28#define VHCI_MINOR 137
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6533c16e27ad..7e66e4f62858 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1374,6 +1374,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
1374int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); 1374int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
1375int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, 1375int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1376 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); 1376 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
1377int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
1377int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, 1378int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1378 u8 promisc); 1379 u8 promisc);
1379int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time); 1380int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
@@ -1539,8 +1540,13 @@ enum mlx4_ptys_proto {
1539 MLX4_PTYS_EN = 1<<2, 1540 MLX4_PTYS_EN = 1<<2,
1540}; 1541};
1541 1542
1543enum mlx4_ptys_flags {
1544 MLX4_PTYS_AN_DISABLE_CAP = 1 << 5,
1545 MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6,
1546};
1547
1542struct mlx4_ptys_reg { 1548struct mlx4_ptys_reg {
1543 u8 resrvd1; 1549 u8 flags;
1544 u8 local_port; 1550 u8 local_port;
1545 u8 resrvd2; 1551 u8 resrvd2;
1546 u8 proto_mask; 1552 u8 proto_mask;
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 7c3c0d3aca37..95898847c7d4 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -42,13 +42,13 @@ struct mlx5_core_cq {
42 int cqe_sz; 42 int cqe_sz;
43 __be32 *set_ci_db; 43 __be32 *set_ci_db;
44 __be32 *arm_db; 44 __be32 *arm_db;
45 struct mlx5_uars_page *uar;
45 atomic_t refcount; 46 atomic_t refcount;
46 struct completion free; 47 struct completion free;
47 unsigned vector; 48 unsigned vector;
48 unsigned int irqn; 49 unsigned int irqn;
49 void (*comp) (struct mlx5_core_cq *); 50 void (*comp) (struct mlx5_core_cq *);
50 void (*event) (struct mlx5_core_cq *, enum mlx5_event); 51 void (*event) (struct mlx5_core_cq *, enum mlx5_event);
51 struct mlx5_uar *uar;
52 u32 cons_index; 52 u32 cons_index;
53 unsigned arm_sn; 53 unsigned arm_sn;
54 struct mlx5_rsc_debug *dbg; 54 struct mlx5_rsc_debug *dbg;
@@ -144,7 +144,6 @@ enum {
144 144
145static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, 145static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
146 void __iomem *uar_page, 146 void __iomem *uar_page,
147 spinlock_t *doorbell_lock,
148 u32 cons_index) 147 u32 cons_index)
149{ 148{
150 __be32 doorbell[2]; 149 __be32 doorbell[2];
@@ -164,7 +163,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
164 doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); 163 doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
165 doorbell[1] = cpu_to_be32(cq->cqn); 164 doorbell[1] = cpu_to_be32(cq->cqn);
166 165
167 mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock); 166 mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
168} 167}
169 168
170int mlx5_init_cq_table(struct mlx5_core_dev *dev); 169int mlx5_init_cq_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 52b437431c6a..dd9a263ed368 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -67,10 +67,11 @@
67 67
68/* insert a value to a struct */ 68/* insert a value to a struct */
69#define MLX5_SET(typ, p, fld, v) do { \ 69#define MLX5_SET(typ, p, fld, v) do { \
70 u32 _v = v; \
70 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ 71 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
71 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ 72 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
72 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ 73 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
73 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ 74 (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
74 << __mlx5_dw_bit_off(typ, fld))); \ 75 << __mlx5_dw_bit_off(typ, fld))); \
75} while (0) 76} while (0)
76 77
@@ -212,10 +213,20 @@ enum {
212}; 213};
213 214
214enum { 215enum {
215 MLX5_BF_REGS_PER_PAGE = 4, 216 MLX5_ADAPTER_PAGE_SHIFT = 12,
216 MLX5_MAX_UAR_PAGES = 1 << 8, 217 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
217 MLX5_NON_FP_BF_REGS_PER_PAGE = 2, 218};
218 MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE, 219
220enum {
221 MLX5_BFREGS_PER_UAR = 4,
222 MLX5_MAX_UARS = 1 << 8,
223 MLX5_NON_FP_BFREGS_PER_UAR = 2,
224 MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
225 MLX5_NON_FP_BFREGS_PER_UAR,
226 MLX5_MAX_BFREGS = MLX5_MAX_UARS *
227 MLX5_NON_FP_BFREGS_PER_UAR,
228 MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
229 MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
219}; 230};
220 231
221enum { 232enum {
@@ -279,6 +290,7 @@ enum mlx5_event {
279 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, 290 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
280 MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, 291 MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
281 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, 292 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
293 MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
282 294
283 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, 295 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
284 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, 296 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
@@ -389,11 +401,6 @@ enum {
389}; 401};
390 402
391enum { 403enum {
392 MLX5_ADAPTER_PAGE_SHIFT = 12,
393 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
394};
395
396enum {
397 MLX5_CAP_OFF_CMDIF_CSUM = 46, 404 MLX5_CAP_OFF_CMDIF_CSUM = 46,
398}; 405};
399 406
@@ -534,7 +541,9 @@ struct mlx5_eqe_page_fault {
534 __be16 wqe_index; 541 __be16 wqe_index;
535 u16 reserved2; 542 u16 reserved2;
536 __be16 packet_length; 543 __be16 packet_length;
537 u8 reserved3[12]; 544 __be32 token;
545 u8 reserved4[8];
546 __be32 pftype_wq;
538 } __packed wqe; 547 } __packed wqe;
539 struct { 548 struct {
540 __be32 r_key; 549 __be32 r_key;
@@ -542,9 +551,9 @@ struct mlx5_eqe_page_fault {
542 __be16 packet_length; 551 __be16 packet_length;
543 __be32 rdma_op_len; 552 __be32 rdma_op_len;
544 __be64 rdma_va; 553 __be64 rdma_va;
554 __be32 pftype_token;
545 } __packed rdma; 555 } __packed rdma;
546 } __packed; 556 } __packed;
547 __be32 flags_qpn;
548} __packed; 557} __packed;
549 558
550struct mlx5_eqe_vport_change { 559struct mlx5_eqe_vport_change {
@@ -562,6 +571,22 @@ struct mlx5_eqe_port_module {
562 u8 error_type; 571 u8 error_type;
563} __packed; 572} __packed;
564 573
574struct mlx5_eqe_pps {
575 u8 rsvd0[3];
576 u8 pin;
577 u8 rsvd1[4];
578 union {
579 struct {
580 __be32 time_sec;
581 __be32 time_nsec;
582 };
583 struct {
584 __be64 time_stamp;
585 };
586 };
587 u8 rsvd2[12];
588} __packed;
589
565union ev_data { 590union ev_data {
566 __be32 raw[7]; 591 __be32 raw[7];
567 struct mlx5_eqe_cmd cmd; 592 struct mlx5_eqe_cmd cmd;
@@ -576,6 +601,7 @@ union ev_data {
576 struct mlx5_eqe_page_fault page_fault; 601 struct mlx5_eqe_page_fault page_fault;
577 struct mlx5_eqe_vport_change vport_change; 602 struct mlx5_eqe_vport_change vport_change;
578 struct mlx5_eqe_port_module port_module; 603 struct mlx5_eqe_port_module port_module;
604 struct mlx5_eqe_pps pps;
579} __packed; 605} __packed;
580 606
581struct mlx5_eqe { 607struct mlx5_eqe {
@@ -945,38 +971,54 @@ enum mlx5_cap_type {
945 MLX5_CAP_NUM 971 MLX5_CAP_NUM
946}; 972};
947 973
974enum mlx5_pcam_reg_groups {
975 MLX5_PCAM_REGS_5000_TO_507F = 0x0,
976};
977
978enum mlx5_pcam_feature_groups {
979 MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
980};
981
982enum mlx5_mcam_reg_groups {
983 MLX5_MCAM_REGS_FIRST_128 = 0x0,
984};
985
986enum mlx5_mcam_feature_groups {
987 MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
988};
989
948/* GET Dev Caps macros */ 990/* GET Dev Caps macros */
949#define MLX5_CAP_GEN(mdev, cap) \ 991#define MLX5_CAP_GEN(mdev, cap) \
950 MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) 992 MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
951 993
952#define MLX5_CAP_GEN_MAX(mdev, cap) \ 994#define MLX5_CAP_GEN_MAX(mdev, cap) \
953 MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) 995 MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
954 996
955#define MLX5_CAP_ETH(mdev, cap) \ 997#define MLX5_CAP_ETH(mdev, cap) \
956 MLX5_GET(per_protocol_networking_offload_caps,\ 998 MLX5_GET(per_protocol_networking_offload_caps,\
957 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) 999 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
958 1000
959#define MLX5_CAP_ETH_MAX(mdev, cap) \ 1001#define MLX5_CAP_ETH_MAX(mdev, cap) \
960 MLX5_GET(per_protocol_networking_offload_caps,\ 1002 MLX5_GET(per_protocol_networking_offload_caps,\
961 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) 1003 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
962 1004
963#define MLX5_CAP_ROCE(mdev, cap) \ 1005#define MLX5_CAP_ROCE(mdev, cap) \
964 MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) 1006 MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
965 1007
966#define MLX5_CAP_ROCE_MAX(mdev, cap) \ 1008#define MLX5_CAP_ROCE_MAX(mdev, cap) \
967 MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) 1009 MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
968 1010
969#define MLX5_CAP_ATOMIC(mdev, cap) \ 1011#define MLX5_CAP_ATOMIC(mdev, cap) \
970 MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) 1012 MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
971 1013
972#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ 1014#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
973 MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) 1015 MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
974 1016
975#define MLX5_CAP_FLOWTABLE(mdev, cap) \ 1017#define MLX5_CAP_FLOWTABLE(mdev, cap) \
976 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) 1018 MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
977 1019
978#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ 1020#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
979 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) 1021 MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
980 1022
981#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ 1023#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
982 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) 1024 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -998,11 +1040,11 @@ enum mlx5_cap_type {
998 1040
999#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ 1041#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1000 MLX5_GET(flow_table_eswitch_cap, \ 1042 MLX5_GET(flow_table_eswitch_cap, \
1001 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1043 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1002 1044
1003#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ 1045#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1004 MLX5_GET(flow_table_eswitch_cap, \ 1046 MLX5_GET(flow_table_eswitch_cap, \
1005 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 1047 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1006 1048
1007#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ 1049#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1008 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) 1050 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1024,21 +1066,27 @@ enum mlx5_cap_type {
1024 1066
1025#define MLX5_CAP_ESW(mdev, cap) \ 1067#define MLX5_CAP_ESW(mdev, cap) \
1026 MLX5_GET(e_switch_cap, \ 1068 MLX5_GET(e_switch_cap, \
1027 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) 1069 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
1028 1070
1029#define MLX5_CAP_ESW_MAX(mdev, cap) \ 1071#define MLX5_CAP_ESW_MAX(mdev, cap) \
1030 MLX5_GET(e_switch_cap, \ 1072 MLX5_GET(e_switch_cap, \
1031 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) 1073 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
1032 1074
1033#define MLX5_CAP_ODP(mdev, cap)\ 1075#define MLX5_CAP_ODP(mdev, cap)\
1034 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) 1076 MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
1035 1077
1036#define MLX5_CAP_VECTOR_CALC(mdev, cap) \ 1078#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
1037 MLX5_GET(vector_calc_cap, \ 1079 MLX5_GET(vector_calc_cap, \
1038 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) 1080 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
1039 1081
1040#define MLX5_CAP_QOS(mdev, cap)\ 1082#define MLX5_CAP_QOS(mdev, cap)\
1041 MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap) 1083 MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
1084
1085#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
1086 MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
1087
1088#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
1089 MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
1042 1090
1043enum { 1091enum {
1044 MLX5_CMD_STAT_OK = 0x0, 1092 MLX5_CMD_STAT_OK = 0x0,
@@ -1068,9 +1116,14 @@ enum {
1068 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, 1116 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1069 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, 1117 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1070 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, 1118 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
1119 MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
1071 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, 1120 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
1072}; 1121};
1073 1122
1123enum {
1124 MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
1125};
1126
1074static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) 1127static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1075{ 1128{
1076 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) 1129 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index afc78a3f4462..0787de28f2fc 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -68,10 +68,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
68{ 68{
69 unsigned long flags; 69 unsigned long flags;
70 70
71 spin_lock_irqsave(doorbell_lock, flags); 71 if (doorbell_lock)
72 spin_lock_irqsave(doorbell_lock, flags);
72 __raw_writel((__force u32) val[0], dest); 73 __raw_writel((__force u32) val[0], dest);
73 __raw_writel((__force u32) val[1], dest + 4); 74 __raw_writel((__force u32) val[1], dest + 4);
74 spin_unlock_irqrestore(doorbell_lock, flags); 75 if (doorbell_lock)
76 spin_unlock_irqrestore(doorbell_lock, flags);
75} 77}
76 78
77#endif 79#endif
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 735b36335f29..1bc4641734da 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -42,6 +42,7 @@
42#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/radix-tree.h> 43#include <linux/radix-tree.h>
44#include <linux/workqueue.h> 44#include <linux/workqueue.h>
45#include <linux/mempool.h>
45#include <linux/interrupt.h> 46#include <linux/interrupt.h>
46 47
47#include <linux/mlx5/device.h> 48#include <linux/mlx5/device.h>
@@ -83,6 +84,7 @@ enum {
83 MLX5_EQ_VEC_PAGES = 0, 84 MLX5_EQ_VEC_PAGES = 0,
84 MLX5_EQ_VEC_CMD = 1, 85 MLX5_EQ_VEC_CMD = 1,
85 MLX5_EQ_VEC_ASYNC = 2, 86 MLX5_EQ_VEC_ASYNC = 2,
87 MLX5_EQ_VEC_PFAULT = 3,
86 MLX5_EQ_VEC_COMP_BASE, 88 MLX5_EQ_VEC_COMP_BASE,
87}; 89};
88 90
@@ -119,10 +121,15 @@ enum {
119 MLX5_REG_PVLC = 0x500f, 121 MLX5_REG_PVLC = 0x500f,
120 MLX5_REG_PCMR = 0x5041, 122 MLX5_REG_PCMR = 0x5041,
121 MLX5_REG_PMLP = 0x5002, 123 MLX5_REG_PMLP = 0x5002,
124 MLX5_REG_PCAM = 0x507f,
122 MLX5_REG_NODE_DESC = 0x6001, 125 MLX5_REG_NODE_DESC = 0x6001,
123 MLX5_REG_HOST_ENDIANNESS = 0x7004, 126 MLX5_REG_HOST_ENDIANNESS = 0x7004,
124 MLX5_REG_MCIA = 0x9014, 127 MLX5_REG_MCIA = 0x9014,
125 MLX5_REG_MLCR = 0x902b, 128 MLX5_REG_MLCR = 0x902b,
129 MLX5_REG_MPCNT = 0x9051,
130 MLX5_REG_MTPPS = 0x9053,
131 MLX5_REG_MTPPSE = 0x9054,
132 MLX5_REG_MCAM = 0x907f,
126}; 133};
127 134
128enum mlx5_dcbx_oper_mode { 135enum mlx5_dcbx_oper_mode {
@@ -170,6 +177,7 @@ enum mlx5_dev_event {
170 MLX5_DEV_EVENT_PKEY_CHANGE, 177 MLX5_DEV_EVENT_PKEY_CHANGE,
171 MLX5_DEV_EVENT_GUID_CHANGE, 178 MLX5_DEV_EVENT_GUID_CHANGE,
172 MLX5_DEV_EVENT_CLIENT_REREG, 179 MLX5_DEV_EVENT_CLIENT_REREG,
180 MLX5_DEV_EVENT_PPS,
173}; 181};
174 182
175enum mlx5_port_status { 183enum mlx5_port_status {
@@ -177,36 +185,26 @@ enum mlx5_port_status {
177 MLX5_PORT_DOWN = 2, 185 MLX5_PORT_DOWN = 2,
178}; 186};
179 187
180struct mlx5_uuar_info { 188enum mlx5_eq_type {
181 struct mlx5_uar *uars; 189 MLX5_EQ_TYPE_COMP,
182 int num_uars; 190 MLX5_EQ_TYPE_ASYNC,
183 int num_low_latency_uuars; 191#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
184 unsigned long *bitmap; 192 MLX5_EQ_TYPE_PF,
193#endif
194};
195
196struct mlx5_bfreg_info {
197 u32 *sys_pages;
198 int num_low_latency_bfregs;
185 unsigned int *count; 199 unsigned int *count;
186 struct mlx5_bf *bfs;
187 200
188 /* 201 /*
189 * protect uuar allocation data structs 202 * protect bfreg allocation data structs
190 */ 203 */
191 struct mutex lock; 204 struct mutex lock;
192 u32 ver; 205 u32 ver;
193}; 206 bool lib_uar_4k;
194 207 u32 num_sys_pages;
195struct mlx5_bf {
196 void __iomem *reg;
197 void __iomem *regreg;
198 int buf_size;
199 struct mlx5_uar *uar;
200 unsigned long offset;
201 int need_lock;
202 /* protect blue flame buffer selection when needed
203 */
204 spinlock_t lock;
205
206 /* serialize 64 bit writes when done as two 32 bit accesses
207 */
208 spinlock_t lock32;
209 int uuarn;
210}; 208};
211 209
212struct mlx5_cmd_first { 210struct mlx5_cmd_first {
@@ -332,6 +330,14 @@ struct mlx5_eq_tasklet {
332 spinlock_t lock; 330 spinlock_t lock;
333}; 331};
334 332
333struct mlx5_eq_pagefault {
334 struct work_struct work;
335 /* Pagefaults lock */
336 spinlock_t lock;
337 struct workqueue_struct *wq;
338 mempool_t *pool;
339};
340
335struct mlx5_eq { 341struct mlx5_eq {
336 struct mlx5_core_dev *dev; 342 struct mlx5_core_dev *dev;
337 __be32 __iomem *doorbell; 343 __be32 __iomem *doorbell;
@@ -345,7 +351,13 @@ struct mlx5_eq {
345 struct list_head list; 351 struct list_head list;
346 int index; 352 int index;
347 struct mlx5_rsc_debug *dbg; 353 struct mlx5_rsc_debug *dbg;
348 struct mlx5_eq_tasklet tasklet_ctx; 354 enum mlx5_eq_type type;
355 union {
356 struct mlx5_eq_tasklet tasklet_ctx;
357#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
358 struct mlx5_eq_pagefault pf_ctx;
359#endif
360 };
349}; 361};
350 362
351struct mlx5_core_psv { 363struct mlx5_core_psv {
@@ -369,13 +381,21 @@ struct mlx5_core_sig_ctx {
369 u32 sigerr_count; 381 u32 sigerr_count;
370}; 382};
371 383
384enum {
385 MLX5_MKEY_MR = 1,
386 MLX5_MKEY_MW,
387};
388
372struct mlx5_core_mkey { 389struct mlx5_core_mkey {
373 u64 iova; 390 u64 iova;
374 u64 size; 391 u64 size;
375 u32 key; 392 u32 key;
376 u32 pd; 393 u32 pd;
394 u32 type;
377}; 395};
378 396
397#define MLX5_24BIT_MASK ((1 << 24) - 1)
398
379enum mlx5_res_type { 399enum mlx5_res_type {
380 MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, 400 MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
381 MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, 401 MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
@@ -410,20 +430,47 @@ struct mlx5_eq_table {
410 struct mlx5_eq pages_eq; 430 struct mlx5_eq pages_eq;
411 struct mlx5_eq async_eq; 431 struct mlx5_eq async_eq;
412 struct mlx5_eq cmd_eq; 432 struct mlx5_eq cmd_eq;
433#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
434 struct mlx5_eq pfault_eq;
435#endif
413 int num_comp_vectors; 436 int num_comp_vectors;
414 /* protect EQs list 437 /* protect EQs list
415 */ 438 */
416 spinlock_t lock; 439 spinlock_t lock;
417}; 440};
418 441
419struct mlx5_uar { 442struct mlx5_uars_page {
420 u32 index;
421 struct list_head bf_list;
422 unsigned free_bf_bmap;
423 void __iomem *bf_map;
424 void __iomem *map; 443 void __iomem *map;
444 bool wc;
445 u32 index;
446 struct list_head list;
447 unsigned int bfregs;
448 unsigned long *reg_bitmap; /* for non fast path bf regs */
449 unsigned long *fp_bitmap;
450 unsigned int reg_avail;
451 unsigned int fp_avail;
452 struct kref ref_count;
453 struct mlx5_core_dev *mdev;
454};
455
456struct mlx5_bfreg_head {
457 /* protect blue flame registers allocations */
458 struct mutex lock;
459 struct list_head list;
460};
461
462struct mlx5_bfreg_data {
463 struct mlx5_bfreg_head reg_head;
464 struct mlx5_bfreg_head wc_head;
425}; 465};
426 466
467struct mlx5_sq_bfreg {
468 void __iomem *map;
469 struct mlx5_uars_page *up;
470 bool wc;
471 u32 index;
472 unsigned int offset;
473};
427 474
428struct mlx5_core_health { 475struct mlx5_core_health {
429 struct health_buffer __iomem *health; 476 struct health_buffer __iomem *health;
@@ -496,6 +543,7 @@ struct mlx5_fc_stats {
496 543
497struct mlx5_eswitch; 544struct mlx5_eswitch;
498struct mlx5_lag; 545struct mlx5_lag;
546struct mlx5_pagefault;
499 547
500struct mlx5_rl_entry { 548struct mlx5_rl_entry {
501 u32 rate; 549 u32 rate;
@@ -542,8 +590,6 @@ struct mlx5_priv {
542 struct mlx5_eq_table eq_table; 590 struct mlx5_eq_table eq_table;
543 struct msix_entry *msix_arr; 591 struct msix_entry *msix_arr;
544 struct mlx5_irq_info *irq_info; 592 struct mlx5_irq_info *irq_info;
545 struct mlx5_uuar_info uuari;
546 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
547 593
548 /* pages stuff */ 594 /* pages stuff */
549 struct workqueue_struct *pg_wq; 595 struct workqueue_struct *pg_wq;
@@ -600,6 +646,16 @@ struct mlx5_priv {
600 struct mlx5_rl_table rl_table; 646 struct mlx5_rl_table rl_table;
601 647
602 struct mlx5_port_module_event_stats pme_stats; 648 struct mlx5_port_module_event_stats pme_stats;
649
650#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
651 void (*pfault)(struct mlx5_core_dev *dev,
652 void *context,
653 struct mlx5_pagefault *pfault);
654 void *pfault_ctx;
655 struct srcu_struct pfault_srcu;
656#endif
657 struct mlx5_bfreg_data bfregs;
658 struct mlx5_uars_page *uar;
603}; 659};
604 660
605enum mlx5_device_state { 661enum mlx5_device_state {
@@ -618,13 +674,56 @@ enum mlx5_pci_status {
618 MLX5_PCI_STATUS_ENABLED, 674 MLX5_PCI_STATUS_ENABLED,
619}; 675};
620 676
677enum mlx5_pagefault_type_flags {
678 MLX5_PFAULT_REQUESTOR = 1 << 0,
679 MLX5_PFAULT_WRITE = 1 << 1,
680 MLX5_PFAULT_RDMA = 1 << 2,
681};
682
683/* Contains the details of a pagefault. */
684struct mlx5_pagefault {
685 u32 bytes_committed;
686 u32 token;
687 u8 event_subtype;
688 u8 type;
689 union {
690 /* Initiator or send message responder pagefault details. */
691 struct {
692 /* Received packet size, only valid for responders. */
693 u32 packet_size;
694 /*
695 * Number of resource holding WQE, depends on type.
696 */
697 u32 wq_num;
698 /*
699 * WQE index. Refers to either the send queue or
700 * receive queue, according to event_subtype.
701 */
702 u16 wqe_index;
703 } wqe;
704 /* RDMA responder pagefault details */
705 struct {
706 u32 r_key;
707 /*
708 * Received packet size, minimal size page fault
709 * resolution required for forward progress.
710 */
711 u32 packet_size;
712 u32 rdma_op_len;
713 u64 rdma_va;
714 } rdma;
715 };
716
717 struct mlx5_eq *eq;
718 struct work_struct work;
719};
720
621struct mlx5_td { 721struct mlx5_td {
622 struct list_head tirs_list; 722 struct list_head tirs_list;
623 u32 tdn; 723 u32 tdn;
624}; 724};
625 725
626struct mlx5e_resources { 726struct mlx5e_resources {
627 struct mlx5_uar cq_uar;
628 u32 pdn; 727 u32 pdn;
629 struct mlx5_td td; 728 struct mlx5_td td;
630 struct mlx5_core_mkey mkey; 729 struct mlx5_core_mkey mkey;
@@ -639,8 +738,12 @@ struct mlx5_core_dev {
639 char board_id[MLX5_BOARD_ID_LEN]; 738 char board_id[MLX5_BOARD_ID_LEN];
640 struct mlx5_cmd cmd; 739 struct mlx5_cmd cmd;
641 struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; 740 struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
642 u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; 741 struct {
643 u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; 742 u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
743 u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
744 u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
745 u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
746 } caps;
644 phys_addr_t iseg_base; 747 phys_addr_t iseg_base;
645 struct mlx5_init_seg __iomem *iseg; 748 struct mlx5_init_seg __iomem *iseg;
646 enum mlx5_device_state state; 749 enum mlx5_device_state state;
@@ -814,11 +917,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
814int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); 917int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
815int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); 918int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
816int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); 919int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
817int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
818int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
819int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
820 bool map_wc);
821void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
822void mlx5_health_cleanup(struct mlx5_core_dev *dev); 920void mlx5_health_cleanup(struct mlx5_core_dev *dev);
823int mlx5_health_init(struct mlx5_core_dev *dev); 921int mlx5_health_init(struct mlx5_core_dev *dev);
824void mlx5_start_health_poll(struct mlx5_core_dev *dev); 922void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -878,15 +976,13 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
878void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); 976void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
879void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); 977void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
880void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); 978void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
881#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
882void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
883#endif
884void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); 979void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
885struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); 980struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
886void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); 981void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
887void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); 982void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
888int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, 983int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
889 int nent, u64 mask, const char *name, struct mlx5_uar *uar); 984 int nent, u64 mask, const char *name,
985 enum mlx5_eq_type type);
890int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 986int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
891int mlx5_start_eqs(struct mlx5_core_dev *dev); 987int mlx5_start_eqs(struct mlx5_core_dev *dev);
892int mlx5_stop_eqs(struct mlx5_core_dev *dev); 988int mlx5_stop_eqs(struct mlx5_core_dev *dev);
@@ -925,12 +1021,19 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
925 struct mlx5_odp_caps *odp_caps); 1021 struct mlx5_odp_caps *odp_caps);
926int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, 1022int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
927 u8 port_num, void *out, size_t sz); 1023 u8 port_num, void *out, size_t sz);
1024#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1025int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
1026 u32 wq_num, u8 type, int error);
1027#endif
928 1028
929int mlx5_init_rl_table(struct mlx5_core_dev *dev); 1029int mlx5_init_rl_table(struct mlx5_core_dev *dev);
930void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); 1030void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
931int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); 1031int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
932void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); 1032void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
933bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); 1033bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
1034int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
1035 bool map_wc, bool fast_path);
1036void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
934 1037
935static inline int fw_initializing(struct mlx5_core_dev *dev) 1038static inline int fw_initializing(struct mlx5_core_dev *dev)
936{ 1039{
@@ -958,7 +1061,7 @@ enum {
958}; 1061};
959 1062
960enum { 1063enum {
961 MAX_MR_CACHE_ENTRIES = 16, 1064 MAX_MR_CACHE_ENTRIES = 21,
962}; 1065};
963 1066
964enum { 1067enum {
@@ -973,6 +1076,9 @@ struct mlx5_interface {
973 void (*detach)(struct mlx5_core_dev *dev, void *context); 1076 void (*detach)(struct mlx5_core_dev *dev, void *context);
974 void (*event)(struct mlx5_core_dev *dev, void *context, 1077 void (*event)(struct mlx5_core_dev *dev, void *context,
975 enum mlx5_dev_event event, unsigned long param); 1078 enum mlx5_dev_event event, unsigned long param);
1079 void (*pfault)(struct mlx5_core_dev *dev,
1080 void *context,
1081 struct mlx5_pagefault *pfault);
976 void * (*get_dev)(void *context); 1082 void * (*get_dev)(void *context);
977 int protocol; 1083 int protocol;
978 struct list_head list; 1084 struct list_head list;
@@ -987,6 +1093,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
987int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); 1093int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
988bool mlx5_lag_is_active(struct mlx5_core_dev *dev); 1094bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
989struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); 1095struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
1096struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
1097void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
990 1098
991struct mlx5_profile { 1099struct mlx5_profile {
992 u64 mask; 1100 u64 mask;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a852e9db6f0d..afcd4736d8df 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -328,7 +328,7 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
328 u8 receive[0x1]; 328 u8 receive[0x1];
329 u8 write[0x1]; 329 u8 write[0x1];
330 u8 read[0x1]; 330 u8 read[0x1];
331 u8 reserved_at_4[0x1]; 331 u8 atomic[0x1];
332 u8 srq_receive[0x1]; 332 u8 srq_receive[0x1];
333 u8 reserved_at_6[0x1a]; 333 u8 reserved_at_6[0x1a];
334}; 334};
@@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
365 u8 ip_protocol[0x8]; 365 u8 ip_protocol[0x8];
366 u8 ip_dscp[0x6]; 366 u8 ip_dscp[0x6];
367 u8 ip_ecn[0x2]; 367 u8 ip_ecn[0x2];
368 u8 vlan_tag[0x1]; 368 u8 cvlan_tag[0x1];
369 u8 reserved_at_91[0x1]; 369 u8 svlan_tag[0x1];
370 u8 frag[0x1]; 370 u8 frag[0x1];
371 u8 reserved_at_93[0x4]; 371 u8 reserved_at_93[0x4];
372 u8 tcp_flags[0x9]; 372 u8 tcp_flags[0x9];
@@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
398 u8 inner_second_cfi[0x1]; 398 u8 inner_second_cfi[0x1];
399 u8 inner_second_vid[0xc]; 399 u8 inner_second_vid[0xc];
400 400
401 u8 outer_second_vlan_tag[0x1]; 401 u8 outer_second_cvlan_tag[0x1];
402 u8 inner_second_vlan_tag[0x1]; 402 u8 inner_second_cvlan_tag[0x1];
403 u8 reserved_at_62[0xe]; 403 u8 outer_second_svlan_tag[0x1];
404 u8 inner_second_svlan_tag[0x1];
405 u8 reserved_at_64[0xc];
404 u8 gre_protocol[0x10]; 406 u8 gre_protocol[0x10];
405 407
406 u8 gre_key_h[0x18]; 408 u8 gre_key_h[0x18];
@@ -545,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits {
545struct mlx5_ifc_qos_cap_bits { 547struct mlx5_ifc_qos_cap_bits {
546 u8 packet_pacing[0x1]; 548 u8 packet_pacing[0x1];
547 u8 esw_scheduling[0x1]; 549 u8 esw_scheduling[0x1];
548 u8 reserved_at_2[0x1e]; 550 u8 esw_bw_share[0x1];
551 u8 esw_rate_limit[0x1];
552 u8 reserved_at_4[0x1c];
549 553
550 u8 reserved_at_20[0x20]; 554 u8 reserved_at_20[0x20];
551 555
@@ -573,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
573 u8 lro_cap[0x1]; 577 u8 lro_cap[0x1];
574 u8 lro_psh_flag[0x1]; 578 u8 lro_psh_flag[0x1];
575 u8 lro_time_stamp[0x1]; 579 u8 lro_time_stamp[0x1];
576 u8 reserved_at_5[0x3]; 580 u8 reserved_at_5[0x2];
581 u8 wqe_vlan_insert[0x1];
577 u8 self_lb_en_modifiable[0x1]; 582 u8 self_lb_en_modifiable[0x1];
578 u8 reserved_at_9[0x2]; 583 u8 reserved_at_9[0x2];
579 u8 max_lso_cap[0x5]; 584 u8 max_lso_cap[0x5];
@@ -782,11 +787,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
782 u8 log_max_eq[0x4]; 787 u8 log_max_eq[0x4];
783 788
784 u8 max_indirection[0x8]; 789 u8 max_indirection[0x8];
785 u8 reserved_at_108[0x1]; 790 u8 fixed_buffer_size[0x1];
786 u8 log_max_mrw_sz[0x7]; 791 u8 log_max_mrw_sz[0x7];
787 u8 reserved_at_110[0x2]; 792 u8 reserved_at_110[0x2];
788 u8 log_max_bsf_list_size[0x6]; 793 u8 log_max_bsf_list_size[0x6];
789 u8 reserved_at_118[0x2]; 794 u8 umr_extended_translation_offset[0x1];
795 u8 null_mkey[0x1];
790 u8 log_max_klm_list_size[0x6]; 796 u8 log_max_klm_list_size[0x6];
791 797
792 u8 reserved_at_120[0xa]; 798 u8 reserved_at_120[0xa];
@@ -799,10 +805,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
799 u8 reserved_at_150[0xa]; 805 u8 reserved_at_150[0xa];
800 u8 log_max_ra_res_qp[0x6]; 806 u8 log_max_ra_res_qp[0x6];
801 807
802 u8 pad_cap[0x1]; 808 u8 end_pad[0x1];
803 u8 cc_query_allowed[0x1]; 809 u8 cc_query_allowed[0x1];
804 u8 cc_modify_allowed[0x1]; 810 u8 cc_modify_allowed[0x1];
805 u8 reserved_at_163[0xd]; 811 u8 start_pad[0x1];
812 u8 cache_line_128byte[0x1];
813 u8 reserved_at_163[0xb];
806 u8 gid_table_size[0x10]; 814 u8 gid_table_size[0x10];
807 815
808 u8 out_of_seq_cnt[0x1]; 816 u8 out_of_seq_cnt[0x1];
@@ -823,18 +831,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
823 u8 nic_flow_table[0x1]; 831 u8 nic_flow_table[0x1];
824 u8 eswitch_flow_table[0x1]; 832 u8 eswitch_flow_table[0x1];
825 u8 early_vf_enable[0x1]; 833 u8 early_vf_enable[0x1];
826 u8 reserved_at_1a9[0x2]; 834 u8 mcam_reg[0x1];
835 u8 pcam_reg[0x1];
827 u8 local_ca_ack_delay[0x5]; 836 u8 local_ca_ack_delay[0x5];
828 u8 port_module_event[0x1]; 837 u8 port_module_event[0x1];
829 u8 reserved_at_1b0[0x1]; 838 u8 reserved_at_1b1[0x1];
830 u8 ports_check[0x1]; 839 u8 ports_check[0x1];
831 u8 reserved_at_1b2[0x1]; 840 u8 reserved_at_1b3[0x1];
832 u8 disable_link_up[0x1]; 841 u8 disable_link_up[0x1];
833 u8 beacon_led[0x1]; 842 u8 beacon_led[0x1];
834 u8 port_type[0x2]; 843 u8 port_type[0x2];
835 u8 num_ports[0x8]; 844 u8 num_ports[0x8];
836 845
837 u8 reserved_at_1c0[0x3]; 846 u8 reserved_at_1c0[0x1];
847 u8 pps[0x1];
848 u8 pps_modify[0x1];
838 u8 log_max_msg[0x5]; 849 u8 log_max_msg[0x5];
839 u8 reserved_at_1c8[0x4]; 850 u8 reserved_at_1c8[0x4];
840 u8 max_tc[0x4]; 851 u8 max_tc[0x4];
@@ -858,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
858 869
859 u8 compact_address_vector[0x1]; 870 u8 compact_address_vector[0x1];
860 u8 striding_rq[0x1]; 871 u8 striding_rq[0x1];
861 u8 reserved_at_201[0x2]; 872 u8 reserved_at_202[0x2];
862 u8 ipoib_basic_offloads[0x1]; 873 u8 ipoib_basic_offloads[0x1];
863 u8 reserved_at_205[0xa]; 874 u8 reserved_at_205[0xa];
864 u8 drain_sigerr[0x1]; 875 u8 drain_sigerr[0x1];
@@ -904,7 +915,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
904 u8 uc[0x1]; 915 u8 uc[0x1];
905 u8 rc[0x1]; 916 u8 rc[0x1];
906 917
907 u8 reserved_at_240[0xa]; 918 u8 uar_4k[0x1];
919 u8 reserved_at_241[0x9];
908 u8 uar_sz[0x6]; 920 u8 uar_sz[0x6];
909 u8 reserved_at_250[0x8]; 921 u8 reserved_at_250[0x8];
910 u8 log_pg_sz[0x8]; 922 u8 log_pg_sz[0x8];
@@ -996,7 +1008,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
996 u8 device_frequency_mhz[0x20]; 1008 u8 device_frequency_mhz[0x20];
997 u8 device_frequency_khz[0x20]; 1009 u8 device_frequency_khz[0x20];
998 1010
999 u8 reserved_at_500[0x80]; 1011 u8 reserved_at_500[0x20];
1012 u8 num_of_uars_per_page[0x20];
1013 u8 reserved_at_540[0x40];
1000 1014
1001 u8 reserved_at_580[0x3f]; 1015 u8 reserved_at_580[0x3f];
1002 u8 cqe_compression[0x1]; 1016 u8 cqe_compression[0x1];
@@ -1009,10 +1023,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
1009 u8 rndv_offload_rc[0x1]; 1023 u8 rndv_offload_rc[0x1];
1010 u8 rndv_offload_dc[0x1]; 1024 u8 rndv_offload_dc[0x1];
1011 u8 log_tag_matching_list_sz[0x5]; 1025 u8 log_tag_matching_list_sz[0x5];
1012 u8 reserved_at_5e8[0x3]; 1026 u8 reserved_at_5f8[0x3];
1013 u8 log_max_xrq[0x5]; 1027 u8 log_max_xrq[0x5];
1014 1028
1015 u8 reserved_at_5f0[0x200]; 1029 u8 reserved_at_600[0x200];
1016}; 1030};
1017 1031
1018enum mlx5_flow_destination_type { 1032enum mlx5_flow_destination_type {
@@ -1375,6 +1389,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
1375 u8 reserved_at_640[0x180]; 1389 u8 reserved_at_640[0x180];
1376}; 1390};
1377 1391
1392struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
1393 u8 time_since_last_clear_high[0x20];
1394
1395 u8 time_since_last_clear_low[0x20];
1396
1397 u8 phy_received_bits_high[0x20];
1398
1399 u8 phy_received_bits_low[0x20];
1400
1401 u8 phy_symbol_errors_high[0x20];
1402
1403 u8 phy_symbol_errors_low[0x20];
1404
1405 u8 phy_corrected_bits_high[0x20];
1406
1407 u8 phy_corrected_bits_low[0x20];
1408
1409 u8 phy_corrected_bits_lane0_high[0x20];
1410
1411 u8 phy_corrected_bits_lane0_low[0x20];
1412
1413 u8 phy_corrected_bits_lane1_high[0x20];
1414
1415 u8 phy_corrected_bits_lane1_low[0x20];
1416
1417 u8 phy_corrected_bits_lane2_high[0x20];
1418
1419 u8 phy_corrected_bits_lane2_low[0x20];
1420
1421 u8 phy_corrected_bits_lane3_high[0x20];
1422
1423 u8 phy_corrected_bits_lane3_low[0x20];
1424
1425 u8 reserved_at_200[0x5c0];
1426};
1427
1378struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { 1428struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
1379 u8 symbol_error_counter[0x10]; 1429 u8 symbol_error_counter[0x10];
1380 1430
@@ -1757,6 +1807,30 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
1757 u8 reserved_at_4c0[0x300]; 1807 u8 reserved_at_4c0[0x300];
1758}; 1808};
1759 1809
1810struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
1811 u8 life_time_counter_high[0x20];
1812
1813 u8 life_time_counter_low[0x20];
1814
1815 u8 rx_errors[0x20];
1816
1817 u8 tx_errors[0x20];
1818
1819 u8 l0_to_recovery_eieos[0x20];
1820
1821 u8 l0_to_recovery_ts[0x20];
1822
1823 u8 l0_to_recovery_framing[0x20];
1824
1825 u8 l0_to_recovery_retrain[0x20];
1826
1827 u8 crc_error_dllp[0x20];
1828
1829 u8 crc_error_tlp[0x20];
1830
1831 u8 reserved_at_140[0x680];
1832};
1833
1760struct mlx5_ifc_cmd_inter_comp_event_bits { 1834struct mlx5_ifc_cmd_inter_comp_event_bits {
1761 u8 command_completion_vector[0x20]; 1835 u8 command_completion_vector[0x20];
1762 1836
@@ -2495,6 +2569,7 @@ enum {
2495 MLX5_MKC_ACCESS_MODE_PA = 0x0, 2569 MLX5_MKC_ACCESS_MODE_PA = 0x0,
2496 MLX5_MKC_ACCESS_MODE_MTT = 0x1, 2570 MLX5_MKC_ACCESS_MODE_MTT = 0x1,
2497 MLX5_MKC_ACCESS_MODE_KLMS = 0x2, 2571 MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
2572 MLX5_MKC_ACCESS_MODE_KSM = 0x3,
2498}; 2573};
2499 2574
2500struct mlx5_ifc_mkc_bits { 2575struct mlx5_ifc_mkc_bits {
@@ -2918,6 +2993,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
2918 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; 2993 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
2919 struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; 2994 struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
2920 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; 2995 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
2996 struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
2997 u8 reserved_at_0[0x7c0];
2998};
2999
3000union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
3001 struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
2921 u8 reserved_at_0[0x7c0]; 3002 u8 reserved_at_0[0x7c0];
2922}; 3003};
2923 3004
@@ -3597,6 +3678,10 @@ struct mlx5_ifc_query_special_contexts_out_bits {
3597 u8 dump_fill_mkey[0x20]; 3678 u8 dump_fill_mkey[0x20];
3598 3679
3599 u8 resd_lkey[0x20]; 3680 u8 resd_lkey[0x20];
3681
3682 u8 null_mkey[0x20];
3683
3684 u8 reserved_at_a0[0x60];
3600}; 3685};
3601 3686
3602struct mlx5_ifc_query_special_contexts_in_bits { 3687struct mlx5_ifc_query_special_contexts_in_bits {
@@ -4689,12 +4774,11 @@ struct mlx5_ifc_page_fault_resume_in_bits {
4689 4774
4690 u8 error[0x1]; 4775 u8 error[0x1];
4691 u8 reserved_at_41[0x4]; 4776 u8 reserved_at_41[0x4];
4692 u8 rdma[0x1]; 4777 u8 page_fault_type[0x3];
4693 u8 read_write[0x1]; 4778 u8 wq_number[0x18];
4694 u8 req_res[0x1];
4695 u8 qpn[0x18];
4696 4779
4697 u8 reserved_at_60[0x20]; 4780 u8 reserved_at_60[0x8];
4781 u8 token[0x18];
4698}; 4782};
4699 4783
4700struct mlx5_ifc_nop_out_bits { 4784struct mlx5_ifc_nop_out_bits {
@@ -7240,6 +7324,18 @@ struct mlx5_ifc_ppcnt_reg_bits {
7240 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; 7324 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
7241}; 7325};
7242 7326
7327struct mlx5_ifc_mpcnt_reg_bits {
7328 u8 reserved_at_0[0x8];
7329 u8 pcie_index[0x8];
7330 u8 reserved_at_10[0xa];
7331 u8 grp[0x6];
7332
7333 u8 clr[0x1];
7334 u8 reserved_at_21[0x1f];
7335
7336 union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
7337};
7338
7243struct mlx5_ifc_ppad_reg_bits { 7339struct mlx5_ifc_ppad_reg_bits {
7244 u8 reserved_at_0[0x3]; 7340 u8 reserved_at_0[0x3];
7245 u8 single_mac[0x1]; 7341 u8 single_mac[0x1];
@@ -7469,6 +7565,63 @@ struct mlx5_ifc_peir_reg_bits {
7469 u8 error_type[0x8]; 7565 u8 error_type[0x8];
7470}; 7566};
7471 7567
7568struct mlx5_ifc_pcam_enhanced_features_bits {
7569 u8 reserved_at_0[0x7e];
7570
7571 u8 ppcnt_discard_group[0x1];
7572 u8 ppcnt_statistical_group[0x1];
7573};
7574
7575struct mlx5_ifc_pcam_reg_bits {
7576 u8 reserved_at_0[0x8];
7577 u8 feature_group[0x8];
7578 u8 reserved_at_10[0x8];
7579 u8 access_reg_group[0x8];
7580
7581 u8 reserved_at_20[0x20];
7582
7583 union {
7584 u8 reserved_at_0[0x80];
7585 } port_access_reg_cap_mask;
7586
7587 u8 reserved_at_c0[0x80];
7588
7589 union {
7590 struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features;
7591 u8 reserved_at_0[0x80];
7592 } feature_cap_mask;
7593
7594 u8 reserved_at_1c0[0xc0];
7595};
7596
7597struct mlx5_ifc_mcam_enhanced_features_bits {
7598 u8 reserved_at_0[0x7f];
7599
7600 u8 pcie_performance_group[0x1];
7601};
7602
7603struct mlx5_ifc_mcam_reg_bits {
7604 u8 reserved_at_0[0x8];
7605 u8 feature_group[0x8];
7606 u8 reserved_at_10[0x8];
7607 u8 access_reg_group[0x8];
7608
7609 u8 reserved_at_20[0x20];
7610
7611 union {
7612 u8 reserved_at_0[0x80];
7613 } mng_access_reg_cap_mask;
7614
7615 u8 reserved_at_c0[0x80];
7616
7617 union {
7618 struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features;
7619 u8 reserved_at_0[0x80];
7620 } mng_feature_cap_mask;
7621
7622 u8 reserved_at_1c0[0x80];
7623};
7624
7472struct mlx5_ifc_pcap_reg_bits { 7625struct mlx5_ifc_pcap_reg_bits {
7473 u8 reserved_at_0[0x8]; 7626 u8 reserved_at_0[0x8];
7474 u8 local_port[0x8]; 7627 u8 local_port[0x8];
@@ -7813,6 +7966,60 @@ struct mlx5_ifc_initial_seg_bits {
7813 u8 reserved_at_80a0[0x17fc0]; 7966 u8 reserved_at_80a0[0x17fc0];
7814}; 7967};
7815 7968
7969struct mlx5_ifc_mtpps_reg_bits {
7970 u8 reserved_at_0[0xc];
7971 u8 cap_number_of_pps_pins[0x4];
7972 u8 reserved_at_10[0x4];
7973 u8 cap_max_num_of_pps_in_pins[0x4];
7974 u8 reserved_at_18[0x4];
7975 u8 cap_max_num_of_pps_out_pins[0x4];
7976
7977 u8 reserved_at_20[0x24];
7978 u8 cap_pin_3_mode[0x4];
7979 u8 reserved_at_48[0x4];
7980 u8 cap_pin_2_mode[0x4];
7981 u8 reserved_at_50[0x4];
7982 u8 cap_pin_1_mode[0x4];
7983 u8 reserved_at_58[0x4];
7984 u8 cap_pin_0_mode[0x4];
7985
7986 u8 reserved_at_60[0x4];
7987 u8 cap_pin_7_mode[0x4];
7988 u8 reserved_at_68[0x4];
7989 u8 cap_pin_6_mode[0x4];
7990 u8 reserved_at_70[0x4];
7991 u8 cap_pin_5_mode[0x4];
7992 u8 reserved_at_78[0x4];
7993 u8 cap_pin_4_mode[0x4];
7994
7995 u8 reserved_at_80[0x80];
7996
7997 u8 enable[0x1];
7998 u8 reserved_at_101[0xb];
7999 u8 pattern[0x4];
8000 u8 reserved_at_110[0x4];
8001 u8 pin_mode[0x4];
8002 u8 pin[0x8];
8003
8004 u8 reserved_at_120[0x20];
8005
8006 u8 time_stamp[0x40];
8007
8008 u8 out_pulse_duration[0x10];
8009 u8 out_periodic_adjustment[0x10];
8010
8011 u8 reserved_at_1a0[0x60];
8012};
8013
8014struct mlx5_ifc_mtppse_reg_bits {
8015 u8 reserved_at_0[0x18];
8016 u8 pin[0x8];
8017 u8 event_arm[0x1];
8018 u8 reserved_at_21[0x1b];
8019 u8 event_generation_mode[0x4];
8020 u8 reserved_at_40[0x40];
8021};
8022
7816union mlx5_ifc_ports_control_registers_document_bits { 8023union mlx5_ifc_ports_control_registers_document_bits {
7817 struct mlx5_ifc_bufferx_reg_bits bufferx_reg; 8024 struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
7818 struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; 8025 struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -7845,6 +8052,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
7845 struct mlx5_ifc_pmtu_reg_bits pmtu_reg; 8052 struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
7846 struct mlx5_ifc_ppad_reg_bits ppad_reg; 8053 struct mlx5_ifc_ppad_reg_bits ppad_reg;
7847 struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; 8054 struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
8055 struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
7848 struct mlx5_ifc_pplm_reg_bits pplm_reg; 8056 struct mlx5_ifc_pplm_reg_bits pplm_reg;
7849 struct mlx5_ifc_pplr_reg_bits pplr_reg; 8057 struct mlx5_ifc_pplr_reg_bits pplr_reg;
7850 struct mlx5_ifc_ppsc_reg_bits ppsc_reg; 8058 struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
@@ -7857,6 +8065,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
7857 struct mlx5_ifc_pvlc_reg_bits pvlc_reg; 8065 struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
7858 struct mlx5_ifc_slrg_reg_bits slrg_reg; 8066 struct mlx5_ifc_slrg_reg_bits slrg_reg;
7859 struct mlx5_ifc_sltp_reg_bits sltp_reg; 8067 struct mlx5_ifc_sltp_reg_bits sltp_reg;
8068 struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
8069 struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
7860 u8 reserved_at_0[0x60e0]; 8070 u8 reserved_at_0[0x60e0];
7861}; 8071};
7862 8072
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 0aacb2a7480d..3096370fe831 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,9 +50,6 @@
50#define MLX5_BSF_APPTAG_ESCAPE 0x1 50#define MLX5_BSF_APPTAG_ESCAPE 0x1
51#define MLX5_BSF_APPREF_ESCAPE 0x2 51#define MLX5_BSF_APPREF_ESCAPE 0x2
52 52
53#define MLX5_QPN_BITS 24
54#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
55
56enum mlx5_qp_optpar { 53enum mlx5_qp_optpar {
57 MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, 54 MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
58 MLX5_QP_OPTPAR_RRE = 1 << 1, 55 MLX5_QP_OPTPAR_RRE = 1 << 1,
@@ -215,6 +212,7 @@ struct mlx5_wqe_ctrl_seg {
215#define MLX5_WQE_CTRL_OPCODE_MASK 0xff 212#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
216#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 213#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
217#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 214#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
215#define MLX5_WQE_AV_EXT 0x80000000
218 216
219enum { 217enum {
220 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, 218 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -223,14 +221,26 @@ enum {
223 MLX5_ETH_WQE_L4_CSUM = 1 << 7, 221 MLX5_ETH_WQE_L4_CSUM = 1 << 7,
224}; 222};
225 223
224enum {
225 MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
226};
227
226struct mlx5_wqe_eth_seg { 228struct mlx5_wqe_eth_seg {
227 u8 rsvd0[4]; 229 u8 rsvd0[4];
228 u8 cs_flags; 230 u8 cs_flags;
229 u8 rsvd1; 231 u8 rsvd1;
230 __be16 mss; 232 __be16 mss;
231 __be32 rsvd2; 233 __be32 rsvd2;
232 __be16 inline_hdr_sz; 234 union {
233 u8 inline_hdr_start[2]; 235 struct {
236 __be16 sz;
237 u8 start[2];
238 } inline_hdr;
239 struct {
240 __be16 type;
241 __be16 vlan_tci;
242 } insert;
243 };
234}; 244};
235 245
236struct mlx5_wqe_xrc_seg { 246struct mlx5_wqe_xrc_seg {
@@ -245,6 +255,23 @@ struct mlx5_wqe_masked_atomic_seg {
245 __be64 compare_mask; 255 __be64 compare_mask;
246}; 256};
247 257
258struct mlx5_base_av {
259 union {
260 struct {
261 __be32 qkey;
262 __be32 reserved;
263 } qkey;
264 __be64 dc_key;
265 } key;
266 __be32 dqp_dct;
267 u8 stat_rate_sl;
268 u8 fl_mlid;
269 union {
270 __be16 rlid;
271 __be16 udp_sport;
272 };
273};
274
248struct mlx5_av { 275struct mlx5_av {
249 union { 276 union {
250 struct { 277 struct {
@@ -292,10 +319,14 @@ struct mlx5_wqe_data_seg {
292struct mlx5_wqe_umr_ctrl_seg { 319struct mlx5_wqe_umr_ctrl_seg {
293 u8 flags; 320 u8 flags;
294 u8 rsvd0[3]; 321 u8 rsvd0[3];
295 __be16 klm_octowords; 322 __be16 xlt_octowords;
296 __be16 bsf_octowords; 323 union {
324 __be16 xlt_offset;
325 __be16 bsf_octowords;
326 };
297 __be64 mkey_mask; 327 __be64 mkey_mask;
298 u8 rsvd1[32]; 328 __be32 xlt_offset_47_16;
329 u8 rsvd1[28];
299}; 330};
300 331
301struct mlx5_seg_set_psv { 332struct mlx5_seg_set_psv {
@@ -389,6 +420,10 @@ struct mlx5_bsf {
389 struct mlx5_bsf_inl m_inl; 420 struct mlx5_bsf_inl m_inl;
390}; 421};
391 422
423struct mlx5_mtt {
424 __be64 ptag;
425};
426
392struct mlx5_klm { 427struct mlx5_klm {
393 __be32 bcount; 428 __be32 bcount;
394 __be32 key; 429 __be32 key;
@@ -410,46 +445,9 @@ struct mlx5_stride_block_ctrl_seg {
410 __be16 num_entries; 445 __be16 num_entries;
411}; 446};
412 447
413enum mlx5_pagefault_flags {
414 MLX5_PFAULT_REQUESTOR = 1 << 0,
415 MLX5_PFAULT_WRITE = 1 << 1,
416 MLX5_PFAULT_RDMA = 1 << 2,
417};
418
419/* Contains the details of a pagefault. */
420struct mlx5_pagefault {
421 u32 bytes_committed;
422 u8 event_subtype;
423 enum mlx5_pagefault_flags flags;
424 union {
425 /* Initiator or send message responder pagefault details. */
426 struct {
427 /* Received packet size, only valid for responders. */
428 u32 packet_size;
429 /*
430 * WQE index. Refers to either the send queue or
431 * receive queue, according to event_subtype.
432 */
433 u16 wqe_index;
434 } wqe;
435 /* RDMA responder pagefault details */
436 struct {
437 u32 r_key;
438 /*
439 * Received packet size, minimal size page fault
440 * resolution required for forward progress.
441 */
442 u32 packet_size;
443 u32 rdma_op_len;
444 u64 rdma_va;
445 } rdma;
446 };
447};
448
449struct mlx5_core_qp { 448struct mlx5_core_qp {
450 struct mlx5_core_rsc_common common; /* must be first */ 449 struct mlx5_core_rsc_common common; /* must be first */
451 void (*event) (struct mlx5_core_qp *, int); 450 void (*event) (struct mlx5_core_qp *, int);
452 void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
453 int qpn; 451 int qpn;
454 struct mlx5_rsc_debug *dbg; 452 struct mlx5_rsc_debug *dbg;
455 int pid; 453 int pid;
@@ -549,10 +547,6 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
549void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); 547void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
550int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 548int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
551void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 549void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
552#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
553int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
554 u8 context, int error);
555#endif
556int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, 550int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
557 struct mlx5_core_qp *rq); 551 struct mlx5_core_qp *rq);
558void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, 552void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index ec35157ea725..656c70b65dd2 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
51 u16 vport, u8 *addr); 51 u16 vport, u8 *addr);
52int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, 52int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
53 u16 vport, u8 *min_inline); 53 u16 vport, u8 *min_inline);
54void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
54int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, 55int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
55 u16 vport, u8 min_inline); 56 u16 vport, u8 min_inline);
56int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 57int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b84615b0f64c..6ff66d6fe8e2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
76#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) 76#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
77#endif 77#endif
78 78
79#ifndef lm_alias
80#define lm_alias(x) __va(__pa_symbol(x))
81#endif
82
79/* 83/*
80 * To prevent common memory management code establishing 84 * To prevent common memory management code establishing
81 * a zero page mapping on a read fault. 85 * a zero page mapping on a read fault.
diff --git a/include/linux/mmc/boot.h b/include/linux/mmc/boot.h
deleted file mode 100644
index 23acc3baa07d..000000000000
--- a/include/linux/mmc/boot.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef LINUX_MMC_BOOT_H
2#define LINUX_MMC_BOOT_H
3
4enum { MMC_PROGRESS_ENTER, MMC_PROGRESS_INIT,
5 MMC_PROGRESS_LOAD, MMC_PROGRESS_DONE };
6
7#endif /* LINUX_MMC_BOOT_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 95d69d498296..77e61e0a216a 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -11,7 +11,6 @@
11#define LINUX_MMC_CARD_H 11#define LINUX_MMC_CARD_H
12 12
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/mmc/core.h>
15#include <linux/mod_devicetable.h> 14#include <linux/mod_devicetable.h>
16 15
17struct mmc_cid { 16struct mmc_cid {
@@ -84,6 +83,7 @@ struct mmc_ext_csd {
84 unsigned int hpi_cmd; /* cmd used as HPI */ 83 unsigned int hpi_cmd; /* cmd used as HPI */
85 bool bkops; /* background support bit */ 84 bool bkops; /* background support bit */
86 bool man_bkops_en; /* manual bkops enable bit */ 85 bool man_bkops_en; /* manual bkops enable bit */
86 bool auto_bkops_en; /* auto bkops enable bit */
87 unsigned int data_sector_size; /* 512 bytes or 4KB */ 87 unsigned int data_sector_size; /* 512 bytes or 4KB */
88 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ 88 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
89 unsigned int boot_ro_lock; /* ro lock support */ 89 unsigned int boot_ro_lock; /* ro lock support */
@@ -121,6 +121,9 @@ struct mmc_ext_csd {
121 u8 raw_pwr_cl_ddr_200_360; /* 253 */ 121 u8 raw_pwr_cl_ddr_200_360; /* 253 */
122 u8 raw_bkops_status; /* 246 */ 122 u8 raw_bkops_status; /* 246 */
123 u8 raw_sectors[4]; /* 212 - 4 bytes */ 123 u8 raw_sectors[4]; /* 212 - 4 bytes */
124 u8 pre_eol_info; /* 267 */
125 u8 device_life_time_est_typ_a; /* 268 */
126 u8 device_life_time_est_typ_b; /* 269 */
124 127
125 unsigned int feature_support; 128 unsigned int feature_support;
126#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */ 129#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */
@@ -203,7 +206,6 @@ struct sdio_cis {
203}; 206};
204 207
205struct mmc_host; 208struct mmc_host;
206struct mmc_ios;
207struct sdio_func; 209struct sdio_func;
208struct sdio_func_tuple; 210struct sdio_func_tuple;
209 211
@@ -247,13 +249,6 @@ struct mmc_card {
247#define MMC_TYPE_SDIO 2 /* SDIO card */ 249#define MMC_TYPE_SDIO 2 /* SDIO card */
248#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ 250#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */
249 unsigned int state; /* (our) card state */ 251 unsigned int state; /* (our) card state */
250#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
251#define MMC_STATE_READONLY (1<<1) /* card is read-only */
252#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */
253#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
254#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
255#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */
256#define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */
257 unsigned int quirks; /* card quirks */ 252 unsigned int quirks; /* card quirks */
258#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ 253#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
259#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ 254#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -272,7 +267,6 @@ struct mmc_card {
272#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ 267#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
273#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ 268#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
274 269
275
276 unsigned int erase_size; /* erase size in sectors */ 270 unsigned int erase_size; /* erase size in sectors */
277 unsigned int erase_shift; /* if erase unit is power 2 */ 271 unsigned int erase_shift; /* if erase unit is power 2 */
278 unsigned int pref_erase; /* in sectors */ 272 unsigned int pref_erase; /* in sectors */
@@ -308,245 +302,13 @@ struct mmc_card {
308 unsigned int nr_parts; 302 unsigned int nr_parts;
309}; 303};
310 304
311/*
312 * This function fill contents in mmc_part.
313 */
314static inline void mmc_part_add(struct mmc_card *card, unsigned int size,
315 unsigned int part_cfg, char *name, int idx, bool ro,
316 int area_type)
317{
318 card->part[card->nr_parts].size = size;
319 card->part[card->nr_parts].part_cfg = part_cfg;
320 sprintf(card->part[card->nr_parts].name, name, idx);
321 card->part[card->nr_parts].force_ro = ro;
322 card->part[card->nr_parts].area_type = area_type;
323 card->nr_parts++;
324}
325
326static inline bool mmc_large_sector(struct mmc_card *card) 305static inline bool mmc_large_sector(struct mmc_card *card)
327{ 306{
328 return card->ext_csd.data_sector_size == 4096; 307 return card->ext_csd.data_sector_size == 4096;
329} 308}
330 309
331/*
332 * The world is not perfect and supplies us with broken mmc/sdio devices.
333 * For at least some of these bugs we need a work-around.
334 */
335
336struct mmc_fixup {
337 /* CID-specific fields. */
338 const char *name;
339
340 /* Valid revision range */
341 u64 rev_start, rev_end;
342
343 unsigned int manfid;
344 unsigned short oemid;
345
346 /* SDIO-specfic fields. You can use SDIO_ANY_ID here of course */
347 u16 cis_vendor, cis_device;
348
349 /* for MMC cards */
350 unsigned int ext_csd_rev;
351
352 void (*vendor_fixup)(struct mmc_card *card, int data);
353 int data;
354};
355
356#define CID_MANFID_ANY (-1u)
357#define CID_OEMID_ANY ((unsigned short) -1)
358#define CID_NAME_ANY (NULL)
359
360#define EXT_CSD_REV_ANY (-1u)
361
362#define CID_MANFID_SANDISK 0x2
363#define CID_MANFID_TOSHIBA 0x11
364#define CID_MANFID_MICRON 0x13
365#define CID_MANFID_SAMSUNG 0x15
366#define CID_MANFID_KINGSTON 0x70
367#define CID_MANFID_HYNIX 0x90
368
369#define END_FIXUP { NULL }
370
371#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
372 _cis_vendor, _cis_device, \
373 _fixup, _data, _ext_csd_rev) \
374 { \
375 .name = (_name), \
376 .manfid = (_manfid), \
377 .oemid = (_oemid), \
378 .rev_start = (_rev_start), \
379 .rev_end = (_rev_end), \
380 .cis_vendor = (_cis_vendor), \
381 .cis_device = (_cis_device), \
382 .vendor_fixup = (_fixup), \
383 .data = (_data), \
384 .ext_csd_rev = (_ext_csd_rev), \
385 }
386
387#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
388 _fixup, _data, _ext_csd_rev) \
389 _FIXUP_EXT(_name, _manfid, \
390 _oemid, _rev_start, _rev_end, \
391 SDIO_ANY_ID, SDIO_ANY_ID, \
392 _fixup, _data, _ext_csd_rev) \
393
394#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
395 MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
396 EXT_CSD_REV_ANY)
397
398#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \
399 _ext_csd_rev) \
400 MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
401 _ext_csd_rev)
402
403#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
404 _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
405 CID_OEMID_ANY, 0, -1ull, \
406 _vendor, _device, \
407 _fixup, _data, EXT_CSD_REV_ANY) \
408
409#define cid_rev(hwrev, fwrev, year, month) \
410 (((u64) hwrev) << 40 | \
411 ((u64) fwrev) << 32 | \
412 ((u64) year) << 16 | \
413 ((u64) month))
414
415#define cid_rev_card(card) \
416 cid_rev(card->cid.hwrev, \
417 card->cid.fwrev, \
418 card->cid.year, \
419 card->cid.month)
420
421/*
422 * Unconditionally quirk add/remove.
423 */
424
425static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
426{
427 card->quirks |= data;
428}
429
430static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
431{
432 card->quirks &= ~data;
433}
434
435#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) 310#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
436#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) 311#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
437#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) 312#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
438 313
439#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
440#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
441#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
442#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
443#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
444#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
445#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
446
447#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
448#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
449#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
450#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
451#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
452#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
453#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
454#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
455#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
456
457/*
458 * Quirk add/remove for MMC products.
459 */
460
461static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
462{
463 if (mmc_card_mmc(card))
464 card->quirks |= data;
465}
466
467static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
468 int data)
469{
470 if (mmc_card_mmc(card))
471 card->quirks &= ~data;
472}
473
474/*
475 * Quirk add/remove for SD products.
476 */
477
478static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
479{
480 if (mmc_card_sd(card))
481 card->quirks |= data;
482}
483
484static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
485 int data)
486{
487 if (mmc_card_sd(card))
488 card->quirks &= ~data;
489}
490
491static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
492{
493 return c->quirks & MMC_QUIRK_LENIENT_FN0;
494}
495
496static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
497{
498 return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
499}
500
501static inline int mmc_card_disable_cd(const struct mmc_card *c)
502{
503 return c->quirks & MMC_QUIRK_DISABLE_CD;
504}
505
506static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
507{
508 return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
509}
510
511static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
512{
513 return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
514}
515
516static inline int mmc_card_long_read_time(const struct mmc_card *c)
517{
518 return c->quirks & MMC_QUIRK_LONG_READ_TIME;
519}
520
521static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
522{
523 return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
524}
525
526static inline int mmc_card_broken_hpi(const struct mmc_card *c)
527{
528 return c->quirks & MMC_QUIRK_BROKEN_HPI;
529}
530
531#define mmc_card_name(c) ((c)->cid.prod_name)
532#define mmc_card_id(c) (dev_name(&(c)->dev))
533
534#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
535
536/*
537 * MMC device driver (e.g., Flash card, I/O card...)
538 */
539struct mmc_driver {
540 struct device_driver drv;
541 int (*probe)(struct mmc_card *);
542 void (*remove)(struct mmc_card *);
543 void (*shutdown)(struct mmc_card *);
544};
545
546extern int mmc_register_driver(struct mmc_driver *);
547extern void mmc_unregister_driver(struct mmc_driver *);
548
549extern void mmc_fixup_device(struct mmc_card *card,
550 const struct mmc_fixup *table);
551
552#endif /* LINUX_MMC_CARD_H */ 314#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index e33cc748dcfe..a0c63ea28796 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -8,10 +8,9 @@
8#ifndef LINUX_MMC_CORE_H 8#ifndef LINUX_MMC_CORE_H
9#define LINUX_MMC_CORE_H 9#define LINUX_MMC_CORE_H
10 10
11#include <linux/interrupt.h>
12#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/types.h>
13 13
14struct request;
15struct mmc_data; 14struct mmc_data;
16struct mmc_request; 15struct mmc_request;
17 16
@@ -159,79 +158,14 @@ struct mmc_request {
159struct mmc_card; 158struct mmc_card;
160struct mmc_async_req; 159struct mmc_async_req;
161 160
162extern int mmc_stop_bkops(struct mmc_card *); 161struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
163extern int mmc_read_bkops_status(struct mmc_card *); 162 struct mmc_async_req *areq,
164extern struct mmc_async_req *mmc_start_req(struct mmc_host *, 163 enum mmc_blk_status *ret_stat);
165 struct mmc_async_req *, 164void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
166 enum mmc_blk_status *); 165int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
167extern int mmc_interrupt_hpi(struct mmc_card *); 166 int retries);
168extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); 167
169extern void mmc_wait_for_req_done(struct mmc_host *host, 168int mmc_hw_reset(struct mmc_host *host);
170 struct mmc_request *mrq); 169void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
171extern bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
172extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
173extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
174extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
175 struct mmc_command *, int);
176extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
177extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
178extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
179extern int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
180extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
181
182#define MMC_ERASE_ARG 0x00000000
183#define MMC_SECURE_ERASE_ARG 0x80000000
184#define MMC_TRIM_ARG 0x00000001
185#define MMC_DISCARD_ARG 0x00000003
186#define MMC_SECURE_TRIM1_ARG 0x80000001
187#define MMC_SECURE_TRIM2_ARG 0x80008000
188
189#define MMC_SECURE_ARGS 0x80000000
190#define MMC_TRIM_ARGS 0x00008001
191
192extern int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
193 unsigned int arg);
194extern int mmc_can_erase(struct mmc_card *card);
195extern int mmc_can_trim(struct mmc_card *card);
196extern int mmc_can_discard(struct mmc_card *card);
197extern int mmc_can_sanitize(struct mmc_card *card);
198extern int mmc_can_secure_erase_trim(struct mmc_card *card);
199extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
200 unsigned int nr);
201extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
202
203extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
204extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
205 bool is_rel_write);
206extern int mmc_hw_reset(struct mmc_host *host);
207extern int mmc_can_reset(struct mmc_card *card);
208
209extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
210extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
211
212extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
213extern void mmc_release_host(struct mmc_host *host);
214
215extern void mmc_get_card(struct mmc_card *card);
216extern void mmc_put_card(struct mmc_card *card);
217
218extern int mmc_flush_cache(struct mmc_card *);
219
220extern int mmc_detect_card_removed(struct mmc_host *host);
221
222/**
223 * mmc_claim_host - exclusively claim a host
224 * @host: mmc host to claim
225 *
226 * Claim a host for a set of operations.
227 */
228static inline void mmc_claim_host(struct mmc_host *host)
229{
230 __mmc_claim_host(host, NULL);
231}
232
233struct device_node;
234extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
235extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
236 170
237#endif /* LINUX_MMC_CORE_H */ 171#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
deleted file mode 100644
index 15db6f83f53f..000000000000
--- a/include/linux/mmc/dw_mmc.h
+++ /dev/null
@@ -1,274 +0,0 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef LINUX_MMC_DW_MMC_H
15#define LINUX_MMC_DW_MMC_H
16
17#include <linux/scatterlist.h>
18#include <linux/mmc/core.h>
19#include <linux/dmaengine.h>
20#include <linux/reset.h>
21
22#define MAX_MCI_SLOTS 2
23
24enum dw_mci_state {
25 STATE_IDLE = 0,
26 STATE_SENDING_CMD,
27 STATE_SENDING_DATA,
28 STATE_DATA_BUSY,
29 STATE_SENDING_STOP,
30 STATE_DATA_ERROR,
31 STATE_SENDING_CMD11,
32 STATE_WAITING_CMD11_DONE,
33};
34
35enum {
36 EVENT_CMD_COMPLETE = 0,
37 EVENT_XFER_COMPLETE,
38 EVENT_DATA_COMPLETE,
39 EVENT_DATA_ERROR,
40};
41
42enum dw_mci_cookie {
43 COOKIE_UNMAPPED,
44 COOKIE_PRE_MAPPED, /* mapped by pre_req() of dwmmc */
45 COOKIE_MAPPED, /* mapped by prepare_data() of dwmmc */
46};
47
48struct mmc_data;
49
50enum {
51 TRANS_MODE_PIO = 0,
52 TRANS_MODE_IDMAC,
53 TRANS_MODE_EDMAC
54};
55
56struct dw_mci_dma_slave {
57 struct dma_chan *ch;
58 enum dma_transfer_direction direction;
59};
60
61/**
62 * struct dw_mci - MMC controller state shared between all slots
63 * @lock: Spinlock protecting the queue and associated data.
64 * @irq_lock: Spinlock protecting the INTMASK setting.
65 * @regs: Pointer to MMIO registers.
66 * @fifo_reg: Pointer to MMIO registers for data FIFO
67 * @sg: Scatterlist entry currently being processed by PIO code, if any.
68 * @sg_miter: PIO mapping scatterlist iterator.
69 * @cur_slot: The slot which is currently using the controller.
70 * @mrq: The request currently being processed on @cur_slot,
71 * or NULL if the controller is idle.
72 * @cmd: The command currently being sent to the card, or NULL.
73 * @data: The data currently being transferred, or NULL if no data
74 * transfer is in progress.
75 * @stop_abort: The command currently prepared for stoping transfer.
76 * @prev_blksz: The former transfer blksz record.
77 * @timing: Record of current ios timing.
78 * @use_dma: Whether DMA channel is initialized or not.
79 * @using_dma: Whether DMA is in use for the current transfer.
80 * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
81 * @sg_dma: Bus address of DMA buffer.
82 * @sg_cpu: Virtual address of DMA buffer.
83 * @dma_ops: Pointer to platform-specific DMA callbacks.
84 * @cmd_status: Snapshot of SR taken upon completion of the current
85 * @ring_size: Buffer size for idma descriptors.
86 * command. Only valid when EVENT_CMD_COMPLETE is pending.
87 * @dms: structure of slave-dma private data.
88 * @phy_regs: physical address of controller's register map
89 * @data_status: Snapshot of SR taken upon completion of the current
90 * data transfer. Only valid when EVENT_DATA_COMPLETE or
91 * EVENT_DATA_ERROR is pending.
92 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
93 * to be sent.
94 * @dir_status: Direction of current transfer.
95 * @tasklet: Tasklet running the request state machine.
96 * @pending_events: Bitmask of events flagged by the interrupt handler
97 * to be processed by the tasklet.
98 * @completed_events: Bitmask of events which the state machine has
99 * processed.
100 * @state: Tasklet state.
101 * @queue: List of slots waiting for access to the controller.
102 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
103 * rate and timeout calculations.
104 * @current_speed: Configured rate of the controller.
105 * @num_slots: Number of slots available.
106 * @fifoth_val: The value of FIFOTH register.
107 * @verid: Denote Version ID.
108 * @dev: Device associated with the MMC controller.
109 * @pdata: Platform data associated with the MMC controller.
110 * @drv_data: Driver specific data for identified variant of the controller
111 * @priv: Implementation defined private data.
112 * @biu_clk: Pointer to bus interface unit clock instance.
113 * @ciu_clk: Pointer to card interface unit clock instance.
114 * @slot: Slots sharing this MMC controller.
115 * @fifo_depth: depth of FIFO.
116 * @data_shift: log2 of FIFO item size.
117 * @part_buf_start: Start index in part_buf.
118 * @part_buf_count: Bytes of partial data in part_buf.
119 * @part_buf: Simple buffer for partial fifo reads/writes.
120 * @push_data: Pointer to FIFO push function.
121 * @pull_data: Pointer to FIFO pull function.
122 * @vqmmc_enabled: Status of vqmmc, should be true or false.
123 * @irq_flags: The flags to be passed to request_irq.
124 * @irq: The irq value to be passed to request_irq.
125 * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
126 * @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
127 * @dto_timer: Timer for broken data transfer over scheme.
128 *
129 * Locking
130 * =======
131 *
132 * @lock is a softirq-safe spinlock protecting @queue as well as
133 * @cur_slot, @mrq and @state. These must always be updated
134 * at the same time while holding @lock.
135 *
136 * @irq_lock is an irq-safe spinlock protecting the INTMASK register
137 * to allow the interrupt handler to modify it directly. Held for only long
138 * enough to read-modify-write INTMASK and no other locks are grabbed when
139 * holding this one.
140 *
141 * The @mrq field of struct dw_mci_slot is also protected by @lock,
142 * and must always be written at the same time as the slot is added to
143 * @queue.
144 *
145 * @pending_events and @completed_events are accessed using atomic bit
146 * operations, so they don't need any locking.
147 *
148 * None of the fields touched by the interrupt handler need any
149 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
150 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
151 * interrupts must be disabled and @data_status updated with a
152 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
153 * CMDRDY interrupt must be disabled and @cmd_status updated with a
154 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
155 * bytes_xfered field of @data must be written. This is ensured by
156 * using barriers.
157 */
158struct dw_mci {
159 spinlock_t lock;
160 spinlock_t irq_lock;
161 void __iomem *regs;
162 void __iomem *fifo_reg;
163
164 struct scatterlist *sg;
165 struct sg_mapping_iter sg_miter;
166
167 struct dw_mci_slot *cur_slot;
168 struct mmc_request *mrq;
169 struct mmc_command *cmd;
170 struct mmc_data *data;
171 struct mmc_command stop_abort;
172 unsigned int prev_blksz;
173 unsigned char timing;
174
175 /* DMA interface members*/
176 int use_dma;
177 int using_dma;
178 int dma_64bit_address;
179
180 dma_addr_t sg_dma;
181 void *sg_cpu;
182 const struct dw_mci_dma_ops *dma_ops;
183 /* For idmac */
184 unsigned int ring_size;
185
186 /* For edmac */
187 struct dw_mci_dma_slave *dms;
188 /* Registers's physical base address */
189 resource_size_t phy_regs;
190
191 u32 cmd_status;
192 u32 data_status;
193 u32 stop_cmdr;
194 u32 dir_status;
195 struct tasklet_struct tasklet;
196 unsigned long pending_events;
197 unsigned long completed_events;
198 enum dw_mci_state state;
199 struct list_head queue;
200
201 u32 bus_hz;
202 u32 current_speed;
203 u32 num_slots;
204 u32 fifoth_val;
205 u16 verid;
206 struct device *dev;
207 struct dw_mci_board *pdata;
208 const struct dw_mci_drv_data *drv_data;
209 void *priv;
210 struct clk *biu_clk;
211 struct clk *ciu_clk;
212 struct dw_mci_slot *slot[MAX_MCI_SLOTS];
213
214 /* FIFO push and pull */
215 int fifo_depth;
216 int data_shift;
217 u8 part_buf_start;
218 u8 part_buf_count;
219 union {
220 u16 part_buf16;
221 u32 part_buf32;
222 u64 part_buf;
223 };
224 void (*push_data)(struct dw_mci *host, void *buf, int cnt);
225 void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
226
227 bool vqmmc_enabled;
228 unsigned long irq_flags; /* IRQ flags */
229 int irq;
230
231 int sdio_id0;
232
233 struct timer_list cmd11_timer;
234 struct timer_list dto_timer;
235};
236
237/* DMA ops for Internal/External DMAC interface */
238struct dw_mci_dma_ops {
239 /* DMA Ops */
240 int (*init)(struct dw_mci *host);
241 int (*start)(struct dw_mci *host, unsigned int sg_len);
242 void (*complete)(void *host);
243 void (*stop)(struct dw_mci *host);
244 void (*cleanup)(struct dw_mci *host);
245 void (*exit)(struct dw_mci *host);
246};
247
248struct dma_pdata;
249
250/* Board platform data */
251struct dw_mci_board {
252 u32 num_slots;
253
254 unsigned int bus_hz; /* Clock speed at the cclk_in pad */
255
256 u32 caps; /* Capabilities */
257 u32 caps2; /* More capabilities */
258 u32 pm_caps; /* PM capabilities */
259 /*
260 * Override fifo depth. If 0, autodetect it from the FIFOTH register,
261 * but note that this may not be reliable after a bootloader has used
262 * it.
263 */
264 unsigned int fifo_depth;
265
266 /* delay in mS before detecting cards after interrupt */
267 u32 detect_delay_ms;
268
269 struct reset_control *rstc;
270 struct dw_mci_dma_ops *dma_ops;
271 struct dma_pdata *data;
272};
273
274#endif /* LINUX_MMC_DW_MMC_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8bc884121465..83f1c4a9f03b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -10,16 +10,12 @@
10#ifndef LINUX_MMC_HOST_H 10#ifndef LINUX_MMC_HOST_H
11#define LINUX_MMC_HOST_H 11#define LINUX_MMC_HOST_H
12 12
13#include <linux/leds.h>
14#include <linux/mutex.h>
15#include <linux/timer.h>
16#include <linux/sched.h> 13#include <linux/sched.h>
17#include <linux/device.h> 14#include <linux/device.h>
18#include <linux/fault-inject.h> 15#include <linux/fault-inject.h>
19 16
20#include <linux/mmc/core.h> 17#include <linux/mmc/core.h>
21#include <linux/mmc/card.h> 18#include <linux/mmc/card.h>
22#include <linux/mmc/mmc.h>
23#include <linux/mmc/pm.h> 19#include <linux/mmc/pm.h>
24 20
25struct mmc_ios { 21struct mmc_ios {
@@ -82,6 +78,8 @@ struct mmc_ios {
82 bool enhanced_strobe; /* hs400es selection */ 78 bool enhanced_strobe; /* hs400es selection */
83}; 79};
84 80
81struct mmc_host;
82
85struct mmc_host_ops { 83struct mmc_host_ops {
86 /* 84 /*
87 * It is optional for the host to implement pre_req and post_req in 85 * It is optional for the host to implement pre_req and post_req in
@@ -162,9 +160,6 @@ struct mmc_host_ops {
162 unsigned int direction, int blk_size); 160 unsigned int direction, int blk_size);
163}; 161};
164 162
165struct mmc_card;
166struct device;
167
168struct mmc_async_req { 163struct mmc_async_req {
169 /* active mmc request */ 164 /* active mmc request */
170 struct mmc_request *mrq; 165 struct mmc_request *mrq;
@@ -264,17 +259,16 @@ struct mmc_host {
264#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ 259#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
265#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ 260#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
266#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ 261#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
267#define MMC_CAP_1_8V_DDR (1 << 11) /* can support */ 262#define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */
268 /* DDR mode at 1.8V */ 263#define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */
269#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ 264#define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */
270 /* DDR mode at 1.2V */ 265#define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */
271#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */ 266#define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */
272#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */ 267#define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */
273#define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */ 268#define MMC_CAP_UHS_SDR25 (1 << 17) /* Host supports UHS SDR25 mode */
274#define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */ 269#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
275#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ 270#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
276#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ 271#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
277#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
278#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ 272#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
279#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ 273#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
280#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ 274#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
@@ -397,11 +391,14 @@ struct mmc_host {
397 unsigned long private[0] ____cacheline_aligned; 391 unsigned long private[0] ____cacheline_aligned;
398}; 392};
399 393
394struct device_node;
395
400struct mmc_host *mmc_alloc_host(int extra, struct device *); 396struct mmc_host *mmc_alloc_host(int extra, struct device *);
401int mmc_add_host(struct mmc_host *); 397int mmc_add_host(struct mmc_host *);
402void mmc_remove_host(struct mmc_host *); 398void mmc_remove_host(struct mmc_host *);
403void mmc_free_host(struct mmc_host *); 399void mmc_free_host(struct mmc_host *);
404int mmc_of_parse(struct mmc_host *host); 400int mmc_of_parse(struct mmc_host *host);
401int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
405 402
406static inline void *mmc_priv(struct mmc_host *host) 403static inline void *mmc_priv(struct mmc_host *host)
407{ 404{
@@ -457,6 +454,7 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
457} 454}
458#endif 455#endif
459 456
457u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
460int mmc_regulator_get_supply(struct mmc_host *mmc); 458int mmc_regulator_get_supply(struct mmc_host *mmc);
461 459
462static inline int mmc_card_is_removable(struct mmc_host *host) 460static inline int mmc_card_is_removable(struct mmc_host *host)
@@ -474,56 +472,20 @@ static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
474 return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ; 472 return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
475} 473}
476 474
477static inline int mmc_host_cmd23(struct mmc_host *host) 475/* TODO: Move to private header */
478{
479 return host->caps & MMC_CAP_CMD23;
480}
481
482static inline int mmc_boot_partition_access(struct mmc_host *host)
483{
484 return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
485}
486
487static inline int mmc_host_uhs(struct mmc_host *host)
488{
489 return host->caps &
490 (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
491 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
492 MMC_CAP_UHS_DDR50);
493}
494
495static inline int mmc_card_hs(struct mmc_card *card) 476static inline int mmc_card_hs(struct mmc_card *card)
496{ 477{
497 return card->host->ios.timing == MMC_TIMING_SD_HS || 478 return card->host->ios.timing == MMC_TIMING_SD_HS ||
498 card->host->ios.timing == MMC_TIMING_MMC_HS; 479 card->host->ios.timing == MMC_TIMING_MMC_HS;
499} 480}
500 481
482/* TODO: Move to private header */
501static inline int mmc_card_uhs(struct mmc_card *card) 483static inline int mmc_card_uhs(struct mmc_card *card)
502{ 484{
503 return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && 485 return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 &&
504 card->host->ios.timing <= MMC_TIMING_UHS_DDR50; 486 card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
505} 487}
506 488
507static inline bool mmc_card_hs200(struct mmc_card *card)
508{
509 return card->host->ios.timing == MMC_TIMING_MMC_HS200;
510}
511
512static inline bool mmc_card_ddr52(struct mmc_card *card)
513{
514 return card->host->ios.timing == MMC_TIMING_MMC_DDR52;
515}
516
517static inline bool mmc_card_hs400(struct mmc_card *card)
518{
519 return card->host->ios.timing == MMC_TIMING_MMC_HS400;
520}
521
522static inline bool mmc_card_hs400es(struct mmc_card *card)
523{
524 return card->host->ios.enhanced_strobe;
525}
526
527void mmc_retune_timer_stop(struct mmc_host *host); 489void mmc_retune_timer_stop(struct mmc_host *host);
528 490
529static inline void mmc_retune_needed(struct mmc_host *host) 491static inline void mmc_retune_needed(struct mmc_host *host)
@@ -532,18 +494,12 @@ static inline void mmc_retune_needed(struct mmc_host *host)
532 host->need_retune = 1; 494 host->need_retune = 1;
533} 495}
534 496
535static inline void mmc_retune_recheck(struct mmc_host *host)
536{
537 if (host->hold_retune <= 1)
538 host->retune_now = 1;
539}
540
541static inline bool mmc_can_retune(struct mmc_host *host) 497static inline bool mmc_can_retune(struct mmc_host *host)
542{ 498{
543 return host->can_retune == 1; 499 return host->can_retune == 1;
544} 500}
545 501
546void mmc_retune_pause(struct mmc_host *host); 502int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
547void mmc_retune_unpause(struct mmc_host *host); 503int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
548 504
549#endif /* LINUX_MMC_HOST_H */ 505#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 672730acc705..3ffc27aaeeaf 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -24,6 +24,8 @@
24#ifndef LINUX_MMC_MMC_H 24#ifndef LINUX_MMC_MMC_H
25#define LINUX_MMC_MMC_H 25#define LINUX_MMC_MMC_H
26 26
27#include <linux/types.h>
28
27/* Standard MMC commands (4.1) type argument response */ 29/* Standard MMC commands (4.1) type argument response */
28 /* class 1 */ 30 /* class 1 */
29#define MMC_GO_IDLE_STATE 0 /* bc */ 31#define MMC_GO_IDLE_STATE 0 /* bc */
@@ -182,50 +184,6 @@ static inline bool mmc_op_multi(u32 opcode)
182#define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */ 184#define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */
183#define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE 185#define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE
184 186
185/* These are unpacked versions of the actual responses */
186
187struct _mmc_csd {
188 u8 csd_structure;
189 u8 spec_vers;
190 u8 taac;
191 u8 nsac;
192 u8 tran_speed;
193 u16 ccc;
194 u8 read_bl_len;
195 u8 read_bl_partial;
196 u8 write_blk_misalign;
197 u8 read_blk_misalign;
198 u8 dsr_imp;
199 u16 c_size;
200 u8 vdd_r_curr_min;
201 u8 vdd_r_curr_max;
202 u8 vdd_w_curr_min;
203 u8 vdd_w_curr_max;
204 u8 c_size_mult;
205 union {
206 struct { /* MMC system specification version 3.1 */
207 u8 erase_grp_size;
208 u8 erase_grp_mult;
209 } v31;
210 struct { /* MMC system specification version 2.2 */
211 u8 sector_size;
212 u8 erase_grp_size;
213 } v22;
214 } erase;
215 u8 wp_grp_size;
216 u8 wp_grp_enable;
217 u8 default_ecc;
218 u8 r2w_factor;
219 u8 write_bl_len;
220 u8 write_bl_partial;
221 u8 file_format_grp;
222 u8 copy;
223 u8 perm_write_protect;
224 u8 tmp_write_protect;
225 u8 file_format;
226 u8 ecc;
227};
228
229/* 187/*
230 * OCR bits are mostly in host.h 188 * OCR bits are mostly in host.h
231 */ 189 */
@@ -339,6 +297,9 @@ struct _mmc_csd {
339#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ 297#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
340#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ 298#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
341#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ 299#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
300#define EXT_CSD_PRE_EOL_INFO 267 /* RO */
301#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A 268 /* RO */
302#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B 269 /* RO */
342#define EXT_CSD_CMDQ_DEPTH 307 /* RO */ 303#define EXT_CSD_CMDQ_DEPTH 307 /* RO */
343#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */ 304#define EXT_CSD_CMDQ_SUPPORT 308 /* RO */
344#define EXT_CSD_SUPPORTED_MODE 493 /* RO */ 305#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
@@ -446,6 +407,7 @@ struct _mmc_csd {
446 * BKOPS modes 407 * BKOPS modes
447 */ 408 */
448#define EXT_CSD_MANUAL_BKOPS_MASK 0x01 409#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
410#define EXT_CSD_AUTO_BKOPS_MASK 0x02
449 411
450/* 412/*
451 * Command Queue 413 * Command Queue
@@ -457,12 +419,23 @@ struct _mmc_csd {
457/* 419/*
458 * MMC_SWITCH access modes 420 * MMC_SWITCH access modes
459 */ 421 */
460
461#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */ 422#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */
462#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */ 423#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */
463#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ 424#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
464#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ 425#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
465 426
427/*
428 * Erase/trim/discard
429 */
430#define MMC_ERASE_ARG 0x00000000
431#define MMC_SECURE_ERASE_ARG 0x80000000
432#define MMC_TRIM_ARG 0x00000001
433#define MMC_DISCARD_ARG 0x00000003
434#define MMC_SECURE_TRIM1_ARG 0x80000001
435#define MMC_SECURE_TRIM2_ARG 0x80008000
436#define MMC_SECURE_ARGS 0x80000000
437#define MMC_TRIM_ARGS 0x00008001
438
466#define mmc_driver_type_mask(n) (1 << (n)) 439#define mmc_driver_type_mask(n) (1 << (n))
467 440
468#endif /* LINUX_MMC_MMC_H */ 441#endif /* LINUX_MMC_MMC_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index d43ef96bf075..b733eb404ffc 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -36,6 +36,7 @@
36#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 36#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
37#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 37#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
38#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 38#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
39#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
39#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 40#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
40#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 41#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
41 42
@@ -51,6 +52,7 @@
51#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 52#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
52#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 53#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
53#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 54#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
55#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
54 56
55#define SDIO_VENDOR_ID_SIANO 0x039a 57#define SDIO_VENDOR_ID_SIANO 0x039a
56#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 58#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
@@ -60,4 +62,10 @@
60#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 62#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
61#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 63#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
62 64
65#define SDIO_VENDOR_ID_TI 0x0097
66#define SDIO_DEVICE_ID_TI_WL1271 0x4076
67
68#define SDIO_VENDOR_ID_STE 0x0020
69#define SDIO_DEVICE_ID_STE_CW1200 0x2280
70
63#endif /* LINUX_MMC_SDIO_IDS_H */ 71#endif /* LINUX_MMC_SDIO_IDS_H */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index ccd8fb2cad52..a7baa29484c3 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -32,13 +32,8 @@
32 */ 32 */
33 33
34struct sh_mmcif_plat_data { 34struct sh_mmcif_plat_data {
35 int (*get_cd)(struct platform_device *pdef);
36 unsigned int slave_id_tx; /* embedded slave_id_[tr]x */ 35 unsigned int slave_id_tx; /* embedded slave_id_[tr]x */
37 unsigned int slave_id_rx; 36 unsigned int slave_id_rx;
38 bool use_cd_gpio : 1;
39 bool ccs_unsupported : 1;
40 bool clk_ctrl2_present : 1;
41 unsigned int cd_gpio;
42 u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ 37 u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */
43 unsigned long caps; 38 unsigned long caps;
44 u32 ocr; 39 u32 ocr;
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index a7972cd3bc14..82f0d289f110 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -11,6 +11,9 @@
11#ifndef MMC_SLOT_GPIO_H 11#ifndef MMC_SLOT_GPIO_H
12#define MMC_SLOT_GPIO_H 12#define MMC_SLOT_GPIO_H
13 13
14#include <linux/types.h>
15#include <linux/irqreturn.h>
16
14struct mmc_host; 17struct mmc_host;
15 18
16int mmc_gpio_get_ro(struct mmc_host *host); 19int mmc_gpio_get_ro(struct mmc_host *host);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896fbc1e..f4aac87adcc3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
972 * @zonelist - The zonelist to search for a suitable zone 972 * @zonelist - The zonelist to search for a suitable zone
973 * @highest_zoneidx - The zone index of the highest zone to return 973 * @highest_zoneidx - The zone index of the highest zone to return
974 * @nodes - An optional nodemask to filter the zonelist with 974 * @nodes - An optional nodemask to filter the zonelist with
975 * @zone - The first suitable zone found is returned via this parameter 975 * @return - Zoneref pointer for the first suitable zone found (see below)
976 * 976 *
977 * This function returns the first zone at or below a given zone index that is 977 * This function returns the first zone at or below a given zone index that is
978 * within the allowed nodemask. The zoneref returned is a cursor that can be 978 * within the allowed nodemask. The zoneref returned is a cursor that can be
979 * used to iterate the zonelist with next_zones_zonelist by advancing it by 979 * used to iterate the zonelist with next_zones_zonelist by advancing it by
980 * one before calling. 980 * one before calling.
981 *
982 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
983 * never NULL). This may happen either genuinely, or due to concurrent nodemask
984 * update due to cpuset modification.
981 */ 985 */
982static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, 986static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
983 enum zone_type highest_zoneidx, 987 enum zone_type highest_zoneidx,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8a57f0b1242d..8850fcaf50db 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -501,6 +501,7 @@ struct platform_device_id {
501 kernel_ulong_t driver_data; 501 kernel_ulong_t driver_data;
502}; 502};
503 503
504#define MDIO_NAME_SIZE 32
504#define MDIO_MODULE_PREFIX "mdio:" 505#define MDIO_MODULE_PREFIX "mdio:"
505 506
506#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d" 507#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
diff --git a/include/linux/module.h b/include/linux/module.h
index 7c84273d60b9..f4f542ed3d92 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -18,7 +18,6 @@
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/jump_label.h> 19#include <linux/jump_label.h>
20#include <linux/export.h> 20#include <linux/export.h>
21#include <linux/extable.h> /* only as arch move module.h -> extable.h */
22#include <linux/rbtree_latch.h> 21#include <linux/rbtree_latch.h>
23 22
24#include <linux/percpu.h> 23#include <linux/percpu.h>
@@ -346,7 +345,7 @@ struct module {
346 345
347 /* Exported symbols */ 346 /* Exported symbols */
348 const struct kernel_symbol *syms; 347 const struct kernel_symbol *syms;
349 const unsigned long *crcs; 348 const s32 *crcs;
350 unsigned int num_syms; 349 unsigned int num_syms;
351 350
352 /* Kernel parameters. */ 351 /* Kernel parameters. */
@@ -359,18 +358,18 @@ struct module {
359 /* GPL-only exported symbols. */ 358 /* GPL-only exported symbols. */
360 unsigned int num_gpl_syms; 359 unsigned int num_gpl_syms;
361 const struct kernel_symbol *gpl_syms; 360 const struct kernel_symbol *gpl_syms;
362 const unsigned long *gpl_crcs; 361 const s32 *gpl_crcs;
363 362
364#ifdef CONFIG_UNUSED_SYMBOLS 363#ifdef CONFIG_UNUSED_SYMBOLS
365 /* unused exported symbols. */ 364 /* unused exported symbols. */
366 const struct kernel_symbol *unused_syms; 365 const struct kernel_symbol *unused_syms;
367 const unsigned long *unused_crcs; 366 const s32 *unused_crcs;
368 unsigned int num_unused_syms; 367 unsigned int num_unused_syms;
369 368
370 /* GPL-only, unused exported symbols. */ 369 /* GPL-only, unused exported symbols. */
371 unsigned int num_unused_gpl_syms; 370 unsigned int num_unused_gpl_syms;
372 const struct kernel_symbol *unused_gpl_syms; 371 const struct kernel_symbol *unused_gpl_syms;
373 const unsigned long *unused_gpl_crcs; 372 const s32 *unused_gpl_crcs;
374#endif 373#endif
375 374
376#ifdef CONFIG_MODULE_SIG 375#ifdef CONFIG_MODULE_SIG
@@ -382,7 +381,7 @@ struct module {
382 381
383 /* symbols that will be GPL-only in the near future. */ 382 /* symbols that will be GPL-only in the near future. */
384 const struct kernel_symbol *gpl_future_syms; 383 const struct kernel_symbol *gpl_future_syms;
385 const unsigned long *gpl_future_crcs; 384 const s32 *gpl_future_crcs;
386 unsigned int num_gpl_future_syms; 385 unsigned int num_gpl_future_syms;
387 386
388 /* Exception table */ 387 /* Exception table */
@@ -523,7 +522,7 @@ struct module *find_module(const char *name);
523 522
524struct symsearch { 523struct symsearch {
525 const struct kernel_symbol *start, *stop; 524 const struct kernel_symbol *start, *stop;
526 const unsigned long *crcs; 525 const s32 *crcs;
527 enum { 526 enum {
528 NOT_GPL_ONLY, 527 NOT_GPL_ONLY,
529 GPL_ONLY, 528 GPL_ONLY,
@@ -539,7 +538,7 @@ struct symsearch {
539 */ 538 */
540const struct kernel_symbol *find_symbol(const char *name, 539const struct kernel_symbol *find_symbol(const char *name,
541 struct module **owner, 540 struct module **owner,
542 const unsigned long **crc, 541 const s32 **crc,
543 bool gplok, 542 bool gplok,
544 bool warn); 543 bool warn);
545 544
@@ -764,7 +763,7 @@ extern int module_sysfs_initialized;
764 763
765#define __MODULE_STRING(x) __stringify(x) 764#define __MODULE_STRING(x) __stringify(x)
766 765
767#ifdef CONFIG_DEBUG_SET_MODULE_RONX 766#ifdef CONFIG_STRICT_MODULE_RWX
768extern void set_all_modules_text_rw(void); 767extern void set_all_modules_text_rw(void);
769extern void set_all_modules_text_ro(void); 768extern void set_all_modules_text_ro(void);
770extern void module_enable_ro(const struct module *mod, bool after_init); 769extern void module_enable_ro(const struct module *mod, bool after_init);
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index e5fb81376e92..d7f63339ef0b 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/in.h> 4#include <linux/in.h>
5#include <linux/pim.h> 5#include <linux/pim.h>
6#include <linux/rhashtable.h>
6#include <net/sock.h> 7#include <net/sock.h>
7#include <uapi/linux/mroute.h> 8#include <uapi/linux/mroute.h>
8 9
@@ -60,7 +61,6 @@ struct vif_device {
60#define VIFF_STATIC 0x8000 61#define VIFF_STATIC 0x8000
61 62
62#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) 63#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
63#define MFC_LINES 64
64 64
65struct mr_table { 65struct mr_table {
66 struct list_head list; 66 struct list_head list;
@@ -69,8 +69,9 @@ struct mr_table {
69 struct sock __rcu *mroute_sk; 69 struct sock __rcu *mroute_sk;
70 struct timer_list ipmr_expire_timer; 70 struct timer_list ipmr_expire_timer;
71 struct list_head mfc_unres_queue; 71 struct list_head mfc_unres_queue;
72 struct list_head mfc_cache_array[MFC_LINES];
73 struct vif_device vif_table[MAXVIFS]; 72 struct vif_device vif_table[MAXVIFS];
73 struct rhltable mfc_hash;
74 struct list_head mfc_cache_list;
74 int maxvif; 75 int maxvif;
75 atomic_t cache_resolve_queue_len; 76 atomic_t cache_resolve_queue_len;
76 bool mroute_do_assert; 77 bool mroute_do_assert;
@@ -85,17 +86,48 @@ enum {
85 MFC_STATIC = BIT(0), 86 MFC_STATIC = BIT(0),
86}; 87};
87 88
89struct mfc_cache_cmp_arg {
90 __be32 mfc_mcastgrp;
91 __be32 mfc_origin;
92};
93
94/**
95 * struct mfc_cache - multicast routing entries
96 * @mnode: rhashtable list
97 * @mfc_mcastgrp: destination multicast group address
98 * @mfc_origin: source address
99 * @cmparg: used for rhashtable comparisons
100 * @mfc_parent: source interface (iif)
101 * @mfc_flags: entry flags
102 * @expires: unresolved entry expire time
103 * @unresolved: unresolved cached skbs
104 * @last_assert: time of last assert
105 * @minvif: minimum VIF id
106 * @maxvif: maximum VIF id
107 * @bytes: bytes that have passed for this entry
108 * @pkt: packets that have passed for this entry
109 * @wrong_if: number of wrong source interface hits
110 * @lastuse: time of last use of the group (traffic or update)
111 * @ttls: OIF TTL threshold array
112 * @list: global entry list
113 * @rcu: used for entry destruction
114 */
88struct mfc_cache { 115struct mfc_cache {
89 struct list_head list; 116 struct rhlist_head mnode;
90 __be32 mfc_mcastgrp; /* Group the entry belongs to */ 117 union {
91 __be32 mfc_origin; /* Source of packet */ 118 struct {
92 vifi_t mfc_parent; /* Source interface */ 119 __be32 mfc_mcastgrp;
93 int mfc_flags; /* Flags on line */ 120 __be32 mfc_origin;
121 };
122 struct mfc_cache_cmp_arg cmparg;
123 };
124 vifi_t mfc_parent;
125 int mfc_flags;
94 126
95 union { 127 union {
96 struct { 128 struct {
97 unsigned long expires; 129 unsigned long expires;
98 struct sk_buff_head unresolved; /* Unresolved buffers */ 130 struct sk_buff_head unresolved;
99 } unres; 131 } unres;
100 struct { 132 struct {
101 unsigned long last_assert; 133 unsigned long last_assert;
@@ -105,20 +137,15 @@ struct mfc_cache {
105 unsigned long pkt; 137 unsigned long pkt;
106 unsigned long wrong_if; 138 unsigned long wrong_if;
107 unsigned long lastuse; 139 unsigned long lastuse;
108 unsigned char ttls[MAXVIFS]; /* TTL thresholds */ 140 unsigned char ttls[MAXVIFS];
109 } res; 141 } res;
110 } mfc_un; 142 } mfc_un;
143 struct list_head list;
111 struct rcu_head rcu; 144 struct rcu_head rcu;
112}; 145};
113 146
114#ifdef __BIG_ENDIAN
115#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
116#else
117#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
118#endif
119
120struct rtmsg; 147struct rtmsg;
121int ipmr_get_route(struct net *net, struct sk_buff *skb, 148int ipmr_get_route(struct net *net, struct sk_buff *skb,
122 __be32 saddr, __be32 daddr, 149 __be32 saddr, __be32 daddr,
123 struct rtmsg *rtm, int nowait, u32 portid); 150 struct rtmsg *rtm, u32 portid);
124#endif 151#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 19a1c0c2993b..ce44e3e96d27 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
116 116
117struct rtmsg; 117struct rtmsg;
118extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, 118extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
119 struct rtmsg *rtm, int nowait, u32 portid); 119 struct rtmsg *rtm, u32 portid);
120 120
121#ifdef CONFIG_IPV6_MROUTE 121#ifdef CONFIG_IPV6_MROUTE
122extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); 122extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 0db320b7bb15..a83b84ff70e5 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -17,7 +17,13 @@ struct msi_desc;
17struct pci_dev; 17struct pci_dev;
18struct platform_msi_priv_data; 18struct platform_msi_priv_data;
19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
20#ifdef CONFIG_GENERIC_MSI_IRQ
20void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 21void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
22#else
23static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
24{
25}
26#endif
21 27
22typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, 28typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
23 struct msi_msg *msg); 29 struct msi_msg *msg);
@@ -116,11 +122,15 @@ struct msi_desc {
116 122
117struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); 123struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
118void *msi_desc_to_pci_sysdata(struct msi_desc *desc); 124void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
125void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
119#else /* CONFIG_PCI_MSI */ 126#else /* CONFIG_PCI_MSI */
120static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) 127static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
121{ 128{
122 return NULL; 129 return NULL;
123} 130}
131static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
132{
133}
124#endif /* CONFIG_PCI_MSI */ 134#endif /* CONFIG_PCI_MSI */
125 135
126struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, 136struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
@@ -128,7 +138,6 @@ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
128void free_msi_entry(struct msi_desc *entry); 138void free_msi_entry(struct msi_desc *entry);
129void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 139void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
130void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 140void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
131void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
132 141
133u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); 142u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
134u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); 143u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
deleted file mode 100644
index ad3c3488073c..000000000000
--- a/include/linux/mtd/fsmc.h
+++ /dev/null
@@ -1,156 +0,0 @@
1/*
2 * incude/mtd/fsmc.h
3 *
4 * ST Microelectronics
5 * Flexible Static Memory Controller (FSMC)
6 * platform data interface and header file
7 *
8 * Copyright © 2010 ST Microelectronics
9 * Vipin Kumar <vipin.kumar@st.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#ifndef __MTD_FSMC_H
17#define __MTD_FSMC_H
18
19#include <linux/io.h>
20#include <linux/platform_device.h>
21#include <linux/mtd/physmap.h>
22#include <linux/types.h>
23#include <linux/mtd/partitions.h>
24#include <asm/param.h>
25
26#define FSMC_NAND_BW8 1
27#define FSMC_NAND_BW16 2
28
29#define FSMC_MAX_NOR_BANKS 4
30#define FSMC_MAX_NAND_BANKS 4
31
32#define FSMC_FLASH_WIDTH8 1
33#define FSMC_FLASH_WIDTH16 2
34
35/* fsmc controller registers for NOR flash */
36#define CTRL 0x0
37 /* ctrl register definitions */
38 #define BANK_ENABLE (1 << 0)
39 #define MUXED (1 << 1)
40 #define NOR_DEV (2 << 2)
41 #define WIDTH_8 (0 << 4)
42 #define WIDTH_16 (1 << 4)
43 #define RSTPWRDWN (1 << 6)
44 #define WPROT (1 << 7)
45 #define WRT_ENABLE (1 << 12)
46 #define WAIT_ENB (1 << 13)
47
48#define CTRL_TIM 0x4
49 /* ctrl_tim register definitions */
50
51#define FSMC_NOR_BANK_SZ 0x8
52#define FSMC_NOR_REG_SIZE 0x40
53
54#define FSMC_NOR_REG(base, bank, reg) (base + \
55 FSMC_NOR_BANK_SZ * (bank) + \
56 reg)
57
58/* fsmc controller registers for NAND flash */
59#define PC 0x00
60 /* pc register definitions */
61 #define FSMC_RESET (1 << 0)
62 #define FSMC_WAITON (1 << 1)
63 #define FSMC_ENABLE (1 << 2)
64 #define FSMC_DEVTYPE_NAND (1 << 3)
65 #define FSMC_DEVWID_8 (0 << 4)
66 #define FSMC_DEVWID_16 (1 << 4)
67 #define FSMC_ECCEN (1 << 6)
68 #define FSMC_ECCPLEN_512 (0 << 7)
69 #define FSMC_ECCPLEN_256 (1 << 7)
70 #define FSMC_TCLR_1 (1)
71 #define FSMC_TCLR_SHIFT (9)
72 #define FSMC_TCLR_MASK (0xF)
73 #define FSMC_TAR_1 (1)
74 #define FSMC_TAR_SHIFT (13)
75 #define FSMC_TAR_MASK (0xF)
76#define STS 0x04
77 /* sts register definitions */
78 #define FSMC_CODE_RDY (1 << 15)
79#define COMM 0x08
80 /* comm register definitions */
81 #define FSMC_TSET_0 0
82 #define FSMC_TSET_SHIFT 0
83 #define FSMC_TSET_MASK 0xFF
84 #define FSMC_TWAIT_6 6
85 #define FSMC_TWAIT_SHIFT 8
86 #define FSMC_TWAIT_MASK 0xFF
87 #define FSMC_THOLD_4 4
88 #define FSMC_THOLD_SHIFT 16
89 #define FSMC_THOLD_MASK 0xFF
90 #define FSMC_THIZ_1 1
91 #define FSMC_THIZ_SHIFT 24
92 #define FSMC_THIZ_MASK 0xFF
93#define ATTRIB 0x0C
94#define IOATA 0x10
95#define ECC1 0x14
96#define ECC2 0x18
97#define ECC3 0x1C
98#define FSMC_NAND_BANK_SZ 0x20
99
100#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
101 (FSMC_NAND_BANK_SZ * (bank)) + \
102 reg)
103
104#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
105
106struct fsmc_nand_timings {
107 uint8_t tclr;
108 uint8_t tar;
109 uint8_t thiz;
110 uint8_t thold;
111 uint8_t twait;
112 uint8_t tset;
113};
114
115enum access_mode {
116 USE_DMA_ACCESS = 1,
117 USE_WORD_ACCESS,
118};
119
120/**
121 * fsmc_nand_platform_data - platform specific NAND controller config
122 * @nand_timings: timing setup for the physical NAND interface
123 * @partitions: partition table for the platform, use a default fallback
124 * if this is NULL
125 * @nr_partitions: the number of partitions in the previous entry
126 * @options: different options for the driver
127 * @width: bus width
128 * @bank: default bank
129 * @select_bank: callback to select a certain bank, this is
130 * platform-specific. If the controller only supports one bank
131 * this may be set to NULL
132 */
133struct fsmc_nand_platform_data {
134 struct fsmc_nand_timings *nand_timings;
135 struct mtd_partition *partitions;
136 unsigned int nr_partitions;
137 unsigned int options;
138 unsigned int width;
139 unsigned int bank;
140
141 enum access_mode mode;
142
143 void (*select_bank)(uint32_t bank, uint32_t busw);
144
145 /* priv structures for dma accesses */
146 void *read_dma_priv;
147 void *write_dma_priv;
148};
149
150extern int __init fsmc_nor_init(struct platform_device *pdev,
151 unsigned long base, uint32_t bank, uint32_t width);
152extern void __init fsmc_init_board_info(struct platform_device *pdev,
153 struct mtd_partition *partitions, unsigned int nr_partitions,
154 unsigned int width);
155
156#endif /* __MTD_FSMC_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 13f8052b9ff9..eebdc63cf6af 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -24,6 +24,7 @@
24#include <linux/uio.h> 24#include <linux/uio.h>
25#include <linux/notifier.h> 25#include <linux/notifier.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/of.h>
27 28
28#include <mtd/mtd-abi.h> 29#include <mtd/mtd-abi.h>
29 30
@@ -322,6 +323,7 @@ struct mtd_info {
322 int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs); 323 int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
323 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); 324 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
324 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); 325 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
326 int (*_max_bad_blocks) (struct mtd_info *mtd, loff_t ofs, size_t len);
325 int (*_suspend) (struct mtd_info *mtd); 327 int (*_suspend) (struct mtd_info *mtd);
326 void (*_resume) (struct mtd_info *mtd); 328 void (*_resume) (struct mtd_info *mtd);
327 void (*_reboot) (struct mtd_info *mtd); 329 void (*_reboot) (struct mtd_info *mtd);
@@ -385,6 +387,8 @@ static inline void mtd_set_of_node(struct mtd_info *mtd,
385 struct device_node *np) 387 struct device_node *np)
386{ 388{
387 mtd->dev.of_node = np; 389 mtd->dev.of_node = np;
390 if (!mtd->name)
391 of_property_read_string(np, "label", &mtd->name);
388} 392}
389 393
390static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) 394static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
@@ -397,6 +401,18 @@ static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
397 return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize; 401 return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
398} 402}
399 403
404static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
405 loff_t ofs, size_t len)
406{
407 if (!mtd->_max_bad_blocks)
408 return -ENOTSUPP;
409
410 if (mtd->size < (len + ofs) || ofs < 0)
411 return -EINVAL;
412
413 return mtd->_max_bad_blocks(mtd, ofs, len);
414}
415
400int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, 416int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
401 struct mtd_pairing_info *info); 417 struct mtd_pairing_info *info);
402int mtd_pairing_info_to_wunit(struct mtd_info *mtd, 418int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c5f3a012ae62..9591e0fbe5bd 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -615,7 +615,7 @@ struct nand_buffers {
615 * @tALS_min: ALE setup time 615 * @tALS_min: ALE setup time
616 * @tAR_min: ALE to RE# delay 616 * @tAR_min: ALE to RE# delay
617 * @tCEA_max: CE# access time 617 * @tCEA_max: CE# access time
618 * @tCEH_min: 618 * @tCEH_min: CE# high hold time
619 * @tCH_min: CE# hold time 619 * @tCH_min: CE# hold time
620 * @tCHZ_max: CE# high to output hi-Z 620 * @tCHZ_max: CE# high to output hi-Z
621 * @tCLH_min: CLE hold time 621 * @tCLH_min: CLE hold time
@@ -801,6 +801,10 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
801 * supported, 0 otherwise. 801 * supported, 0 otherwise.
802 * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is 802 * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is
803 * supported, 0 otherwise. 803 * supported, 0 otherwise.
804 * @max_bb_per_die: [INTERN] the max number of bad blocks each die of a
805 * this nand device will encounter their life times.
806 * @blocks_per_die: [INTERN] The number of PEBs in a die
807 * @data_interface: [INTERN] NAND interface timing information
804 * @read_retries: [INTERN] the number of read retry modes supported 808 * @read_retries: [INTERN] the number of read retry modes supported
805 * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand 809 * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
806 * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand 810 * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
@@ -883,6 +887,8 @@ struct nand_chip {
883 struct nand_onfi_params onfi_params; 887 struct nand_onfi_params onfi_params;
884 struct nand_jedec_params jedec_params; 888 struct nand_jedec_params jedec_params;
885 }; 889 };
890 u16 max_bb_per_die;
891 u32 blocks_per_die;
886 892
887 struct nand_data_interface *data_interface; 893 struct nand_data_interface *data_interface;
888 894
@@ -958,6 +964,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
958#define NAND_MFR_SANDISK 0x45 964#define NAND_MFR_SANDISK 0x45
959#define NAND_MFR_INTEL 0x89 965#define NAND_MFR_INTEL 0x89
960#define NAND_MFR_ATO 0x9b 966#define NAND_MFR_ATO 0x9b
967#define NAND_MFR_WINBOND 0xef
961 968
962/* The maximum expected count of bytes in the NAND ID sequence */ 969/* The maximum expected count of bytes in the NAND ID sequence */
963#define NAND_MAX_ID_LEN 8 970#define NAND_MAX_ID_LEN 8
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 70736e1e6c8f..06df1e06b6e0 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -41,6 +41,7 @@ struct mtd_partition {
41 uint64_t size; /* partition size */ 41 uint64_t size; /* partition size */
42 uint64_t offset; /* offset within the master MTD space */ 42 uint64_t offset; /* offset within the master MTD space */
43 uint32_t mask_flags; /* master MTD flags to mask out for this partition */ 43 uint32_t mask_flags; /* master MTD flags to mask out for this partition */
44 struct device_node *of_node;
44}; 45};
45 46
46#define MTDPART_OFS_RETAIN (-3) 47#define MTDPART_OFS_RETAIN (-3)
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index c425c7b4c2a0..f2a718030476 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -43,9 +43,13 @@
43#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ 43#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
44#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ 44#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */
45#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ 45#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */
46#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual SPI) */ 46#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */
47#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad SPI) */ 47#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */
48#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */
49#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */
48#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ 50#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
51#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */
52#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */
49#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ 53#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */
50#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ 54#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
51#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ 55#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */
@@ -56,11 +60,17 @@
56#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ 60#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
57 61
58/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ 62/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
59#define SPINOR_OP_READ4 0x13 /* Read data bytes (low frequency) */ 63#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
60#define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */ 64#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */
61#define SPINOR_OP_READ4_1_1_2 0x3c /* Read data bytes (Dual SPI) */ 65#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */
62#define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */ 66#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */
67#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
68#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
63#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ 69#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
70#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */
71#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */
72#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */
73#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
64#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ 74#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
65 75
66/* Used for SST flashes only. */ 76/* Used for SST flashes only. */
@@ -68,6 +78,15 @@
68#define SPINOR_OP_WRDI 0x04 /* Write disable */ 78#define SPINOR_OP_WRDI 0x04 /* Write disable */
69#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ 79#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
70 80
81/* Used for S3AN flashes only */
82#define SPINOR_OP_XSE 0x50 /* Sector erase */
83#define SPINOR_OP_XPP 0x82 /* Page program */
84#define SPINOR_OP_XRDSR 0xd7 /* Read status register */
85
86#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
87#define XSR_RDY BIT(7) /* Ready */
88
89
71/* Used for Macronix and Winbond flashes. */ 90/* Used for Macronix and Winbond flashes. */
72#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ 91#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
73#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ 92#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
@@ -119,6 +138,9 @@ enum spi_nor_ops {
119enum spi_nor_option_flags { 138enum spi_nor_option_flags {
120 SNOR_F_USE_FSR = BIT(0), 139 SNOR_F_USE_FSR = BIT(0),
121 SNOR_F_HAS_SR_TB = BIT(1), 140 SNOR_F_HAS_SR_TB = BIT(1),
141 SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
142 SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
143 SNOR_F_READY_XSR_RDY = BIT(4),
122}; 144};
123 145
124/** 146/**
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b97870f2debd..1127fe31645d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -20,6 +20,8 @@
20#include <linux/osq_lock.h> 20#include <linux/osq_lock.h>
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22 22
23struct ww_acquire_ctx;
24
23/* 25/*
24 * Simple, straightforward mutexes with strict semantics: 26 * Simple, straightforward mutexes with strict semantics:
25 * 27 *
@@ -65,7 +67,7 @@ struct mutex {
65 67
66static inline struct task_struct *__mutex_owner(struct mutex *lock) 68static inline struct task_struct *__mutex_owner(struct mutex *lock)
67{ 69{
68 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03); 70 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
69} 71}
70 72
71/* 73/*
@@ -75,6 +77,7 @@ static inline struct task_struct *__mutex_owner(struct mutex *lock)
75struct mutex_waiter { 77struct mutex_waiter {
76 struct list_head list; 78 struct list_head list;
77 struct task_struct *task; 79 struct task_struct *task;
80 struct ww_acquire_ctx *ww_ctx;
78#ifdef CONFIG_DEBUG_MUTEXES 81#ifdef CONFIG_DEBUG_MUTEXES
79 void *magic; 82 void *magic;
80#endif 83#endif
@@ -156,10 +159,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
156 unsigned int subclass); 159 unsigned int subclass);
157extern int __must_check mutex_lock_killable_nested(struct mutex *lock, 160extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
158 unsigned int subclass); 161 unsigned int subclass);
162extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
159 163
160#define mutex_lock(lock) mutex_lock_nested(lock, 0) 164#define mutex_lock(lock) mutex_lock_nested(lock, 0)
161#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) 165#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
162#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) 166#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
167#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
163 168
164#define mutex_lock_nest_lock(lock, nest_lock) \ 169#define mutex_lock_nest_lock(lock, nest_lock) \
165do { \ 170do { \
@@ -171,11 +176,13 @@ do { \
171extern void mutex_lock(struct mutex *lock); 176extern void mutex_lock(struct mutex *lock);
172extern int __must_check mutex_lock_interruptible(struct mutex *lock); 177extern int __must_check mutex_lock_interruptible(struct mutex *lock);
173extern int __must_check mutex_lock_killable(struct mutex *lock); 178extern int __must_check mutex_lock_killable(struct mutex *lock);
179extern void mutex_lock_io(struct mutex *lock);
174 180
175# define mutex_lock_nested(lock, subclass) mutex_lock(lock) 181# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
176# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) 182# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
177# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) 183# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
178# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) 184# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
185# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
179#endif 186#endif
180 187
181/* 188/*
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 9c6c8ef2e9e7..9a0419594e84 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -71,7 +71,6 @@ enum {
71 NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */ 71 NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */
72 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ 72 NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
73 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ 73 NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
74 NETIF_F_BUSY_POLL_BIT, /* Busy poll */
75 74
76 NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ 75 NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
77 76
@@ -134,7 +133,6 @@ enum {
134#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 133#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
135#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 134#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
136#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) 135#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
137#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
138#define NETIF_F_HW_TC __NETIF_F(HW_TC) 136#define NETIF_F_HW_TC __NETIF_F(HW_TC)
139 137
140#define for_each_netdev_feature(mask_addr, bit) \ 138#define for_each_netdev_feature(mask_addr, bit) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9bde9558b596..f40f0ab3847a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -352,6 +352,7 @@ enum gro_result {
352 GRO_HELD, 352 GRO_HELD,
353 GRO_NORMAL, 353 GRO_NORMAL,
354 GRO_DROP, 354 GRO_DROP,
355 GRO_CONSUMED,
355}; 356};
356typedef enum gro_result gro_result_t; 357typedef enum gro_result gro_result_t;
357 358
@@ -463,7 +464,6 @@ static inline bool napi_reschedule(struct napi_struct *napi)
463 return false; 464 return false;
464} 465}
465 466
466bool __napi_complete(struct napi_struct *n);
467bool napi_complete_done(struct napi_struct *n, int work_done); 467bool napi_complete_done(struct napi_struct *n, int work_done);
468/** 468/**
469 * napi_complete - NAPI processing complete 469 * napi_complete - NAPI processing complete
@@ -866,11 +866,15 @@ struct netdev_xdp {
866 * of useless work if you return NETDEV_TX_BUSY. 866 * of useless work if you return NETDEV_TX_BUSY.
867 * Required; cannot be NULL. 867 * Required; cannot be NULL.
868 * 868 *
869 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 869 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
870 * netdev_features_t features); 870 * struct net_device *dev
871 * Adjusts the requested feature flags according to device-specific 871 * netdev_features_t features);
872 * constraints, and returns the resulting flags. Must not modify 872 * Called by core transmit path to determine if device is capable of
873 * the device state. 873 * performing offload operations on a given packet. This is to give
874 * the device an opportunity to implement any restrictions that cannot
875 * be otherwise expressed by feature flags. The check is called with
876 * the set of features that the stack has calculated and it returns
877 * those the driver believes to be appropriate.
874 * 878 *
875 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 879 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
876 * void *accel_priv, select_queue_fallback_t fallback); 880 * void *accel_priv, select_queue_fallback_t fallback);
@@ -913,8 +917,8 @@ struct netdev_xdp {
913 * Callback used when the transmitter has not made any progress 917 * Callback used when the transmitter has not made any progress
914 * for dev->watchdog ticks. 918 * for dev->watchdog ticks.
915 * 919 *
916 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 920 * void (*ndo_get_stats64)(struct net_device *dev,
917 * struct rtnl_link_stats64 *storage); 921 * struct rtnl_link_stats64 *storage);
918 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 922 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
919 * Called when a user wants to get the network device usage 923 * Called when a user wants to get the network device usage
920 * statistics. Drivers must do one of the following: 924 * statistics. Drivers must do one of the following:
@@ -964,11 +968,12 @@ struct netdev_xdp {
964 * with PF and querying it may introduce a theoretical security risk. 968 * with PF and querying it may introduce a theoretical security risk.
965 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); 969 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
966 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 970 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
967 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) 971 * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
968 * Called to setup 'tc' number of traffic classes in the net device. This 972 * __be16 protocol, struct tc_to_netdev *tc);
969 * is always called from the stack with the rtnl lock held and netif tx 973 * Called to setup any 'tc' scheduler, classifier or action on @dev.
970 * queues stopped. This allows the netdevice to perform queue management 974 * This is always called from the stack with the rtnl lock held and netif
971 * safely. 975 * tx queues stopped. This allows the netdevice to perform queue
976 * management safely.
972 * 977 *
973 * Fiber Channel over Ethernet (FCoE) offload functions. 978 * Fiber Channel over Ethernet (FCoE) offload functions.
974 * int (*ndo_fcoe_enable)(struct net_device *dev); 979 * int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1028,6 +1033,12 @@ struct netdev_xdp {
1028 * Called to release previously enslaved netdev. 1033 * Called to release previously enslaved netdev.
1029 * 1034 *
1030 * Feature/offload setting functions. 1035 * Feature/offload setting functions.
1036 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1037 * netdev_features_t features);
1038 * Adjusts the requested feature flags according to device-specific
1039 * constraints, and returns the resulting flags. Must not modify
1040 * the device state.
1041 *
1031 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 1042 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1032 * Called to update device configuration to new features. Passed 1043 * Called to update device configuration to new features. Passed
1033 * feature set might be less than what was returned by ndo_fix_features()). 1044 * feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1111,6 @@ struct netdev_xdp {
1100 * Callback to use for xmit over the accelerated station. This 1111 * Callback to use for xmit over the accelerated station. This
1101 * is used in place of ndo_start_xmit on accelerated net 1112 * is used in place of ndo_start_xmit on accelerated net
1102 * devices. 1113 * devices.
1103 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1104 * struct net_device *dev
1105 * netdev_features_t features);
1106 * Called by core transmit path to determine if device is capable of
1107 * performing offload operations on a given packet. This is to give
1108 * the device an opportunity to implement any restrictions that cannot
1109 * be otherwise expressed by feature flags. The check is called with
1110 * the set of features that the stack has calculated and it returns
1111 * those the driver believes to be appropriate.
1112 * int (*ndo_set_tx_maxrate)(struct net_device *dev, 1114 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1113 * int queue_index, u32 maxrate); 1115 * int queue_index, u32 maxrate);
1114 * Called when a user wants to set a max-rate limitation of specific 1116 * Called when a user wants to set a max-rate limitation of specific
@@ -1165,8 +1167,8 @@ struct net_device_ops {
1165 struct neigh_parms *); 1167 struct neigh_parms *);
1166 void (*ndo_tx_timeout) (struct net_device *dev); 1168 void (*ndo_tx_timeout) (struct net_device *dev);
1167 1169
1168 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 1170 void (*ndo_get_stats64)(struct net_device *dev,
1169 struct rtnl_link_stats64 *storage); 1171 struct rtnl_link_stats64 *storage);
1170 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); 1172 bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1171 int (*ndo_get_offload_stats)(int attr_id, 1173 int (*ndo_get_offload_stats)(int attr_id,
1172 const struct net_device *dev, 1174 const struct net_device *dev,
@@ -1183,9 +1185,6 @@ struct net_device_ops {
1183 struct netpoll_info *info); 1185 struct netpoll_info *info);
1184 void (*ndo_netpoll_cleanup)(struct net_device *dev); 1186 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1185#endif 1187#endif
1186#ifdef CONFIG_NET_RX_BUSY_POLL
1187 int (*ndo_busy_poll)(struct napi_struct *dev);
1188#endif
1189 int (*ndo_set_vf_mac)(struct net_device *dev, 1188 int (*ndo_set_vf_mac)(struct net_device *dev,
1190 int queue, u8 *mac); 1189 int queue, u8 *mac);
1191 int (*ndo_set_vf_vlan)(struct net_device *dev, 1190 int (*ndo_set_vf_vlan)(struct net_device *dev,
@@ -1510,6 +1509,7 @@ enum netdev_priv_flags {
1510 * @max_mtu: Interface Maximum MTU value 1509 * @max_mtu: Interface Maximum MTU value
1511 * @type: Interface hardware type 1510 * @type: Interface hardware type
1512 * @hard_header_len: Maximum hardware header length. 1511 * @hard_header_len: Maximum hardware header length.
1512 * @min_header_len: Minimum hardware header length
1513 * 1513 *
1514 * @needed_headroom: Extra headroom the hardware may need, but not in all 1514 * @needed_headroom: Extra headroom the hardware may need, but not in all
1515 * cases can this be guaranteed 1515 * cases can this be guaranteed
@@ -1551,7 +1551,6 @@ enum netdev_priv_flags {
1551 * @ax25_ptr: AX.25 specific data 1551 * @ax25_ptr: AX.25 specific data
1552 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1552 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1553 * 1553 *
1554 * @last_rx: Time of last Rx
1555 * @dev_addr: Hw address (before bcast, 1554 * @dev_addr: Hw address (before bcast,
1556 * because most packets are unicast) 1555 * because most packets are unicast)
1557 * 1556 *
@@ -1727,6 +1726,7 @@ struct net_device {
1727 unsigned int max_mtu; 1726 unsigned int max_mtu;
1728 unsigned short type; 1727 unsigned short type;
1729 unsigned short hard_header_len; 1728 unsigned short hard_header_len;
1729 unsigned short min_header_len;
1730 1730
1731 unsigned short needed_headroom; 1731 unsigned short needed_headroom;
1732 unsigned short needed_tailroom; 1732 unsigned short needed_tailroom;
@@ -1777,8 +1777,6 @@ struct net_device {
1777/* 1777/*
1778 * Cache lines mostly used on receive path (including eth_type_trans()) 1778 * Cache lines mostly used on receive path (including eth_type_trans())
1779 */ 1779 */
1780 unsigned long last_rx;
1781
1782 /* Interface address info used in eth_type_trans() */ 1780 /* Interface address info used in eth_type_trans() */
1783 unsigned char *dev_addr; 1781 unsigned char *dev_addr;
1784 1782
@@ -1868,8 +1866,12 @@ struct net_device {
1868 struct pcpu_vstats __percpu *vstats; 1866 struct pcpu_vstats __percpu *vstats;
1869 }; 1867 };
1870 1868
1869#if IS_ENABLED(CONFIG_GARP)
1871 struct garp_port __rcu *garp_port; 1870 struct garp_port __rcu *garp_port;
1871#endif
1872#if IS_ENABLED(CONFIG_MRP)
1872 struct mrp_port __rcu *mrp_port; 1873 struct mrp_port __rcu *mrp_port;
1874#endif
1873 1875
1874 struct device dev; 1876 struct device dev;
1875 const struct attribute_group *sysfs_groups[4]; 1877 const struct attribute_group *sysfs_groups[4];
@@ -2666,6 +2668,19 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2666 remcsum_unadjust((__sum16 *)ptr, grc->delta); 2668 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2667} 2669}
2668 2670
2671#ifdef CONFIG_XFRM_OFFLOAD
2672static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
2673{
2674 if (PTR_ERR(pp) != -EINPROGRESS)
2675 NAPI_GRO_CB(skb)->flush |= flush;
2676}
2677#else
2678static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
2679{
2680 NAPI_GRO_CB(skb)->flush |= flush;
2681}
2682#endif
2683
2669static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2684static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2670 unsigned short type, 2685 unsigned short type,
2671 const void *daddr, const void *saddr, 2686 const void *daddr, const void *saddr,
@@ -2693,6 +2708,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
2693{ 2708{
2694 if (likely(len >= dev->hard_header_len)) 2709 if (likely(len >= dev->hard_header_len))
2695 return true; 2710 return true;
2711 if (len < dev->min_header_len)
2712 return false;
2696 2713
2697 if (capable(CAP_SYS_RAWIO)) { 2714 if (capable(CAP_SYS_RAWIO)) {
2698 memset(ll_header + len, 0, dev->hard_header_len - len); 2715 memset(ll_header + len, 0, dev->hard_header_len - len);
@@ -3106,7 +3123,19 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
3106 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 3123 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3107} 3124}
3108 3125
3109void netif_wake_subqueue(struct net_device *dev, u16 queue_index); 3126/**
3127 * netif_wake_subqueue - allow sending packets on subqueue
3128 * @dev: network device
3129 * @queue_index: sub queue index
3130 *
3131 * Resume individual transmit queue of a device with multiple transmit queues.
3132 */
3133static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3134{
3135 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3136
3137 netif_tx_wake_queue(txq);
3138}
3110 3139
3111#ifdef CONFIG_XPS 3140#ifdef CONFIG_XPS
3112int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3141int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -3800,6 +3829,10 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
3800extern int netdev_max_backlog; 3829extern int netdev_max_backlog;
3801extern int netdev_tstamp_prequeue; 3830extern int netdev_tstamp_prequeue;
3802extern int weight_p; 3831extern int weight_p;
3832extern int dev_weight_rx_bias;
3833extern int dev_weight_tx_bias;
3834extern int dev_rx_weight;
3835extern int dev_tx_weight;
3803 3836
3804bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 3837bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3805struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, 3838struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
@@ -3877,10 +3910,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
3877 struct net_device *lower_dev); 3910 struct net_device *lower_dev);
3878void netdev_lower_state_changed(struct net_device *lower_dev, 3911void netdev_lower_state_changed(struct net_device *lower_dev,
3879 void *lower_state_info); 3912 void *lower_state_info);
3880int netdev_default_l2upper_neigh_construct(struct net_device *dev,
3881 struct neighbour *n);
3882void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
3883 struct neighbour *n);
3884 3913
3885/* RSS keys are 40 or 52 bytes long */ 3914/* RSS keys are 40 or 52 bytes long */
3886#define NETDEV_RSS_KEY_LEN 52 3915#define NETDEV_RSS_KEY_LEN 52
@@ -4333,6 +4362,15 @@ do { \
4333}) 4362})
4334#endif 4363#endif
4335 4364
4365/* if @cond then downgrade to debug, else print at @level */
4366#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
4367 do { \
4368 if (cond) \
4369 netif_dbg(priv, type, netdev, fmt, ##args); \
4370 else \
4371 netif_ ## level(priv, type, netdev, fmt, ##args); \
4372 } while (0)
4373
4336#if defined(VERBOSE_DEBUG) 4374#if defined(VERBOSE_DEBUG)
4337#define netif_vdbg netif_dbg 4375#define netif_vdbg netif_dbg
4338#else 4376#else
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 1d82dd5e9a08..1b49209dd5c7 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -28,6 +28,7 @@ struct nfnetlink_subsystem {
28 const struct nfnl_callback *cb; /* callback for individual types */ 28 const struct nfnl_callback *cb; /* callback for individual types */
29 int (*commit)(struct net *net, struct sk_buff *skb); 29 int (*commit)(struct net *net, struct sk_buff *skb);
30 int (*abort)(struct net *net, struct sk_buff *skb); 30 int (*abort)(struct net *net, struct sk_buff *skb);
31 bool (*valid_genid)(struct net *net, u32 genid);
31}; 32};
32 33
33int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); 34int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5117e4d2ddfa..be378cf47fcc 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -167,6 +167,7 @@ struct xt_match {
167 167
168 const char *table; 168 const char *table;
169 unsigned int matchsize; 169 unsigned int matchsize;
170 unsigned int usersize;
170#ifdef CONFIG_COMPAT 171#ifdef CONFIG_COMPAT
171 unsigned int compatsize; 172 unsigned int compatsize;
172#endif 173#endif
@@ -207,6 +208,7 @@ struct xt_target {
207 208
208 const char *table; 209 const char *table;
209 unsigned int targetsize; 210 unsigned int targetsize;
211 unsigned int usersize;
210#ifdef CONFIG_COMPAT 212#ifdef CONFIG_COMPAT
211 unsigned int compatsize; 213 unsigned int compatsize;
212#endif 214#endif
@@ -287,6 +289,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
287int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, 289int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
288 bool inv_proto); 290 bool inv_proto);
289 291
292int xt_match_to_user(const struct xt_entry_match *m,
293 struct xt_entry_match __user *u);
294int xt_target_to_user(const struct xt_entry_target *t,
295 struct xt_entry_target __user *u);
296int xt_data_to_user(void __user *dst, const void *src,
297 int usersize, int size);
298
290void *xt_copy_counters_from_user(const void __user *user, unsigned int len, 299void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
291 struct xt_counters_info *info, bool compat); 300 struct xt_counters_info *info, bool compat);
292 301
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bca536341d1a..1b1ca04820a3 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -282,7 +282,7 @@ enum nfsstat4 {
282 282
283static inline bool seqid_mutating_err(u32 err) 283static inline bool seqid_mutating_err(u32 err)
284{ 284{
285 /* rfc 3530 section 8.1.5: */ 285 /* See RFC 7530, section 9.1.7 */
286 switch (err) { 286 switch (err) {
287 case NFS4ERR_STALE_CLIENTID: 287 case NFS4ERR_STALE_CLIENTID:
288 case NFS4ERR_STALE_STATEID: 288 case NFS4ERR_STALE_STATEID:
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
291 case NFS4ERR_BADXDR: 291 case NFS4ERR_BADXDR:
292 case NFS4ERR_RESOURCE: 292 case NFS4ERR_RESOURCE:
293 case NFS4ERR_NOFILEHANDLE: 293 case NFS4ERR_NOFILEHANDLE:
294 case NFS4ERR_MOVED:
294 return false; 295 return false;
295 }; 296 };
296 return true; 297 return true;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca824a6ae..0a3fadc32693 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
110extern int watchdog_thresh; 110extern int watchdog_thresh;
111extern unsigned long watchdog_enabled; 111extern unsigned long watchdog_enabled;
112extern unsigned long *watchdog_cpumask_bits; 112extern unsigned long *watchdog_cpumask_bits;
113extern atomic_t watchdog_park_in_progress;
113#ifdef CONFIG_SMP 114#ifdef CONFIG_SMP
114extern int sysctl_softlockup_all_cpu_backtrace; 115extern int sysctl_softlockup_all_cpu_backtrace;
115extern int sysctl_hardlockup_all_cpu_backtrace; 116extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 3d1c6f1b15c9..0b676a02cf3e 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -244,6 +244,7 @@ enum {
244 NVME_CTRL_ONCS_DSM = 1 << 2, 244 NVME_CTRL_ONCS_DSM = 1 << 2,
245 NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, 245 NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
246 NVME_CTRL_VWC_PRESENT = 1 << 0, 246 NVME_CTRL_VWC_PRESENT = 1 << 0,
247 NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
247}; 248};
248 249
249struct nvme_lbaf { 250struct nvme_lbaf {
@@ -553,6 +554,8 @@ enum {
553 NVME_DSMGMT_AD = 1 << 2, 554 NVME_DSMGMT_AD = 1 << 2,
554}; 555};
555 556
557#define NVME_DSM_MAX_RANGES 256
558
556struct nvme_dsm_range { 559struct nvme_dsm_range {
557 __le32 cattr; 560 __le32 cattr;
558 __le32 nlb; 561 __le32 nlb;
diff --git a/include/linux/of.h b/include/linux/of.h
index d72f01009297..21e6323de0f3 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -280,6 +280,7 @@ extern struct device_node *of_get_child_by_name(const struct device_node *node,
280 280
281/* cache lookup */ 281/* cache lookup */
282extern struct device_node *of_find_next_cache_node(const struct device_node *); 282extern struct device_node *of_find_next_cache_node(const struct device_node *);
283extern int of_find_last_cache_level(unsigned int cpu);
283extern struct device_node *of_find_node_with_property( 284extern struct device_node *of_find_node_with_property(
284 struct device_node *from, const char *prop_name); 285 struct device_node *from, const char *prop_name);
285 286
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index cc7dd687a89d..e9afbcc8de12 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -37,6 +37,7 @@ extern const void *of_device_get_match_data(const struct device *dev);
37 37
38extern ssize_t of_device_get_modalias(struct device *dev, 38extern ssize_t of_device_get_modalias(struct device *dev,
39 char *str, ssize_t len); 39 char *str, ssize_t len);
40extern int of_device_request_module(struct device *dev);
40 41
41extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); 42extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
42extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); 43extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
@@ -78,6 +79,11 @@ static inline int of_device_get_modalias(struct device *dev,
78 return -ENODEV; 79 return -ENODEV;
79} 80}
80 81
82static inline int of_device_request_module(struct device *dev)
83{
84 return -ENODEV;
85}
86
81static inline int of_device_uevent_modalias(struct device *dev, 87static inline int of_device_uevent_modalias(struct device *dev,
82 struct kobj_uevent_env *env) 88 struct kobj_uevent_env *env)
83{ 89{
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 6a7fc5051099..13394ac83c66 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
31 31
32#endif /* CONFIG_OF_IOMMU */ 32#endif /* CONFIG_OF_IOMMU */
33 33
34static inline void of_iommu_set_ops(struct device_node *np,
35 const struct iommu_ops *ops)
36{
37 iommu_register_instance(&np->fwnode, ops);
38}
39
40static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
41{
42 return iommu_get_instance(&np->fwnode);
43}
44
45extern struct of_device_id __iommu_of_table; 34extern struct of_device_id __iommu_of_table;
46 35
47typedef int (*of_iommu_init_fn)(struct device_node *); 36typedef int (*of_iommu_init_fn)(struct device_node *);
diff --git a/include/linux/parman.h b/include/linux/parman.h
new file mode 100644
index 000000000000..3c8cccc7d4da
--- /dev/null
+++ b/include/linux/parman.h
@@ -0,0 +1,76 @@
1/*
2 * include/linux/parman.h - Manager for linear priority array areas
3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef _PARMAN_H
36#define _PARMAN_H
37
38#include <linux/list.h>
39
40enum parman_algo_type {
41 PARMAN_ALGO_TYPE_LSORT,
42};
43
44struct parman_item {
45 struct list_head list;
46 unsigned long index;
47};
48
49struct parman_prio {
50 struct list_head list;
51 struct list_head item_list;
52 unsigned long priority;
53};
54
55struct parman_ops {
56 unsigned long base_count;
57 unsigned long resize_step;
58 int (*resize)(void *priv, unsigned long new_count);
59 void (*move)(void *priv, unsigned long from_index,
60 unsigned long to_index, unsigned long count);
61 enum parman_algo_type algo;
62};
63
64struct parman;
65
66struct parman *parman_create(const struct parman_ops *ops, void *priv);
67void parman_destroy(struct parman *parman);
68void parman_prio_init(struct parman *parman, struct parman_prio *prio,
69 unsigned long priority);
70void parman_prio_fini(struct parman_prio *prio);
71int parman_item_add(struct parman *parman, struct parman_prio *prio,
72 struct parman_item *item);
73void parman_item_remove(struct parman *parman, struct parman_prio *prio,
74 struct parman_item *item);
75
76#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e2d1a124216a..adbc859fe7c4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -885,7 +885,6 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
885void pci_sort_breadthfirst(void); 885void pci_sort_breadthfirst(void);
886#define dev_is_pci(d) ((d)->bus == &pci_bus_type) 886#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
887#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false)) 887#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
888#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
889 888
890/* Generic PCI functions exported to card drivers */ 889/* Generic PCI functions exported to card drivers */
891 890
@@ -1630,7 +1629,6 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1630 1629
1631#define dev_is_pci(d) (false) 1630#define dev_is_pci(d) (false)
1632#define dev_is_pf(d) (false) 1631#define dev_is_pf(d) (false)
1633#define dev_num_vf(d) (0)
1634#endif /* CONFIG_PCI */ 1632#endif /* CONFIG_PCI */
1635 1633
1636/* Include architecture-dependent settings and functions */ 1634/* Include architecture-dependent settings and functions */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 1c7eec09e5eb..3a481a49546e 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
204static inline bool percpu_ref_tryget(struct percpu_ref *ref) 204static inline bool percpu_ref_tryget(struct percpu_ref *ref)
205{ 205{
206 unsigned long __percpu *percpu_count; 206 unsigned long __percpu *percpu_count;
207 int ret; 207 bool ret;
208 208
209 rcu_read_lock_sched(); 209 rcu_read_lock_sched();
210 210
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
238static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) 238static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
239{ 239{
240 unsigned long __percpu *percpu_count; 240 unsigned long __percpu *percpu_count;
241 int ret = false; 241 bool ret = false;
242 242
243 rcu_read_lock_sched(); 243 rcu_read_lock_sched();
244 244
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 5b2e6159b744..93664f022ecf 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -4,15 +4,15 @@
4#include <linux/atomic.h> 4#include <linux/atomic.h>
5#include <linux/rwsem.h> 5#include <linux/rwsem.h>
6#include <linux/percpu.h> 6#include <linux/percpu.h>
7#include <linux/wait.h> 7#include <linux/rcuwait.h>
8#include <linux/rcu_sync.h> 8#include <linux/rcu_sync.h>
9#include <linux/lockdep.h> 9#include <linux/lockdep.h>
10 10
11struct percpu_rw_semaphore { 11struct percpu_rw_semaphore {
12 struct rcu_sync rss; 12 struct rcu_sync rss;
13 unsigned int __percpu *read_count; 13 unsigned int __percpu *read_count;
14 struct rw_semaphore rw_sem; 14 struct rw_semaphore rw_sem; /* slowpath */
15 wait_queue_head_t writer; 15 struct rcuwait writer; /* blocked writer */
16 int readers_block; 16 int readers_block;
17}; 17};
18 18
@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name = { \
22 .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ 22 .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
23 .read_count = &__percpu_rwsem_rc_##name, \ 23 .read_count = &__percpu_rwsem_rc_##name, \
24 .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ 24 .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
25 .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ 25 .writer = __RCUWAIT_INITIALIZER(name.writer), \
26} 26}
27 27
28extern int __percpu_down_read(struct percpu_rw_semaphore *, int); 28extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 78ed8105e64d..000fdb211c7d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -482,6 +482,7 @@ struct perf_addr_filter {
482 * @list: list of filters for this event 482 * @list: list of filters for this event
483 * @lock: spinlock that serializes accesses to the @list and event's 483 * @lock: spinlock that serializes accesses to the @list and event's
484 * (and its children's) filter generations. 484 * (and its children's) filter generations.
485 * @nr_file_filters: number of file-based filters
485 * 486 *
486 * A child event will use parent's @list (and therefore @lock), so they are 487 * A child event will use parent's @list (and therefore @lock), so they are
487 * bundled together; see perf_event_addr_filters(). 488 * bundled together; see perf_event_addr_filters().
@@ -489,6 +490,7 @@ struct perf_addr_filter {
489struct perf_addr_filters_head { 490struct perf_addr_filters_head {
490 struct list_head list; 491 struct list_head list;
491 raw_spinlock_t lock; 492 raw_spinlock_t lock;
493 unsigned int nr_file_filters;
492}; 494};
493 495
494/** 496/**
@@ -785,9 +787,9 @@ struct perf_cpu_context {
785 ktime_t hrtimer_interval; 787 ktime_t hrtimer_interval;
786 unsigned int hrtimer_active; 788 unsigned int hrtimer_active;
787 789
788 struct pmu *unique_pmu;
789#ifdef CONFIG_CGROUP_PERF 790#ifdef CONFIG_CGROUP_PERF
790 struct perf_cgroup *cgrp; 791 struct perf_cgroup *cgrp;
792 struct list_head cgrp_cpuctx_entry;
791#endif 793#endif
792 794
793 struct list_head sched_cb_entry; 795 struct list_head sched_cb_entry;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f7d95f644eed..772476028a65 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,7 +25,6 @@
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/mod_devicetable.h> 27#include <linux/mod_devicetable.h>
28#include <linux/phy_led_triggers.h>
29 28
30#include <linux/atomic.h> 29#include <linux/atomic.h>
31 30
@@ -82,6 +81,9 @@ typedef enum {
82 PHY_INTERFACE_MODE_MOCA, 81 PHY_INTERFACE_MODE_MOCA,
83 PHY_INTERFACE_MODE_QSGMII, 82 PHY_INTERFACE_MODE_QSGMII,
84 PHY_INTERFACE_MODE_TRGMII, 83 PHY_INTERFACE_MODE_TRGMII,
84 PHY_INTERFACE_MODE_1000BASEX,
85 PHY_INTERFACE_MODE_2500BASEX,
86 PHY_INTERFACE_MODE_RXAUI,
85 PHY_INTERFACE_MODE_MAX, 87 PHY_INTERFACE_MODE_MAX,
86} phy_interface_t; 88} phy_interface_t;
87 89
@@ -142,6 +144,12 @@ static inline const char *phy_modes(phy_interface_t interface)
142 return "qsgmii"; 144 return "qsgmii";
143 case PHY_INTERFACE_MODE_TRGMII: 145 case PHY_INTERFACE_MODE_TRGMII:
144 return "trgmii"; 146 return "trgmii";
147 case PHY_INTERFACE_MODE_1000BASEX:
148 return "1000base-x";
149 case PHY_INTERFACE_MODE_2500BASEX:
150 return "2500base-x";
151 case PHY_INTERFACE_MODE_RXAUI:
152 return "rxaui";
145 default: 153 default:
146 return "unknown"; 154 return "unknown";
147 } 155 }
@@ -158,11 +166,7 @@ static inline const char *phy_modes(phy_interface_t interface)
158/* Used when trying to connect to a specific phy (mii bus id:phy device id) */ 166/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
159#define PHY_ID_FMT "%s:%02x" 167#define PHY_ID_FMT "%s:%02x"
160 168
161/* 169#define MII_BUS_ID_SIZE 61
162 * Need to be a little smaller than phydev->dev.bus_id to leave room
163 * for the ":%02x"
164 */
165#define MII_BUS_ID_SIZE (20 - 3)
166 170
167/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit 171/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
168 IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ 172 IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -632,7 +636,7 @@ struct phy_driver {
632/* A Structure for boards to register fixups with the PHY Lib */ 636/* A Structure for boards to register fixups with the PHY Lib */
633struct phy_fixup { 637struct phy_fixup {
634 struct list_head list; 638 struct list_head list;
635 char bus_id[20]; 639 char bus_id[MII_BUS_ID_SIZE + 3];
636 u32 phy_uid; 640 u32 phy_uid;
637 u32 phy_uid_mask; 641 u32 phy_uid_mask;
638 int (*run)(struct phy_device *phydev); 642 int (*run)(struct phy_device *phydev);
@@ -803,6 +807,9 @@ int phy_stop_interrupts(struct phy_device *phydev);
803 807
804static inline int phy_read_status(struct phy_device *phydev) 808static inline int phy_read_status(struct phy_device *phydev)
805{ 809{
810 if (!phydev->drv)
811 return -EIO;
812
806 return phydev->drv->read_status(phydev); 813 return phydev->drv->read_status(phydev);
807} 814}
808 815
@@ -882,6 +889,25 @@ void mdio_bus_exit(void);
882 889
883extern struct bus_type mdio_bus_type; 890extern struct bus_type mdio_bus_type;
884 891
892struct mdio_board_info {
893 const char *bus_id;
894 char modalias[MDIO_NAME_SIZE];
895 int mdio_addr;
896 const void *platform_data;
897};
898
899#if IS_ENABLED(CONFIG_PHYLIB)
900int mdiobus_register_board_info(const struct mdio_board_info *info,
901 unsigned int n);
902#else
903static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
904 unsigned int n)
905{
906 return 0;
907}
908#endif
909
910
885/** 911/**
886 * module_phy_driver() - Helper macro for registering PHY drivers 912 * module_phy_driver() - Helper macro for registering PHY drivers
887 * @__phy_drivers: array of PHY drivers to register 913 * @__phy_drivers: array of PHY drivers to register
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index a2daea0a37d2..b37b05bfd1a6 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -18,11 +18,11 @@ struct phy_device;
18#ifdef CONFIG_LED_TRIGGER_PHY 18#ifdef CONFIG_LED_TRIGGER_PHY
19 19
20#include <linux/leds.h> 20#include <linux/leds.h>
21#include <linux/phy.h>
21 22
22#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 23#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
23#define PHY_MII_BUS_ID_SIZE (20 - 3)
24 24
25#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \ 25#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
26 FIELD_SIZEOF(struct mdio_device, addr)+\ 26 FIELD_SIZEOF(struct mdio_device, addr)+\
27 PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) 27 PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
28 28
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index d7e5d608faa7..a0f2aba72fa9 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -29,6 +29,7 @@ extern int pinctrl_request_gpio(unsigned gpio);
29extern void pinctrl_free_gpio(unsigned gpio); 29extern void pinctrl_free_gpio(unsigned gpio);
30extern int pinctrl_gpio_direction_input(unsigned gpio); 30extern int pinctrl_gpio_direction_input(unsigned gpio);
31extern int pinctrl_gpio_direction_output(unsigned gpio); 31extern int pinctrl_gpio_direction_output(unsigned gpio);
32extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
32 33
33extern struct pinctrl * __must_check pinctrl_get(struct device *dev); 34extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
34extern void pinctrl_put(struct pinctrl *p); 35extern void pinctrl_put(struct pinctrl *p);
@@ -80,6 +81,11 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio)
80 return 0; 81 return 0;
81} 82}
82 83
84static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
85{
86 return 0;
87}
88
83static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) 89static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
84{ 90{
85 return NULL; 91 return NULL;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 12343caa114e..7620eb127cff 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -12,12 +12,6 @@
12#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H 12#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H
13#define __LINUX_PINCTRL_PINCONF_GENERIC_H 13#define __LINUX_PINCTRL_PINCONF_GENERIC_H
14 14
15/*
16 * You shouldn't even be able to compile with these enums etc unless you're
17 * using generic pin config. That is why this is defined out.
18 */
19#ifdef CONFIG_GENERIC_PINCONF
20
21/** 15/**
22 * enum pin_config_param - possible pin configuration parameters 16 * enum pin_config_param - possible pin configuration parameters
23 * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it 17 * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
@@ -92,6 +86,8 @@
92 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if 86 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
93 * you need to pass in custom configurations to the pin controller, use 87 * you need to pass in custom configurations to the pin controller, use
94 * PIN_CONFIG_END+1 as the base offset. 88 * PIN_CONFIG_END+1 as the base offset.
89 * @PIN_CONFIG_MAX: this is the maximum configuration value that can be
90 * presented using the packed format.
95 */ 91 */
96enum pin_config_param { 92enum pin_config_param {
97 PIN_CONFIG_BIAS_BUS_HOLD, 93 PIN_CONFIG_BIAS_BUS_HOLD,
@@ -112,49 +108,53 @@ enum pin_config_param {
112 PIN_CONFIG_OUTPUT, 108 PIN_CONFIG_OUTPUT,
113 PIN_CONFIG_POWER_SOURCE, 109 PIN_CONFIG_POWER_SOURCE,
114 PIN_CONFIG_SLEW_RATE, 110 PIN_CONFIG_SLEW_RATE,
115 PIN_CONFIG_END = 0x7FFF, 111 PIN_CONFIG_END = 0x7F,
116}; 112 PIN_CONFIG_MAX = 0xFF,
117
118#ifdef CONFIG_DEBUG_FS
119#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
120 .has_arg = d }
121
122struct pin_config_item {
123 const enum pin_config_param param;
124 const char * const display;
125 const char * const format;
126 bool has_arg;
127}; 113};
128#endif /* CONFIG_DEBUG_FS */
129 114
130/* 115/*
131 * Helpful configuration macro to be used in tables etc. 116 * Helpful configuration macro to be used in tables etc.
132 */ 117 */
133#define PIN_CONF_PACKED(p, a) ((a << 16) | ((unsigned long) p & 0xffffUL)) 118#define PIN_CONF_PACKED(p, a) ((a << 8) | ((unsigned long) p & 0xffUL))
134 119
135/* 120/*
136 * The following inlines stuffs a configuration parameter and data value 121 * The following inlines stuffs a configuration parameter and data value
137 * into and out of an unsigned long argument, as used by the generic pin config 122 * into and out of an unsigned long argument, as used by the generic pin config
138 * system. We put the parameter in the lower 16 bits and the argument in the 123 * system. We put the parameter in the lower 8 bits and the argument in the
139 * upper 16 bits. 124 * upper 24 bits.
140 */ 125 */
141 126
142static inline enum pin_config_param pinconf_to_config_param(unsigned long config) 127static inline enum pin_config_param pinconf_to_config_param(unsigned long config)
143{ 128{
144 return (enum pin_config_param) (config & 0xffffUL); 129 return (enum pin_config_param) (config & 0xffUL);
145} 130}
146 131
147static inline u16 pinconf_to_config_argument(unsigned long config) 132static inline u32 pinconf_to_config_argument(unsigned long config)
148{ 133{
149 return (enum pin_config_param) ((config >> 16) & 0xffffUL); 134 return (u32) ((config >> 8) & 0xffffffUL);
150} 135}
151 136
152static inline unsigned long pinconf_to_config_packed(enum pin_config_param param, 137static inline unsigned long pinconf_to_config_packed(enum pin_config_param param,
153 u16 argument) 138 u32 argument)
154{ 139{
155 return PIN_CONF_PACKED(param, argument); 140 return PIN_CONF_PACKED(param, argument);
156} 141}
157 142
143#ifdef CONFIG_GENERIC_PINCONF
144
145#ifdef CONFIG_DEBUG_FS
146#define PCONFDUMP(a, b, c, d) { \
147 .param = a, .display = b, .format = c, .has_arg = d \
148 }
149
150struct pin_config_item {
151 const enum pin_config_param param;
152 const char * const display;
153 const char * const format;
154 bool has_arg;
155};
156#endif /* CONFIG_DEBUG_FS */
157
158#ifdef CONFIG_OF 158#ifdef CONFIG_OF
159 159
160#include <linux/device.h> 160#include <linux/device.h>
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index a42e57da270d..8ce2d87a238b 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -141,12 +141,27 @@ struct pinctrl_desc {
141}; 141};
142 142
143/* External interface to pin controller */ 143/* External interface to pin controller */
144
145extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
146 struct device *dev, void *driver_data,
147 struct pinctrl_dev **pctldev);
148
149/* Please use pinctrl_register_and_init() instead */
144extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, 150extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
145 struct device *dev, void *driver_data); 151 struct device *dev, void *driver_data);
152
146extern void pinctrl_unregister(struct pinctrl_dev *pctldev); 153extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
154
155extern int devm_pinctrl_register_and_init(struct device *dev,
156 struct pinctrl_desc *pctldesc,
157 void *driver_data,
158 struct pinctrl_dev **pctldev);
159
160/* Please use devm_pinctrl_register_and_init() instead */
147extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, 161extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
148 struct pinctrl_desc *pctldesc, 162 struct pinctrl_desc *pctldesc,
149 void *driver_data); 163 void *driver_data);
164
150extern void devm_pinctrl_unregister(struct device *dev, 165extern void devm_pinctrl_unregister(struct device *dev,
151 struct pinctrl_dev *pctldev); 166 struct pinctrl_dev *pctldev);
152 167
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index e69e415d0d98..896cb71a382c 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -41,6 +41,7 @@ struct dw_dma_slave {
41 * @is_private: The device channels should be marked as private and not for 41 * @is_private: The device channels should be marked as private and not for
42 * by the general purpose DMA channel allocator. 42 * by the general purpose DMA channel allocator.
43 * @is_memcpy: The device channels do support memory-to-memory transfers. 43 * @is_memcpy: The device channels do support memory-to-memory transfers.
44 * @is_idma32: The type of the DMA controller is iDMA32
44 * @chan_allocation_order: Allocate channels starting from 0 or 7 45 * @chan_allocation_order: Allocate channels starting from 0 or 7
45 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. 46 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
46 * @block_size: Maximum block size supported by the controller 47 * @block_size: Maximum block size supported by the controller
@@ -53,6 +54,7 @@ struct dw_dma_platform_data {
53 unsigned int nr_channels; 54 unsigned int nr_channels;
54 bool is_private; 55 bool is_private;
55 bool is_memcpy; 56 bool is_memcpy;
57 bool is_idma32;
56#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ 58#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
57#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ 59#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
58 unsigned char chan_allocation_order; 60 unsigned char chan_allocation_order;
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
new file mode 100644
index 000000000000..942b0c3f8f08
--- /dev/null
+++ b/include/linux/platform_data/intel-spi.h
@@ -0,0 +1,31 @@
1/*
2 * Intel PCH/PCU SPI flash driver.
3 *
4 * Copyright (C) 2016, Intel Corporation
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef INTEL_SPI_PDATA_H
13#define INTEL_SPI_PDATA_H
14
15enum intel_spi_type {
16 INTEL_SPI_BYT = 1,
17 INTEL_SPI_LPT,
18 INTEL_SPI_BXT,
19};
20
21/**
22 * struct intel_spi_boardinfo - Board specific data for Intel SPI driver
23 * @type: Type which this controller is compatible with
24 * @writeable: The chip is writeable
25 */
26struct intel_spi_boardinfo {
27 enum intel_spi_type type;
28 bool writeable;
29};
30
31#endif /* INTEL_SPI_PDATA_H */
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
index 812d87307877..2c94ab568bfa 100644
--- a/include/linux/platform_data/media/ir-rx51.h
+++ b/include/linux/platform_data/media/ir-rx51.h
@@ -1,7 +1,7 @@
1#ifndef _LIRC_RX51_H 1#ifndef _IR_RX51_H
2#define _LIRC_RX51_H 2#define _IR_RX51_H
3 3
4struct lirc_rx51_platform_data { 4struct ir_rx51_platform_data {
5 int(*set_max_mpu_wakeup_lat)(struct device *dev, long t); 5 int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
6}; 6};
7 7
diff --git a/include/linux/platform_data/mmc-mxcmmc.h b/include/linux/platform_data/mmc-mxcmmc.h
index 29115f405af9..b0fdaa9bd185 100644
--- a/include/linux/platform_data/mmc-mxcmmc.h
+++ b/include/linux/platform_data/mmc-mxcmmc.h
@@ -1,6 +1,7 @@
1#ifndef ASMARM_ARCH_MMC_H 1#ifndef ASMARM_ARCH_MMC_H
2#define ASMARM_ARCH_MMC_H 2#define ASMARM_ARCH_MMC_H
3 3
4#include <linux/interrupt.h>
4#include <linux/mmc/host.h> 5#include <linux/mmc/host.h>
5 6
6struct device; 7struct device;
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
index 9bb63ac13f04..171a271c2cbd 100644
--- a/include/linux/platform_data/spi-ep93xx.h
+++ b/include/linux/platform_data/spi-ep93xx.h
@@ -5,25 +5,14 @@ struct spi_device;
5 5
6/** 6/**
7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor 7 * struct ep93xx_spi_info - EP93xx specific SPI descriptor
8 * @num_chipselect: number of chip selects on this board, must be 8 * @chipselect: array of gpio numbers to use as chip selects
9 * at least one 9 * @num_chipselect: ARRAY_SIZE(chipselect)
10 * @use_dma: use DMA for the transfers 10 * @use_dma: use DMA for the transfers
11 */ 11 */
12struct ep93xx_spi_info { 12struct ep93xx_spi_info {
13 int *chipselect;
13 int num_chipselect; 14 int num_chipselect;
14 bool use_dma; 15 bool use_dma;
15}; 16};
16 17
17/**
18 * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device
19 * @setup: setup the chip select mechanism
20 * @cleanup: cleanup the chip select mechanism
21 * @cs_control: control the device chip select
22 */
23struct ep93xx_spi_chip_ops {
24 int (*setup)(struct spi_device *spi);
25 void (*cleanup)(struct spi_device *spi);
26 void (*cs_control)(struct spi_device *spi, int value);
27};
28
29#endif /* __ASM_MACH_EP93XX_SPI_H */ 18#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
new file mode 100644
index 000000000000..ac72e115093c
--- /dev/null
+++ b/include/linux/platform_data/ti-aemif.h
@@ -0,0 +1,23 @@
1/*
2 * TI DaVinci AEMIF platform glue.
3 *
4 * Copyright (C) 2017 BayLibre SAS
5 *
6 * Author:
7 * Bartosz Golaszewski <bgolaszewski@baylibre.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef __TI_DAVINCI_AEMIF_DATA_H__
15#define __TI_DAVINCI_AEMIF_DATA_H__
16
17#include <linux/of_platform.h>
18
19struct aemif_platform_data {
20 struct of_dev_auxdata *dev_lookup;
21};
22
23#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 81ece61075df..5339ed5bd6f9 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -182,6 +182,9 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
182{ 182{
183 return -ENOTSUPP; 183 return -ENOTSUPP;
184} 184}
185
186#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
187#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
185#endif 188#endif
186 189
187static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, 190static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0edd88f93904..a6685b3dde26 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -78,6 +78,9 @@ struct dev_pm_set_opp_data {
78 78
79#if defined(CONFIG_PM_OPP) 79#if defined(CONFIG_PM_OPP)
80 80
81struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
82void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
83
81unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); 84unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
82 85
83unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); 86unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
@@ -88,7 +91,7 @@ int dev_pm_opp_get_opp_count(struct device *dev);
88unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); 91unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
89unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev); 92unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
90unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev); 93unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
91struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev); 94unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
92 95
93struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 96struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
94 unsigned long freq, 97 unsigned long freq,
@@ -99,6 +102,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
99 102
100struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, 103struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
101 unsigned long *freq); 104 unsigned long *freq);
105void dev_pm_opp_put(struct dev_pm_opp *opp);
102 106
103int dev_pm_opp_add(struct device *dev, unsigned long freq, 107int dev_pm_opp_add(struct device *dev, unsigned long freq,
104 unsigned long u_volt); 108 unsigned long u_volt);
@@ -108,22 +112,30 @@ int dev_pm_opp_enable(struct device *dev, unsigned long freq);
108 112
109int dev_pm_opp_disable(struct device *dev, unsigned long freq); 113int dev_pm_opp_disable(struct device *dev, unsigned long freq);
110 114
111struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev); 115int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
112int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, 116int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
113 unsigned int count); 117
114void dev_pm_opp_put_supported_hw(struct device *dev); 118struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
115int dev_pm_opp_set_prop_name(struct device *dev, const char *name); 119void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
116void dev_pm_opp_put_prop_name(struct device *dev); 120struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
121void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
117struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); 122struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
118void dev_pm_opp_put_regulators(struct opp_table *opp_table); 123void dev_pm_opp_put_regulators(struct opp_table *opp_table);
119int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); 124struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
120void dev_pm_opp_register_put_opp_helper(struct device *dev); 125void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table);
121int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); 126int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
122int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); 127int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
123int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); 128int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
124void dev_pm_opp_remove_table(struct device *dev); 129void dev_pm_opp_remove_table(struct device *dev);
125void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); 130void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
126#else 131#else
132static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
133{
134 return ERR_PTR(-ENOTSUPP);
135}
136
137static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
138
127static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 139static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
128{ 140{
129 return 0; 141 return 0;
@@ -159,9 +171,9 @@ static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device
159 return 0; 171 return 0;
160} 172}
161 173
162static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) 174static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
163{ 175{
164 return NULL; 176 return 0;
165} 177}
166 178
167static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 179static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -182,6 +194,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
182 return ERR_PTR(-ENOTSUPP); 194 return ERR_PTR(-ENOTSUPP);
183} 195}
184 196
197static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
198
185static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, 199static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
186 unsigned long u_volt) 200 unsigned long u_volt)
187{ 201{
@@ -202,35 +216,39 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
202 return 0; 216 return 0;
203} 217}
204 218
205static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( 219static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
206 struct device *dev)
207{ 220{
208 return ERR_PTR(-ENOTSUPP); 221 return -ENOTSUPP;
209} 222}
210 223
211static inline int dev_pm_opp_set_supported_hw(struct device *dev, 224static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
212 const u32 *versions,
213 unsigned int count)
214{ 225{
215 return -ENOTSUPP; 226 return -ENOTSUPP;
216} 227}
217 228
218static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} 229static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
230 const u32 *versions,
231 unsigned int count)
232{
233 return ERR_PTR(-ENOTSUPP);
234}
219 235
220static inline int dev_pm_opp_register_set_opp_helper(struct device *dev, 236static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
237
238static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
221 int (*set_opp)(struct dev_pm_set_opp_data *data)) 239 int (*set_opp)(struct dev_pm_set_opp_data *data))
222{ 240{
223 return -ENOTSUPP; 241 return ERR_PTR(-ENOTSUPP);
224} 242}
225 243
226static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {} 244static inline void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table) {}
227 245
228static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) 246static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
229{ 247{
230 return -ENOTSUPP; 248 return ERR_PTR(-ENOTSUPP);
231} 249}
232 250
233static inline void dev_pm_opp_put_prop_name(struct device *dev) {} 251static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
234 252
235static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) 253static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
236{ 254{
@@ -270,6 +288,7 @@ void dev_pm_opp_of_remove_table(struct device *dev);
270int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); 288int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
271void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); 289void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
272int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); 290int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
291struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
273#else 292#else
274static inline int dev_pm_opp_of_add_table(struct device *dev) 293static inline int dev_pm_opp_of_add_table(struct device *dev)
275{ 294{
@@ -293,6 +312,11 @@ static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct
293{ 312{
294 return -ENOTSUPP; 313 return -ENOTSUPP;
295} 314}
315
316static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
317{
318 return NULL;
319}
296#endif 320#endif
297 321
298#endif /* __LINUX_OPP_H__ */ 322#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 0f65d36c2a75..d4d34791e463 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -6,7 +6,6 @@
6 */ 6 */
7#include <linux/plist.h> 7#include <linux/plist.h>
8#include <linux/notifier.h> 8#include <linux/notifier.h>
9#include <linux/miscdevice.h>
10#include <linux/device.h> 9#include <linux/device.h>
11#include <linux/workqueue.h> 10#include <linux/workqueue.h>
12 11
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 51334edec506..a39540326417 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -80,6 +80,7 @@
80/********** kernel/mutexes **********/ 80/********** kernel/mutexes **********/
81#define MUTEX_DEBUG_INIT 0x11 81#define MUTEX_DEBUG_INIT 0x11
82#define MUTEX_DEBUG_FREE 0x22 82#define MUTEX_DEBUG_FREE 0x22
83#define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA)
83 84
84/********** lib/flex_array.c **********/ 85/********** lib/flex_array.c **********/
85#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ 86#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 62d44c176071..64aa189efe21 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -8,19 +8,9 @@
8#include <linux/alarmtimer.h> 8#include <linux/alarmtimer.h>
9 9
10 10
11static inline unsigned long long cputime_to_expires(cputime_t expires)
12{
13 return (__force unsigned long long)expires;
14}
15
16static inline cputime_t expires_to_cputime(unsigned long long expires)
17{
18 return (__force cputime_t)expires;
19}
20
21struct cpu_timer_list { 11struct cpu_timer_list {
22 struct list_head entry; 12 struct list_head entry;
23 unsigned long long expires, incr; 13 u64 expires, incr;
24 struct task_struct *task; 14 struct task_struct *task;
25 int firing; 15 int firing;
26}; 16};
@@ -129,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task);
129void posix_cpu_timers_exit(struct task_struct *task); 119void posix_cpu_timers_exit(struct task_struct *task);
130void posix_cpu_timers_exit_group(struct task_struct *task); 120void posix_cpu_timers_exit_group(struct task_struct *task);
131void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, 121void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
132 cputime_t *newval, cputime_t *oldval); 122 u64 *newval, u64 *oldval);
133 123
134long clock_nanosleep_restart(struct restart_block *restart_block); 124long clock_nanosleep_restart(struct restart_block *restart_block);
135 125
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index bed9557b69e7..b312bcef53da 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -4,8 +4,16 @@
4enum bq27xxx_chip { 4enum bq27xxx_chip {
5 BQ27000 = 1, /* bq27000, bq27200 */ 5 BQ27000 = 1, /* bq27000, bq27200 */
6 BQ27010, /* bq27010, bq27210 */ 6 BQ27010, /* bq27010, bq27210 */
7 BQ27500, /* bq27500 */ 7 BQ2750X, /* bq27500 deprecated alias */
8 BQ27510, /* bq27510, bq27520 */ 8 BQ2751X, /* bq27510, bq27520 deprecated alias */
9 BQ27500, /* bq27500/1 */
10 BQ27510G1, /* bq27510G1 */
11 BQ27510G2, /* bq27510G2 */
12 BQ27510G3, /* bq27510G3 */
13 BQ27520G1, /* bq27520G1 */
14 BQ27520G2, /* bq27520G2 */
15 BQ27520G3, /* bq27520G3 */
16 BQ27520G4, /* bq27520G4 */
9 BQ27530, /* bq27530, bq27531 */ 17 BQ27530, /* bq27530, bq27531 */
10 BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ 18 BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
11 BQ27545, /* bq27545 */ 19 BQ27545, /* bq27545 */
diff --git a/include/linux/property.h b/include/linux/property.h
index 856e50b2140c..64e3a9c6d95f 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -160,12 +160,12 @@ struct property_entry {
160 bool is_string; 160 bool is_string;
161 union { 161 union {
162 union { 162 union {
163 void *raw_data; 163 const void *raw_data;
164 u8 *u8_data; 164 const u8 *u8_data;
165 u16 *u16_data; 165 const u16 *u16_data;
166 u32 *u32_data; 166 const u32 *u32_data;
167 u64 *u64_data; 167 const u64 *u64_data;
168 const char **str; 168 const char * const *str;
169 } pointer; 169 } pointer;
170 union { 170 union {
171 unsigned long long raw_data; 171 unsigned long long raw_data;
@@ -241,8 +241,13 @@ struct property_entry {
241 .name = _name_, \ 241 .name = _name_, \
242} 242}
243 243
244struct property_entry *
245property_entries_dup(const struct property_entry *properties);
246
247void property_entries_free(const struct property_entry *properties);
248
244int device_add_properties(struct device *dev, 249int device_add_properties(struct device *dev,
245 struct property_entry *properties); 250 const struct property_entry *properties);
246void device_remove_properties(struct device *dev); 251void device_remove_properties(struct device *dev);
247 252
248bool device_dma_supported(struct device *dev); 253bool device_dma_supported(struct device *dev);
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 2052011bf9fb..6c70444da3b9 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -111,6 +111,11 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
111 return 0; 111 return 0;
112} 112}
113 113
114/*
115 * Note: resize (below) nests producer lock within consumer lock, so if you
116 * consume in interrupt or BH context, you must disable interrupts/BH when
117 * calling this.
118 */
114static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) 119static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
115{ 120{
116 int ret; 121 int ret;
@@ -242,6 +247,11 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
242 return ptr; 247 return ptr;
243} 248}
244 249
250/*
251 * Note: resize (below) nests producer lock within consumer lock, so if you
252 * call this in interrupt or BH context, you must disable interrupts/BH when
253 * producing.
254 */
245static inline void *ptr_ring_consume(struct ptr_ring *r) 255static inline void *ptr_ring_consume(struct ptr_ring *r)
246{ 256{
247 void *ptr; 257 void *ptr;
@@ -357,7 +367,7 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
357 void **old; 367 void **old;
358 void *ptr; 368 void *ptr;
359 369
360 while ((ptr = ptr_ring_consume(r))) 370 while ((ptr = __ptr_ring_consume(r)))
361 if (producer < size) 371 if (producer < size)
362 queue[producer++] = ptr; 372 queue[producer++] = ptr;
363 else if (destroy) 373 else if (destroy)
@@ -372,6 +382,12 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
372 return old; 382 return old;
373} 383}
374 384
385/*
386 * Note: producer lock is nested within consumer lock, so if you
387 * resize you must make sure all uses nest correctly.
388 * In particular if you consume ring in interrupt or BH context, you must
389 * disable interrupts/BH when doing so.
390 */
375static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, 391static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
376 void (*destroy)(void *)) 392 void (*destroy)(void *))
377{ 393{
@@ -382,17 +398,25 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
382 if (!queue) 398 if (!queue)
383 return -ENOMEM; 399 return -ENOMEM;
384 400
385 spin_lock_irqsave(&(r)->producer_lock, flags); 401 spin_lock_irqsave(&(r)->consumer_lock, flags);
402 spin_lock(&(r)->producer_lock);
386 403
387 old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); 404 old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
388 405
389 spin_unlock_irqrestore(&(r)->producer_lock, flags); 406 spin_unlock(&(r)->producer_lock);
407 spin_unlock_irqrestore(&(r)->consumer_lock, flags);
390 408
391 kfree(old); 409 kfree(old);
392 410
393 return 0; 411 return 0;
394} 412}
395 413
414/*
415 * Note: producer lock is nested within consumer lock, so if you
416 * resize you must make sure all uses nest correctly.
417 * In particular if you consume ring in interrupt or BH context, you must
418 * disable interrupts/BH when doing so.
419 */
396static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, 420static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
397 int size, 421 int size,
398 gfp_t gfp, void (*destroy)(void *)) 422 gfp_t gfp, void (*destroy)(void *))
@@ -412,10 +436,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
412 } 436 }
413 437
414 for (i = 0; i < nrings; ++i) { 438 for (i = 0; i < nrings; ++i) {
415 spin_lock_irqsave(&(rings[i])->producer_lock, flags); 439 spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
440 spin_lock(&(rings[i])->producer_lock);
416 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], 441 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
417 size, gfp, destroy); 442 size, gfp, destroy);
418 spin_unlock_irqrestore(&(rings[i])->producer_lock, flags); 443 spin_unlock(&(rings[i])->producer_lock);
444 spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
419 } 445 }
420 446
421 for (i = 0; i < nrings; ++i) 447 for (i = 0; i < nrings; ++i)
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 2d6f0c39ed68..a0522328d7aa 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -90,9 +90,9 @@
90#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */ 90#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */
91 91
92#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */ 92#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
93#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */ 93#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
94#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */ 94#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
95#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */ 95#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
96 96
97#define RX_THRESH_CE4100_DFLT 2 97#define RX_THRESH_CE4100_DFLT 2
98#define TX_THRESH_CE4100_DFLT 2 98#define TX_THRESH_CE4100_DFLT 2
@@ -106,9 +106,9 @@
106#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ 106#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
107 107
108/* QUARK_X1000 SSCR0 bit definition */ 108/* QUARK_X1000 SSCR0 bit definition */
109#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ 109#define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */
110#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ 110#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
111#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ 111#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
112#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ 112#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
113 113
114#define RX_THRESH_QUARK_X1000_DFLT 1 114#define RX_THRESH_QUARK_X1000_DFLT 1
@@ -121,8 +121,8 @@
121#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ 121#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
122#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ 122#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
123#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ 123#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
124#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ 124#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
125#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ 125#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
126 126
127/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ 127/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
128#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ 128#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 734deb094618..52966b9bfde3 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,10 +1,35 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2016 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
32
8#ifndef _COMMON_HSI_H 33#ifndef _COMMON_HSI_H
9#define _COMMON_HSI_H 34#define _COMMON_HSI_H
10#include <linux/types.h> 35#include <linux/types.h>
@@ -37,6 +62,7 @@
37#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 62#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
38 63
39#define ISCSI_CDU_TASK_SEG_TYPE 0 64#define ISCSI_CDU_TASK_SEG_TYPE 0
65#define FCOE_CDU_TASK_SEG_TYPE 0
40#define RDMA_CDU_TASK_SEG_TYPE 1 66#define RDMA_CDU_TASK_SEG_TYPE 1
41 67
42#define FW_ASSERT_GENERAL_ATTN_IDX 32 68#define FW_ASSERT_GENERAL_ATTN_IDX 32
@@ -180,6 +206,9 @@
180#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 206#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
181#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 207#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
182#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 208#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
209#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
210#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
211#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
183#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 212#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
184#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 213#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
185#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 214#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
@@ -236,6 +265,7 @@
236#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 265#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
237#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 266#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
238#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) 267#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
268#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
239#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 269#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
240#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 270#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
241#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) 271#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
@@ -266,6 +296,9 @@
266#define DQ_TCM_AGG_FLG_SHIFT_CF6 6 296#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
267#define DQ_TCM_AGG_FLG_SHIFT_CF7 7 297#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
268/* TCM agg counter flag selection (FW) */ 298/* TCM agg counter flag selection (FW) */
299#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
300#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
301#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
269#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) 302#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
270#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) 303#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
271 304
@@ -703,7 +736,7 @@ enum mf_mode {
703/* Per-protocol connection types */ 736/* Per-protocol connection types */
704enum protocol_type { 737enum protocol_type {
705 PROTOCOLID_ISCSI, 738 PROTOCOLID_ISCSI,
706 PROTOCOLID_RESERVED2, 739 PROTOCOLID_FCOE,
707 PROTOCOLID_ROCE, 740 PROTOCOLID_ROCE,
708 PROTOCOLID_CORE, 741 PROTOCOLID_CORE,
709 PROTOCOLID_ETH, 742 PROTOCOLID_ETH,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 1aa0727c4136..4b402fb0eaad 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef __ETH_COMMON__ 33#ifndef __ETH_COMMON__
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
new file mode 100644
index 000000000000..2e417a45c5f7
--- /dev/null
+++ b/include/linux/qed/fcoe_common.h
@@ -0,0 +1,715 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#ifndef __FCOE_COMMON__
10#define __FCOE_COMMON__
11/*********************/
12/* FCOE FW CONSTANTS */
13/*********************/
14
15#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
16#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600)
17
18struct fcoe_abts_pkt {
19 __le32 abts_rsp_fc_payload_lo;
20 __le16 abts_rsp_rx_id;
21 u8 abts_rsp_rctl;
22 u8 reserved2;
23};
24
25/* FCoE additional WQE (Sq/XferQ) information */
26union fcoe_additional_info_union {
27 __le32 previous_tid;
28 __le32 parent_tid;
29 __le32 burst_length;
30 __le32 seq_rec_updated_offset;
31};
32
33struct fcoe_exp_ro {
34 __le32 data_offset;
35 __le32 reserved;
36};
37
38union fcoe_cleanup_addr_exp_ro_union {
39 struct regpair abts_rsp_fc_payload_hi;
40 struct fcoe_exp_ro exp_ro;
41};
42
43/* FCoE Ramrod Command IDs */
44enum fcoe_completion_status {
45 FCOE_COMPLETION_STATUS_SUCCESS,
46 FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
47 FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
48 MAX_FCOE_COMPLETION_STATUS
49};
50
51struct fc_addr_nw {
52 u8 addr_lo;
53 u8 addr_mid;
54 u8 addr_hi;
55};
56
57/* FCoE connection offload */
58struct fcoe_conn_offload_ramrod_data {
59 struct regpair sq_pbl_addr;
60 struct regpair sq_curr_page_addr;
61 struct regpair sq_next_page_addr;
62 struct regpair xferq_pbl_addr;
63 struct regpair xferq_curr_page_addr;
64 struct regpair xferq_next_page_addr;
65 struct regpair respq_pbl_addr;
66 struct regpair respq_curr_page_addr;
67 struct regpair respq_next_page_addr;
68 __le16 dst_mac_addr_lo;
69 __le16 dst_mac_addr_mid;
70 __le16 dst_mac_addr_hi;
71 __le16 src_mac_addr_lo;
72 __le16 src_mac_addr_mid;
73 __le16 src_mac_addr_hi;
74 __le16 tx_max_fc_pay_len;
75 __le16 e_d_tov_timer_val;
76 __le16 rx_max_fc_pay_len;
77 __le16 vlan_tag;
78#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
79#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
80#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
81#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
82#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
83#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
84 __le16 physical_q0;
85 __le16 rec_rr_tov_timer_val;
86 struct fc_addr_nw s_id;
87 u8 max_conc_seqs_c3;
88 struct fc_addr_nw d_id;
89 u8 flags;
90#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
91#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
92#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
93#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
94#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
95#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
96#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
97#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
98#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
99#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
100#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
101#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
102 __le16 conn_id;
103 u8 def_q_idx;
104 u8 reserved[5];
105};
106
107/* FCoE terminate connection request */
108struct fcoe_conn_terminate_ramrod_data {
109 struct regpair terminate_params_addr;
110};
111
112struct fcoe_fast_sgl_ctx {
113 struct regpair sgl_start_addr;
114 __le32 sgl_byte_offset;
115 __le16 task_reuse_cnt;
116 __le16 init_offset_in_first_sge;
117};
118
119struct fcoe_slow_sgl_ctx {
120 struct regpair base_sgl_addr;
121 __le16 curr_sge_off;
122 __le16 remainder_num_sges;
123 __le16 curr_sgl_index;
124 __le16 reserved;
125};
126
127struct fcoe_sge {
128 struct regpair sge_addr;
129 __le16 size;
130 __le16 reserved0;
131 u8 reserved1[3];
132 u8 is_valid_sge;
133};
134
135union fcoe_data_desc_ctx {
136 struct fcoe_fast_sgl_ctx fast;
137 struct fcoe_slow_sgl_ctx slow;
138 struct fcoe_sge single_sge;
139};
140
141union fcoe_dix_desc_ctx {
142 struct fcoe_slow_sgl_ctx dix_sgl;
143 struct fcoe_sge cached_dix_sge;
144};
145
146struct fcoe_fcp_cmd_payload {
147 __le32 opaque[8];
148};
149
150struct fcoe_fcp_rsp_payload {
151 __le32 opaque[6];
152};
153
154struct fcoe_fcp_xfer_payload {
155 __le32 opaque[3];
156};
157
158/* FCoE firmware function init */
159struct fcoe_init_func_ramrod_data {
160 struct scsi_init_func_params func_params;
161 struct scsi_init_func_queues q_params;
162 __le16 mtu;
163 __le16 sq_num_pages_in_pbl;
164 __le32 reserved;
165};
166
167/* FCoE: Mode of the connection: Target or Initiator or both */
168enum fcoe_mode_type {
169 FCOE_INITIATOR_MODE = 0x0,
170 FCOE_TARGET_MODE = 0x1,
171 FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
172 MAX_FCOE_MODE_TYPE
173};
174
175struct fcoe_mstorm_fcoe_task_st_ctx_fp {
176 __le16 flags;
177#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
178#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
179#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
180#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
181 __le16 difDataResidue;
182 __le16 parent_id;
183 __le16 single_sge_saved_offset;
184 __le32 data_2_trns_rem;
185 __le32 offset_in_io;
186 union fcoe_dix_desc_ctx dix_desc;
187 union fcoe_data_desc_ctx data_desc;
188};
189
190struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
191 __le16 flags;
192#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
193#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
194#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
195#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
196#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
197#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
198#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
199#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
200#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
201#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
202#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
203#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
204#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
205#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
206#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
207#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
208#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
209#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
210#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
211#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
212#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
213#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
214 u8 tx_rx_sgl_mode;
215#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
216#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
217#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
218#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
219#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
220#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
221 u8 rsrv2;
222 __le32 num_prm_zero_read;
223 struct regpair rsp_buf_addr;
224};
225
226struct fcoe_rx_stat {
227 struct regpair fcoe_rx_byte_cnt;
228 struct regpair fcoe_rx_data_pkt_cnt;
229 struct regpair fcoe_rx_xfer_pkt_cnt;
230 struct regpair fcoe_rx_other_pkt_cnt;
231 __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
232 __le32 fcoe_silent_drop_pkt_rq_full_cnt;
233 __le32 fcoe_silent_drop_pkt_crc_error_cnt;
234 __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
235 __le32 fcoe_silent_drop_total_pkt_cnt;
236 __le32 rsrv;
237};
238
239enum fcoe_sgl_mode {
240 FCOE_SLOW_SGL,
241 FCOE_SINGLE_FAST_SGE,
242 FCOE_2_FAST_SGE,
243 FCOE_3_FAST_SGE,
244 FCOE_4_FAST_SGE,
245 FCOE_MUL_FAST_SGES,
246 MAX_FCOE_SGL_MODE
247};
248
249struct fcoe_stat_ramrod_data {
250 struct regpair stat_params_addr;
251};
252
253struct protection_info_ctx {
254 __le16 flags;
255#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
256#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
257#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
258#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
259#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
260#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
261#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
262#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
263#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
264#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
265#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
266#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
267 u8 dix_block_size;
268 u8 dst_size;
269};
270
271union protection_info_union_ctx {
272 struct protection_info_ctx info;
273 __le32 value;
274};
275
276struct fcp_rsp_payload_padded {
277 struct fcoe_fcp_rsp_payload rsp_payload;
278 __le32 reserved[2];
279};
280
281struct fcp_xfer_payload_padded {
282 struct fcoe_fcp_xfer_payload xfer_payload;
283 __le32 reserved[5];
284};
285
286struct fcoe_tx_data_params {
287 __le32 data_offset;
288 __le32 offset_in_io;
289 u8 flags;
290#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
291#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
292#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
293#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
294#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
295#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
296#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
297#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
298 u8 dif_residual;
299 __le16 seq_cnt;
300 __le16 single_sge_saved_offset;
301 __le16 next_dif_offset;
302 __le16 seq_id;
303 __le16 reserved3;
304};
305
306struct fcoe_tx_mid_path_params {
307 __le32 parameter;
308 u8 r_ctl;
309 u8 type;
310 u8 cs_ctl;
311 u8 df_ctl;
312 __le16 rx_id;
313 __le16 ox_id;
314};
315
316struct fcoe_tx_params {
317 struct fcoe_tx_data_params data;
318 struct fcoe_tx_mid_path_params mid_path;
319};
320
321union fcoe_tx_info_union_ctx {
322 struct fcoe_fcp_cmd_payload fcp_cmd_payload;
323 struct fcp_rsp_payload_padded fcp_rsp_payload;
324 struct fcp_xfer_payload_padded fcp_xfer_payload;
325 struct fcoe_tx_params tx_params;
326};
327
328struct ystorm_fcoe_task_st_ctx {
329 u8 task_type;
330 u8 sgl_mode;
331#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7
332#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
333#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F
334#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3
335 u8 cached_dix_sge;
336 u8 expect_first_xfer;
337 __le32 num_pbf_zero_write;
338 union protection_info_union_ctx protection_info_union;
339 __le32 data_2_trns_rem;
340 union fcoe_tx_info_union_ctx tx_info_union;
341 union fcoe_dix_desc_ctx dix_desc;
342 union fcoe_data_desc_ctx data_desc;
343 __le16 ox_id;
344 __le16 rx_id;
345 __le32 task_rety_identifier;
346 __le32 reserved1[2];
347};
348
349struct ystorm_fcoe_task_ag_ctx {
350 u8 byte0;
351 u8 byte1;
352 __le16 word0;
353 u8 flags0;
354#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
355#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
356#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
357#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
358#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
359#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
360#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
361#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
362#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
363#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
364 u8 flags1;
365#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
366#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
367#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
368#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
369#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
370#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
371#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
372#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
373#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
374#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
375 u8 flags2;
376#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
377#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
378#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
379#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
380#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
381#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
382#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
383#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
384#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
385#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
386#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
387#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
388#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
389#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
390#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
391#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
392 u8 byte2;
393 __le32 reg0;
394 u8 byte3;
395 u8 byte4;
396 __le16 rx_id;
397 __le16 word2;
398 __le16 word3;
399 __le16 word4;
400 __le16 word5;
401 __le32 reg1;
402 __le32 reg2;
403};
404
405struct tstorm_fcoe_task_ag_ctx {
406 u8 reserved;
407 u8 byte1;
408 __le16 icid;
409 u8 flags0;
410#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
411#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
412#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
413#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
414#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
415#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
416#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
417#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
418#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
419#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
420 u8 flags1;
421#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
422#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
423#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
424#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
425#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
426#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
427#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
428#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
429#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
430#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
431 u8 flags2;
432#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
433#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
434#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
435#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
436#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
437#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
438#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
439#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
440 u8 flags3;
441#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
442#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
443#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
444#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
445#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
446#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
447#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
448#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
449#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
450#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
451#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
452#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
453#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
454#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
455 u8 flags4;
456#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
457#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
458#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
459#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
460#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
461#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
462#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
463#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
464#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
465#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
466#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
467#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
468#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
469#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
470#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
471#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
472 u8 cleanup_state;
473 __le16 last_sent_tid;
474 __le32 rec_rr_tov_exp_timeout;
475 u8 byte3;
476 u8 byte4;
477 __le16 word2;
478 __le16 word3;
479 __le16 word4;
480 __le32 data_offset_end_of_seq;
481 __le32 data_offset_next;
482};
483
484struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
485 union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
486 __le16 flags;
487#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7
488#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
489#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
490#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3
491#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
492#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4
493#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
494#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5
495#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
496#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
497#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
498#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7
499#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
500#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8
501#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F
502#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10
503 __le16 seq_cnt;
504 u8 seq_id;
505 u8 ooo_rx_seq_id;
506 __le16 rx_id;
507 struct fcoe_abts_pkt abts_data;
508 __le32 e_d_tov_exp_timeout_val;
509 __le16 ooo_rx_seq_cnt;
510 __le16 reserved1;
511};
512
513struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
514 u8 task_type;
515 u8 dev_type;
516 u8 conf_supported;
517 u8 glbl_q_num;
518 __le32 cid;
519 __le32 fcp_cmd_trns_size;
520 __le32 rsrv;
521};
522
523struct tstorm_fcoe_task_st_ctx {
524 struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
525 struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
526};
527
528struct mstorm_fcoe_task_ag_ctx {
529 u8 byte0;
530 u8 byte1;
531 __le16 icid;
532 u8 flags0;
533#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
534#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
535#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
536#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
537#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
538#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
539#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
540#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
541#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
542#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
543 u8 flags1;
544#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
545#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
546#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
547#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
548#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
549#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
550#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
551#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
552#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
553#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
554 u8 flags2;
555#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
556#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
557#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
558#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
559#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
560#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
561#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
562#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
563#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
564#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
565#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
566#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
567#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
568#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
569#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
570#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
571 u8 cleanup_state;
572 __le32 received_bytes;
573 u8 byte3;
574 u8 glbl_q_num;
575 __le16 word1;
576 __le16 tid_to_xfer;
577 __le16 word3;
578 __le16 word4;
579 __le16 word5;
580 __le32 expected_bytes;
581 __le32 reg2;
582};
583
584struct mstorm_fcoe_task_st_ctx {
585 struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
586 struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
587};
588
589struct ustorm_fcoe_task_ag_ctx {
590 u8 reserved;
591 u8 byte1;
592 __le16 icid;
593 u8 flags0;
594#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
595#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
596#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
597#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
598#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
599#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
600#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
601#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
602 u8 flags1;
603#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
604#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
605#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
606#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
607#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
608#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
609#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
610#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
611 u8 flags2;
612#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
613#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
614#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
615#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
616#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
617#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
618#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
619#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
620#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
621#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
622#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
623#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
624#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
625#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
626#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
627#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
628 u8 flags3;
629#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
630#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
631#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
632#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
633#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
634#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
635#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
636#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
637#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
638#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
639 __le32 dif_err_intervals;
640 __le32 dif_error_1st_interval;
641 __le32 global_cq_num;
642 __le32 reg3;
643 __le32 reg4;
644 __le32 reg5;
645};
646
647struct fcoe_task_context {
648 struct ystorm_fcoe_task_st_ctx ystorm_st_context;
649 struct tdif_task_context tdif_context;
650 struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
651 struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
652 struct timers_context timer_context;
653 struct tstorm_fcoe_task_st_ctx tstorm_st_context;
654 struct regpair tstorm_st_padding[2];
655 struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
656 struct mstorm_fcoe_task_st_ctx mstorm_st_context;
657 struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
658 struct rdif_task_context rdif_context;
659};
660
661struct fcoe_tx_stat {
662 struct regpair fcoe_tx_byte_cnt;
663 struct regpair fcoe_tx_data_pkt_cnt;
664 struct regpair fcoe_tx_xfer_pkt_cnt;
665 struct regpair fcoe_tx_other_pkt_cnt;
666};
667
668struct fcoe_wqe {
669 __le16 task_id;
670 __le16 flags;
671#define FCOE_WQE_REQ_TYPE_MASK 0xF
672#define FCOE_WQE_REQ_TYPE_SHIFT 0
673#define FCOE_WQE_SGL_MODE_MASK 0x7
674#define FCOE_WQE_SGL_MODE_SHIFT 4
675#define FCOE_WQE_CONTINUATION_MASK 0x1
676#define FCOE_WQE_CONTINUATION_SHIFT 7
677#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1
678#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
679#define FCOE_WQE_SUPER_IO_MASK 0x1
680#define FCOE_WQE_SUPER_IO_SHIFT 9
681#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
682#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10
683#define FCOE_WQE_RESERVED0_MASK 0x1F
684#define FCOE_WQE_RESERVED0_SHIFT 11
685 union fcoe_additional_info_union additional_info_union;
686};
687
688struct xfrqe_prot_flags {
689 u8 flags;
690#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
691#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
692#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
693#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
694#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
695#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
696#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
697#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
698};
699
700struct fcoe_db_data {
701 u8 params;
702#define FCOE_DB_DATA_DEST_MASK 0x3
703#define FCOE_DB_DATA_DEST_SHIFT 0
704#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
705#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
706#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
707#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
708#define FCOE_DB_DATA_RESERVED_MASK 0x1
709#define FCOE_DB_DATA_RESERVED_SHIFT 5
710#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
711#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
712 u8 agg_flags;
713 __le16 sq_prod;
714};
715#endif /* __FCOE_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 8f64b1223c2f..4c5747babcf6 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef __ISCSI_COMMON__ 33#ifndef __ISCSI_COMMON__
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 37dfba101c6c..5cd7a4608c9b 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef _QED_CHAIN_H 33#ifndef _QED_CHAIN_H
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 7a52f7c58c37..4cd1f0ccfa36 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef _QED_ETH_IF_H 33#ifndef _QED_ETH_IF_H
@@ -53,7 +77,7 @@ struct qed_dev_eth_info {
53}; 77};
54 78
55struct qed_update_vport_rss_params { 79struct qed_update_vport_rss_params {
56 u16 rss_ind_table[128]; 80 void *rss_ind_table[128];
57 u32 rss_key[10]; 81 u32 rss_key[10];
58 u8 rss_caps; 82 u8 rss_caps;
59}; 83};
@@ -72,6 +96,7 @@ struct qed_update_vport_params {
72 96
73struct qed_start_vport_params { 97struct qed_start_vport_params {
74 bool remove_inner_vlan; 98 bool remove_inner_vlan;
99 bool handle_ptp_pkts;
75 bool gro_enable; 100 bool gro_enable;
76 bool drop_ttl0; 101 bool drop_ttl0;
77 u8 vport_id; 102 u8 vport_id;
@@ -135,6 +160,15 @@ struct qed_eth_cb_ops {
135 void (*force_mac) (void *dev, u8 *mac, bool forced); 160 void (*force_mac) (void *dev, u8 *mac, bool forced);
136}; 161};
137 162
163#define QED_MAX_PHC_DRIFT_PPB 291666666
164
165enum qed_ptp_filter_type {
166 QED_PTP_FILTER_L2,
167 QED_PTP_FILTER_IPV4,
168 QED_PTP_FILTER_IPV4_IPV6,
169 QED_PTP_FILTER_L2_IPV4_IPV6
170};
171
138#ifdef CONFIG_DCB 172#ifdef CONFIG_DCB
139/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration 173/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration
140 * of dcbnl_rtnl_ops structure. 174 * of dcbnl_rtnl_ops structure.
@@ -194,6 +228,17 @@ struct qed_eth_dcbnl_ops {
194}; 228};
195#endif 229#endif
196 230
231struct qed_eth_ptp_ops {
232 int (*hwtstamp_tx_on)(struct qed_dev *);
233 int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type);
234 int (*read_rx_ts)(struct qed_dev *, u64 *);
235 int (*read_tx_ts)(struct qed_dev *, u64 *);
236 int (*read_cc)(struct qed_dev *, u64 *);
237 int (*disable)(struct qed_dev *);
238 int (*adjfreq)(struct qed_dev *, s32);
239 int (*enable)(struct qed_dev *);
240};
241
197struct qed_eth_ops { 242struct qed_eth_ops {
198 const struct qed_common_ops *common; 243 const struct qed_common_ops *common;
199#ifdef CONFIG_QED_SRIOV 244#ifdef CONFIG_QED_SRIOV
@@ -202,6 +247,7 @@ struct qed_eth_ops {
202#ifdef CONFIG_DCB 247#ifdef CONFIG_DCB
203 const struct qed_eth_dcbnl_ops *dcb; 248 const struct qed_eth_dcbnl_ops *dcb;
204#endif 249#endif
250 const struct qed_eth_ptp_ops *ptp;
205 251
206 int (*fill_dev_info)(struct qed_dev *cdev, 252 int (*fill_dev_info)(struct qed_dev *cdev,
207 struct qed_dev_eth_info *info); 253 struct qed_dev_eth_info *info);
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
new file mode 100644
index 000000000000..bd6bcb809415
--- /dev/null
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -0,0 +1,145 @@
1#ifndef _QED_FCOE_IF_H
2#define _QED_FCOE_IF_H
3#include <linux/types.h>
4#include <linux/qed/qed_if.h>
5struct qed_fcoe_stats {
6 u64 fcoe_rx_byte_cnt;
7 u64 fcoe_rx_data_pkt_cnt;
8 u64 fcoe_rx_xfer_pkt_cnt;
9 u64 fcoe_rx_other_pkt_cnt;
10 u32 fcoe_silent_drop_pkt_cmdq_full_cnt;
11 u32 fcoe_silent_drop_pkt_rq_full_cnt;
12 u32 fcoe_silent_drop_pkt_crc_error_cnt;
13 u32 fcoe_silent_drop_pkt_task_invalid_cnt;
14 u32 fcoe_silent_drop_total_pkt_cnt;
15
16 u64 fcoe_tx_byte_cnt;
17 u64 fcoe_tx_data_pkt_cnt;
18 u64 fcoe_tx_xfer_pkt_cnt;
19 u64 fcoe_tx_other_pkt_cnt;
20};
21
22struct qed_dev_fcoe_info {
23 struct qed_dev_info common;
24
25 void __iomem *primary_dbq_rq_addr;
26 void __iomem *secondary_bdq_rq_addr;
27};
28
29struct qed_fcoe_params_offload {
30 dma_addr_t sq_pbl_addr;
31 dma_addr_t sq_curr_page_addr;
32 dma_addr_t sq_next_page_addr;
33
34 u8 src_mac[ETH_ALEN];
35 u8 dst_mac[ETH_ALEN];
36
37 u16 tx_max_fc_pay_len;
38 u16 e_d_tov_timer_val;
39 u16 rec_tov_timer_val;
40 u16 rx_max_fc_pay_len;
41 u16 vlan_tag;
42
43 struct fc_addr_nw s_id;
44 u8 max_conc_seqs_c3;
45 struct fc_addr_nw d_id;
46 u8 flags;
47 u8 def_q_idx;
48};
49
50#define MAX_TID_BLOCKS_FCOE (512)
51struct qed_fcoe_tid {
52 u32 size; /* In bytes per task */
53 u32 num_tids_per_block;
54 u8 *blocks[MAX_TID_BLOCKS_FCOE];
55};
56
57struct qed_fcoe_cb_ops {
58 struct qed_common_cb_ops common;
59 u32 (*get_login_failures)(void *cookie);
60};
61
62void qed_fcoe_set_pf_params(struct qed_dev *cdev,
63 struct qed_fcoe_pf_params *params);
64
65/**
66 * struct qed_fcoe_ops - qed FCoE operations.
67 * @common: common operations pointer
68 * @fill_dev_info: fills FCoE specific information
69 * @param cdev
70 * @param info
71 * @return 0 on sucesss, otherwise error value.
72 * @register_ops: register FCoE operations
73 * @param cdev
74 * @param ops - specified using qed_iscsi_cb_ops
75 * @param cookie - driver private
76 * @ll2: light L2 operations pointer
77 * @start: fcoe in FW
78 * @param cdev
79 * @param tasks - qed will fill information about tasks
80 * return 0 on success, otherwise error value.
81 * @stop: stops fcoe in FW
82 * @param cdev
83 * return 0 on success, otherwise error value.
84 * @acquire_conn: acquire a new fcoe connection
85 * @param cdev
86 * @param handle - qed will fill handle that should be
87 * used henceforth as identifier of the
88 * connection.
89 * @param p_doorbell - qed will fill the address of the
90 * doorbell.
91 * return 0 on sucesss, otherwise error value.
92 * @release_conn: release a previously acquired fcoe connection
93 * @param cdev
94 * @param handle - the connection handle.
95 * return 0 on success, otherwise error value.
96 * @offload_conn: configures an offloaded connection
97 * @param cdev
98 * @param handle - the connection handle.
99 * @param conn_info - the configuration to use for the
100 * offload.
101 * return 0 on success, otherwise error value.
102 * @destroy_conn: stops an offloaded connection
103 * @param cdev
104 * @param handle - the connection handle.
105 * @param terminate_params
106 * return 0 on success, otherwise error value.
107 * @get_stats: gets FCoE related statistics
108 * @param cdev
109 * @param stats - pointer to struck that would be filled
110 * we stats
111 * return 0 on success, error otherwise.
112 */
113struct qed_fcoe_ops {
114 const struct qed_common_ops *common;
115
116 int (*fill_dev_info)(struct qed_dev *cdev,
117 struct qed_dev_fcoe_info *info);
118
119 void (*register_ops)(struct qed_dev *cdev,
120 struct qed_fcoe_cb_ops *ops, void *cookie);
121
122 const struct qed_ll2_ops *ll2;
123
124 int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks);
125
126 int (*stop)(struct qed_dev *cdev);
127
128 int (*acquire_conn)(struct qed_dev *cdev,
129 u32 *handle,
130 u32 *fw_cid, void __iomem **p_doorbell);
131
132 int (*release_conn)(struct qed_dev *cdev, u32 handle);
133
134 int (*offload_conn)(struct qed_dev *cdev,
135 u32 handle,
136 struct qed_fcoe_params_offload *conn_info);
137 int (*destroy_conn)(struct qed_dev *cdev,
138 u32 handle, dma_addr_t terminate_params);
139
140 int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats);
141};
142
143const struct qed_fcoe_ops *qed_get_fcoe_ops(void);
144void qed_put_fcoe_ops(void);
145#endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 4b454f4f5b25..fde56c436f71 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1,10 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * Copyright (c) 2015 QLogic Corporation 3 *
4 * 4 * This software is available to you under a choice of one of two
5 * This software is available under the terms of the GNU General Public License 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * General Public License (GPL) Version 2, available from the file
7 * this source tree. 7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
8 */ 31 */
9 32
10#ifndef _QED_IF_H 33#ifndef _QED_IF_H
@@ -36,7 +59,6 @@ enum dcbx_protocol_type {
36 59
37#define QED_ROCE_PROTOCOL_INDEX (3) 60#define QED_ROCE_PROTOCOL_INDEX (3)
38 61
39#ifdef CONFIG_DCB
40#define QED_LLDP_CHASSIS_ID_STAT_LEN 4 62#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
41#define QED_LLDP_PORT_ID_STAT_LEN 4 63#define QED_LLDP_PORT_ID_STAT_LEN 4
42#define QED_DCBX_MAX_APP_PROTOCOL 32 64#define QED_DCBX_MAX_APP_PROTOCOL 32
@@ -132,7 +154,6 @@ struct qed_dcbx_get {
132 struct qed_dcbx_remote_params remote; 154 struct qed_dcbx_remote_params remote;
133 struct qed_dcbx_admin_params local; 155 struct qed_dcbx_admin_params local;
134}; 156};
135#endif
136 157
137enum qed_led_mode { 158enum qed_led_mode {
138 QED_LED_MODE_OFF, 159 QED_LED_MODE_OFF,
@@ -159,6 +180,38 @@ struct qed_eth_pf_params {
159 u16 num_cons; 180 u16 num_cons;
160}; 181};
161 182
183struct qed_fcoe_pf_params {
184 /* The following parameters are used during protocol-init */
185 u64 glbl_q_params_addr;
186 u64 bdq_pbl_base_addr[2];
187
188 /* The following parameters are used during HW-init
189 * and these parameters need to be passed as arguments
190 * to update_pf_params routine invoked before slowpath start
191 */
192 u16 num_cons;
193 u16 num_tasks;
194
195 /* The following parameters are used during protocol-init */
196 u16 sq_num_pbl_pages;
197
198 u16 cq_num_entries;
199 u16 cmdq_num_entries;
200 u16 rq_buffer_log_size;
201 u16 mtu;
202 u16 dummy_icid;
203 u16 bdq_xoff_threshold[2];
204 u16 bdq_xon_threshold[2];
205 u16 rq_buffer_size;
206 u8 num_cqs; /* num of global CQs */
207 u8 log_page_size;
208 u8 gl_rq_pi;
209 u8 gl_cmd_pi;
210 u8 debug_mode;
211 u8 is_target;
212 u8 bdq_pbl_num_entries[2];
213};
214
 162/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ 215
163struct qed_iscsi_pf_params { 216struct qed_iscsi_pf_params {
164 u64 glbl_q_params_addr; 217 u64 glbl_q_params_addr;
@@ -222,6 +275,7 @@ struct qed_rdma_pf_params {
222 275
223struct qed_pf_params { 276struct qed_pf_params {
224 struct qed_eth_pf_params eth_pf_params; 277 struct qed_eth_pf_params eth_pf_params;
278 struct qed_fcoe_pf_params fcoe_pf_params;
225 struct qed_iscsi_pf_params iscsi_pf_params; 279 struct qed_iscsi_pf_params iscsi_pf_params;
226 struct qed_rdma_pf_params rdma_pf_params; 280 struct qed_rdma_pf_params rdma_pf_params;
227}; 281};
@@ -282,6 +336,7 @@ enum qed_sb_type {
282enum qed_protocol { 336enum qed_protocol {
283 QED_PROTOCOL_ETH, 337 QED_PROTOCOL_ETH,
284 QED_PROTOCOL_ISCSI, 338 QED_PROTOCOL_ISCSI,
339 QED_PROTOCOL_FCOE,
285}; 340};
286 341
287enum qed_link_mode_bits { 342enum qed_link_mode_bits {
@@ -368,6 +423,7 @@ struct qed_int_info {
368struct qed_common_cb_ops { 423struct qed_common_cb_ops {
369 void (*link_update)(void *dev, 424 void (*link_update)(void *dev,
370 struct qed_link_output *link); 425 struct qed_link_output *link);
426 void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
371}; 427};
372 428
373struct qed_selftest_ops { 429struct qed_selftest_ops {
@@ -471,6 +527,10 @@ struct qed_common_ops {
471 527
472 void (*simd_handler_clean)(struct qed_dev *cdev, 528 void (*simd_handler_clean)(struct qed_dev *cdev,
473 int index); 529 int index);
530 int (*dbg_grc)(struct qed_dev *cdev,
531 void *buffer, u32 *num_dumped_bytes);
532
533 int (*dbg_grc_size)(struct qed_dev *cdev);
474 534
475 int (*dbg_all_data) (struct qed_dev *cdev, void *buffer); 535 int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
476 536
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
index 5a4f8d0899e9..ac2e6a3199a3 100644
--- a/include/linux/qed/qed_iov_if.h
+++ b/include/linux/qed/qed_iov_if.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef _QED_IOV_IF_H 33#ifndef _QED_IOV_IF_H
@@ -29,6 +53,8 @@ struct qed_iov_hv_ops {
29 53
30 int (*set_rate) (struct qed_dev *cdev, int vfid, 54 int (*set_rate) (struct qed_dev *cdev, int vfid,
31 u32 min_rate, u32 max_rate); 55 u32 min_rate, u32 max_rate);
56
57 int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust);
32}; 58};
33 59
34#endif 60#endif
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index d27912480cb3..f70bb81b8b6a 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef _QED_ISCSI_IF_H 33#ifndef _QED_ISCSI_IF_H
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index fd75c265dba3..4fb4666ea879 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -1,10 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
2 * 3 *
3 * Copyright (c) 2015 QLogic Corporation 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4 * 9 *
5 * This software is available under the terms of the GNU General Public License 10 * Redistribution and use in source and binary forms, with or
6 * (GPL) Version 2, available from the file COPYING in the main directory of 11 * without modification, are permitted provided that the following
7 * this source tree. 12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
8 */ 31 */
9 32
10#ifndef _QED_LL2_IF_H 33#ifndef _QED_LL2_IF_H
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
index 53047d3fa678..f742d4312c9d 100644
--- a/include/linux/qed/qed_roce_if.h
+++ b/include/linux/qed/qed_roce_if.h
@@ -1,5 +1,5 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index f48d64b0e2fb..3b8dd551a98c 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -1,5 +1,5 @@
1/* QLogic qedr NIC Driver 1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 7663725faa94..f773aa5e746f 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef __RDMA_COMMON__ 33#ifndef __RDMA_COMMON__
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 2eeaf3dc6646..bad02df213df 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef __ROCE_COMMON__ 33#ifndef __ROCE_COMMON__
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 3b8e1efd9bc2..03f3e37ab059 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef __STORAGE_COMMON__ 33#ifndef __STORAGE_COMMON__
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dc3889d1bbe6..46fe7856f1b2 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -1,9 +1,33 @@
1/* QLogic qed NIC Driver 1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation 2 * Copyright (c) 2015-2017 QLogic Corporation
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available to you under a choice of one of two
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * licenses. You may choose to be licensed under the terms of the GNU
6 * this source tree. 6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
7 */ 31 */
8 32
9#ifndef __TCP_COMMON__ 33#ifndef __TCP_COMMON__
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 321f9ed552a9..6ade6a52d9d4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
444#error "Unknown RCU implementation specified to kernel configuration" 444#error "Unknown RCU implementation specified to kernel configuration"
445#endif 445#endif
446 446
447#define RCU_SCHEDULER_INACTIVE 0
448#define RCU_SCHEDULER_INIT 1
449#define RCU_SCHEDULER_RUNNING 2
450
447/* 451/*
448 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic 452 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
449 * initialization and destruction of rcu_head on the stack. rcu_head structures 453 * initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -1157,5 +1161,17 @@ do { \
1157 ftrace_dump(oops_dump_mode); \ 1161 ftrace_dump(oops_dump_mode); \
1158} while (0) 1162} while (0)
1159 1163
1164/*
1165 * Place this after a lock-acquisition primitive to guarantee that
1166 * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies
1167 * if the UNLOCK and LOCK are executed by the same CPU or if the
1168 * UNLOCK and LOCK operate on the same lock variable.
1169 */
1170#ifdef CONFIG_PPC
1171#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
1172#else /* #ifdef CONFIG_PPC */
1173#define smp_mb__after_unlock_lock() do { } while (0)
1174#endif /* #else #ifdef CONFIG_PPC */
1175
1160 1176
1161#endif /* __LINUX_RCUPDATE_H */ 1177#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index ac81e4063b40..4f9b2fa2173d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,6 +27,12 @@
27 27
28#include <linux/cache.h> 28#include <linux/cache.h>
29 29
30struct rcu_dynticks;
31static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
32{
33 return 0;
34}
35
30static inline unsigned long get_state_synchronize_rcu(void) 36static inline unsigned long get_state_synchronize_rcu(void)
31{ 37{
32 return 0; 38 return 0;
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
new file mode 100644
index 000000000000..a4ede51b3e7c
--- /dev/null
+++ b/include/linux/rcuwait.h
@@ -0,0 +1,63 @@
1#ifndef _LINUX_RCUWAIT_H_
2#define _LINUX_RCUWAIT_H_
3
4#include <linux/rcupdate.h>
5
6/*
7 * rcuwait provides a way of blocking and waking up a single
8 * task in an rcu-safe manner; where it is forbidden to use
9 * after exit_notify(). task_struct is not properly rcu protected,
10 * unless dealing with rcu-aware lists, ie: find_task_by_*().
11 *
12 * Alternatively we have task_rcu_dereference(), but the return
13 * semantics have different implications which would break the
14 * wakeup side. The only time @task is non-nil is when a user is
15 * blocked (or checking if it needs to) on a condition, and reset
16 * as soon as we know that the condition has succeeded and are
17 * awoken.
18 */
19struct rcuwait {
20 struct task_struct *task;
21};
22
23#define __RCUWAIT_INITIALIZER(name) \
24 { .task = NULL, }
25
26static inline void rcuwait_init(struct rcuwait *w)
27{
28 w->task = NULL;
29}
30
31extern void rcuwait_wake_up(struct rcuwait *w);
32
33/*
34 * The caller is responsible for locking around rcuwait_wait_event(),
35 * such that writes to @task are properly serialized.
36 */
37#define rcuwait_wait_event(w, condition) \
38({ \
39 /* \
40 * Complain if we are called after do_exit()/exit_notify(), \
41 * as we cannot rely on the rcu critical region for the \
42 * wakeup side. \
43 */ \
44 WARN_ON(current->exit_state); \
45 \
46 rcu_assign_pointer((w)->task, current); \
47 for (;;) { \
48 /* \
49 * Implicit barrier (A) pairs with (B) in \
50 * rcuwait_wake_up(). \
51 */ \
52 set_current_state(TASK_UNINTERRUPTIBLE); \
53 if (condition) \
54 break; \
55 \
56 schedule(); \
57 } \
58 \
59 WRITE_ONCE((w)->task, NULL); \
60 __set_current_state(TASK_RUNNING); \
61})
62
63#endif /* _LINUX_RCUWAIT_H_ */
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
new file mode 100644
index 000000000000..600aadf9cca4
--- /dev/null
+++ b/include/linux/refcount.h
@@ -0,0 +1,294 @@
1#ifndef _LINUX_REFCOUNT_H
2#define _LINUX_REFCOUNT_H
3
4/*
5 * Variant of atomic_t specialized for reference counts.
6 *
7 * The interface matches the atomic_t interface (to aid in porting) but only
8 * provides the few functions one should use for reference counting.
9 *
10 * It differs in that the counter saturates at UINT_MAX and will not move once
11 * there. This avoids wrapping the counter and causing 'spurious'
12 * use-after-free issues.
13 *
14 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
15 * and provide only what is strictly required for refcounts.
16 *
17 * The increments are fully relaxed; these will not provide ordering. The
18 * rationale is that whatever is used to obtain the object we're increasing the
19 * reference count on will provide the ordering. For locked data structures,
 20 * it's the lock acquire, for RCU/lockless data structures it's the dependent
21 * load.
22 *
23 * Do note that inc_not_zero() provides a control dependency which will order
24 * future stores against the inc, this ensures we'll never modify the object
25 * if we did not in fact acquire a reference.
26 *
27 * The decrements will provide release order, such that all the prior loads and
28 * stores will be issued before, it also provides a control dependency, which
29 * will order us against the subsequent free().
30 *
31 * The control dependency is against the load of the cmpxchg (ll/sc) that
32 * succeeded. This means the stores aren't fully ordered, but this is fine
33 * because the 1->0 transition indicates no concurrency.
34 *
35 * Note that the allocator is responsible for ordering things between free()
36 * and alloc().
37 *
38 */
39
40#include <linux/atomic.h>
41#include <linux/bug.h>
42#include <linux/mutex.h>
43#include <linux/spinlock.h>
44
45#ifdef CONFIG_DEBUG_REFCOUNT
46#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
47#define __refcount_check __must_check
48#else
49#define REFCOUNT_WARN(cond, str) (void)(cond)
50#define __refcount_check
51#endif
52
53typedef struct refcount_struct {
54 atomic_t refs;
55} refcount_t;
56
57#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
58
59static inline void refcount_set(refcount_t *r, unsigned int n)
60{
61 atomic_set(&r->refs, n);
62}
63
64static inline unsigned int refcount_read(const refcount_t *r)
65{
66 return atomic_read(&r->refs);
67}
68
69static inline __refcount_check
70bool refcount_add_not_zero(unsigned int i, refcount_t *r)
71{
72 unsigned int old, new, val = atomic_read(&r->refs);
73
74 for (;;) {
75 if (!val)
76 return false;
77
78 if (unlikely(val == UINT_MAX))
79 return true;
80
81 new = val + i;
82 if (new < val)
83 new = UINT_MAX;
84 old = atomic_cmpxchg_relaxed(&r->refs, val, new);
85 if (old == val)
86 break;
87
88 val = old;
89 }
90
91 REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
92
93 return true;
94}
95
96static inline void refcount_add(unsigned int i, refcount_t *r)
97{
98 REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
99}
100
101/*
102 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
103 *
104 * Provides no memory ordering, it is assumed the caller has guaranteed the
105 * object memory to be stable (RCU, etc.). It does provide a control dependency
106 * and thereby orders future stores. See the comment on top.
107 */
108static inline __refcount_check
109bool refcount_inc_not_zero(refcount_t *r)
110{
111 unsigned int old, new, val = atomic_read(&r->refs);
112
113 for (;;) {
114 new = val + 1;
115
116 if (!val)
117 return false;
118
119 if (unlikely(!new))
120 return true;
121
122 old = atomic_cmpxchg_relaxed(&r->refs, val, new);
123 if (old == val)
124 break;
125
126 val = old;
127 }
128
129 REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
130
131 return true;
132}
133
134/*
135 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
136 *
137 * Provides no memory ordering, it is assumed the caller already has a
138 * reference on the object, will WARN when this is not so.
139 */
140static inline void refcount_inc(refcount_t *r)
141{
142 REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
143}
144
145/*
146 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
147 * decrement when saturated at UINT_MAX.
148 *
149 * Provides release memory ordering, such that prior loads and stores are done
150 * before, and provides a control dependency such that free() must come after.
151 * See the comment on top.
152 */
153static inline __refcount_check
154bool refcount_sub_and_test(unsigned int i, refcount_t *r)
155{
156 unsigned int old, new, val = atomic_read(&r->refs);
157
158 for (;;) {
159 if (unlikely(val == UINT_MAX))
160 return false;
161
162 new = val - i;
163 if (new > val) {
164 REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
165 return false;
166 }
167
168 old = atomic_cmpxchg_release(&r->refs, val, new);
169 if (old == val)
170 break;
171
172 val = old;
173 }
174
175 return !new;
176}
177
178static inline __refcount_check
179bool refcount_dec_and_test(refcount_t *r)
180{
181 return refcount_sub_and_test(1, r);
182}
183
184/*
185 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
186 * when saturated at UINT_MAX.
187 *
188 * Provides release memory ordering, such that prior loads and stores are done
189 * before.
190 */
191static inline
192void refcount_dec(refcount_t *r)
193{
194 REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
195}
196
197/*
198 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
199 * success thereof.
200 *
201 * Like all decrement operations, it provides release memory order and provides
202 * a control dependency.
203 *
204 * It can be used like a try-delete operator; this explicit case is provided
205 * and not cmpxchg in generic, because that would allow implementing unsafe
206 * operations.
207 */
208static inline __refcount_check
209bool refcount_dec_if_one(refcount_t *r)
210{
211 return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
212}
213
214/*
215 * No atomic_t counterpart, it decrements unless the value is 1, in which case
216 * it will return false.
217 *
218 * Was often done like: atomic_add_unless(&var, -1, 1)
219 */
220static inline __refcount_check
221bool refcount_dec_not_one(refcount_t *r)
222{
223 unsigned int old, new, val = atomic_read(&r->refs);
224
225 for (;;) {
226 if (unlikely(val == UINT_MAX))
227 return true;
228
229 if (val == 1)
230 return false;
231
232 new = val - 1;
233 if (new > val) {
234 REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
235 return true;
236 }
237
238 old = atomic_cmpxchg_release(&r->refs, val, new);
239 if (old == val)
240 break;
241
242 val = old;
243 }
244
245 return true;
246}
247
248/*
249 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
250 * to decrement when saturated at UINT_MAX.
251 *
252 * Provides release memory ordering, such that prior loads and stores are done
253 * before, and provides a control dependency such that free() must come after.
254 * See the comment on top.
255 */
256static inline __refcount_check
257bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
258{
259 if (refcount_dec_not_one(r))
260 return false;
261
262 mutex_lock(lock);
263 if (!refcount_dec_and_test(r)) {
264 mutex_unlock(lock);
265 return false;
266 }
267
268 return true;
269}
270
271/*
272 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
273 * decrement when saturated at UINT_MAX.
274 *
275 * Provides release memory ordering, such that prior loads and stores are done
276 * before, and provides a control dependency such that free() must come after.
277 * See the comment on top.
278 */
279static inline __refcount_check
280bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
281{
282 if (refcount_dec_not_one(r))
283 return false;
284
285 spin_lock(lock);
286 if (!refcount_dec_and_test(r)) {
287 spin_unlock(lock);
288 return false;
289 }
290
291 return true;
292}
293
294#endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f6673132431d..e88649225a60 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -40,12 +40,13 @@ enum regcache_type {
40}; 40};
41 41
42/** 42/**
43 * Default value for a register. We use an array of structs rather 43 * struct reg_default - Default value for a register.
44 * than a simple array as many modern devices have very sparse
45 * register maps.
46 * 44 *
47 * @reg: Register address. 45 * @reg: Register address.
48 * @def: Register default value. 46 * @def: Register default value.
47 *
48 * We use an array of structs rather than a simple array as many modern devices
49 * have very sparse register maps.
49 */ 50 */
50struct reg_default { 51struct reg_default {
51 unsigned int reg; 52 unsigned int reg;
@@ -53,12 +54,14 @@ struct reg_default {
53}; 54};
54 55
55/** 56/**
56 * Register/value pairs for sequences of writes with an optional delay in 57 * struct reg_sequence - An individual write from a sequence of writes.
57 * microseconds to be applied after each write.
58 * 58 *
59 * @reg: Register address. 59 * @reg: Register address.
60 * @def: Register value. 60 * @def: Register value.
61 * @delay_us: Delay to be applied after the register write in microseconds 61 * @delay_us: Delay to be applied after the register write in microseconds
62 *
63 * Register/value pairs for sequences of writes with an optional delay in
64 * microseconds to be applied after each write.
62 */ 65 */
63struct reg_sequence { 66struct reg_sequence {
64 unsigned int reg; 67 unsigned int reg;
@@ -98,6 +101,7 @@ struct reg_sequence {
98 101
99/** 102/**
100 * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs 103 * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
104 *
101 * @map: Regmap to read from 105 * @map: Regmap to read from
102 * @addr: Address to poll 106 * @addr: Address to poll
103 * @val: Unsigned integer variable to read the value into 107 * @val: Unsigned integer variable to read the value into
@@ -146,8 +150,8 @@ enum regmap_endian {
146}; 150};
147 151
148/** 152/**
149 * A register range, used for access related checks 153 * struct regmap_range - A register range, used for access related checks
150 * (readable/writeable/volatile/precious checks) 154 * (readable/writeable/volatile/precious checks)
151 * 155 *
152 * @range_min: address of first register 156 * @range_min: address of first register
153 * @range_max: address of last register 157 * @range_max: address of last register
@@ -159,16 +163,18 @@ struct regmap_range {
159 163
160#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, } 164#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
161 165
162/* 166/**
163 * A table of ranges including some yes ranges and some no ranges. 167 * struct regmap_access_table - A table of register ranges for access checks
164 * If a register belongs to a no_range, the corresponding check function
165 * will return false. If a register belongs to a yes range, the corresponding
166 * check function will return true. "no_ranges" are searched first.
167 * 168 *
168 * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges" 169 * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges"
169 * @n_yes_ranges: size of the above array 170 * @n_yes_ranges: size of the above array
170 * @no_ranges: pointer to an array of regmap ranges used as "no ranges" 171 * @no_ranges: pointer to an array of regmap ranges used as "no ranges"
171 * @n_no_ranges: size of the above array 172 * @n_no_ranges: size of the above array
173 *
174 * A table of ranges including some yes ranges and some no ranges.
175 * If a register belongs to a no_range, the corresponding check function
176 * will return false. If a register belongs to a yes range, the corresponding
177 * check function will return true. "no_ranges" are searched first.
172 */ 178 */
173struct regmap_access_table { 179struct regmap_access_table {
174 const struct regmap_range *yes_ranges; 180 const struct regmap_range *yes_ranges;
@@ -181,7 +187,7 @@ typedef void (*regmap_lock)(void *);
181typedef void (*regmap_unlock)(void *); 187typedef void (*regmap_unlock)(void *);
182 188
183/** 189/**
184 * Configuration for the register map of a device. 190 * struct regmap_config - Configuration for the register map of a device.
185 * 191 *
186 * @name: Optional name of the regmap. Useful when a device has multiple 192 * @name: Optional name of the regmap. Useful when a device has multiple
187 * register regions. 193 * register regions.
@@ -314,22 +320,24 @@ struct regmap_config {
314}; 320};
315 321
316/** 322/**
317 * Configuration for indirectly accessed or paged registers. 323 * struct regmap_range_cfg - Configuration for indirectly accessed or paged
318 * Registers, mapped to this virtual range, are accessed in two steps: 324 * registers.
319 * 1. page selector register update;
320 * 2. access through data window registers.
321 * 325 *
322 * @name: Descriptive name for diagnostics 326 * @name: Descriptive name for diagnostics
323 * 327 *
324 * @range_min: Address of the lowest register address in virtual range. 328 * @range_min: Address of the lowest register address in virtual range.
325 * @range_max: Address of the highest register in virtual range. 329 * @range_max: Address of the highest register in virtual range.
326 * 330 *
327 * @page_sel_reg: Register with selector field. 331 * @selector_reg: Register with selector field.
328 * @page_sel_mask: Bit shift for selector value. 332 * @selector_mask: Bit shift for selector value.
329 * @page_sel_shift: Bit mask for selector value. 333 * @selector_shift: Bit mask for selector value.
330 * 334 *
331 * @window_start: Address of first (lowest) register in data window. 335 * @window_start: Address of first (lowest) register in data window.
332 * @window_len: Number of registers in data window. 336 * @window_len: Number of registers in data window.
337 *
338 * Registers, mapped to this virtual range, are accessed in two steps:
339 * 1. page selector register update;
340 * 2. access through data window registers.
333 */ 341 */
334struct regmap_range_cfg { 342struct regmap_range_cfg {
335 const char *name; 343 const char *name;
@@ -372,7 +380,8 @@ typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
372typedef void (*regmap_hw_free_context)(void *context); 380typedef void (*regmap_hw_free_context)(void *context);
373 381
374/** 382/**
375 * Description of a hardware bus for the register map infrastructure. 383 * struct regmap_bus - Description of a hardware bus for the register map
384 * infrastructure.
376 * 385 *
377 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex 386 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
378 * to perform locking. This field is ignored if custom lock/unlock 387 * to perform locking. This field is ignored if custom lock/unlock
@@ -385,6 +394,10 @@ typedef void (*regmap_hw_free_context)(void *context);
385 * must serialise with respect to non-async I/O. 394 * must serialise with respect to non-async I/O.
386 * @reg_write: Write a single register value to the given register address. This 395 * @reg_write: Write a single register value to the given register address. This
387 * write operation has to complete when returning from the function. 396 * write operation has to complete when returning from the function.
397 * @reg_update_bits: Update bits operation to be used against volatile
398 * registers, intended for devices supporting some mechanism
399 * for setting clearing bits without having to
400 * read/modify/write.
388 * @read: Read operation. Data is returned in the buffer used to transmit 401 * @read: Read operation. Data is returned in the buffer used to transmit
389 * data. 402 * data.
390 * @reg_read: Read a single register value from a given register address. 403 * @reg_read: Read a single register value from a given register address.
@@ -514,7 +527,7 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
514#endif 527#endif
515 528
516/** 529/**
517 * regmap_init(): Initialise register map 530 * regmap_init() - Initialise register map
518 * 531 *
519 * @dev: Device that will be interacted with 532 * @dev: Device that will be interacted with
520 * @bus: Bus-specific callbacks to use with device 533 * @bus: Bus-specific callbacks to use with device
@@ -532,7 +545,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
532 const struct regmap_config *config); 545 const struct regmap_config *config);
533 546
534/** 547/**
535 * regmap_init_i2c(): Initialise register map 548 * regmap_init_i2c() - Initialise register map
536 * 549 *
537 * @i2c: Device that will be interacted with 550 * @i2c: Device that will be interacted with
538 * @config: Configuration for register map 551 * @config: Configuration for register map
@@ -545,9 +558,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
545 i2c, config) 558 i2c, config)
546 559
547/** 560/**
548 * regmap_init_spi(): Initialise register map 561 * regmap_init_spi() - Initialise register map
549 * 562 *
550 * @spi: Device that will be interacted with 563 * @dev: Device that will be interacted with
551 * @config: Configuration for register map 564 * @config: Configuration for register map
552 * 565 *
553 * The return value will be an ERR_PTR() on error or a valid pointer to 566 * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -558,8 +571,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
558 dev, config) 571 dev, config)
559 572
560/** 573/**
561 * regmap_init_spmi_base(): Create regmap for the Base register space 574 * regmap_init_spmi_base() - Create regmap for the Base register space
562 * @sdev: SPMI device that will be interacted with 575 *
576 * @dev: SPMI device that will be interacted with
563 * @config: Configuration for register map 577 * @config: Configuration for register map
564 * 578 *
565 * The return value will be an ERR_PTR() on error or a valid pointer to 579 * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -570,8 +584,9 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
570 dev, config) 584 dev, config)
571 585
572/** 586/**
573 * regmap_init_spmi_ext(): Create regmap for Ext register space 587 * regmap_init_spmi_ext() - Create regmap for Ext register space
574 * @sdev: Device that will be interacted with 588 *
589 * @dev: Device that will be interacted with
575 * @config: Configuration for register map 590 * @config: Configuration for register map
576 * 591 *
577 * The return value will be an ERR_PTR() on error or a valid pointer to 592 * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -582,7 +597,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
582 dev, config) 597 dev, config)
583 598
584/** 599/**
585 * regmap_init_mmio_clk(): Initialise register map with register clock 600 * regmap_init_mmio_clk() - Initialise register map with register clock
586 * 601 *
587 * @dev: Device that will be interacted with 602 * @dev: Device that will be interacted with
588 * @clk_id: register clock consumer ID 603 * @clk_id: register clock consumer ID
@@ -597,7 +612,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
597 dev, clk_id, regs, config) 612 dev, clk_id, regs, config)
598 613
599/** 614/**
600 * regmap_init_mmio(): Initialise register map 615 * regmap_init_mmio() - Initialise register map
601 * 616 *
602 * @dev: Device that will be interacted with 617 * @dev: Device that will be interacted with
603 * @regs: Pointer to memory-mapped IO region 618 * @regs: Pointer to memory-mapped IO region
@@ -610,7 +625,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
610 regmap_init_mmio_clk(dev, NULL, regs, config) 625 regmap_init_mmio_clk(dev, NULL, regs, config)
611 626
612/** 627/**
613 * regmap_init_ac97(): Initialise AC'97 register map 628 * regmap_init_ac97() - Initialise AC'97 register map
614 * 629 *
615 * @ac97: Device that will be interacted with 630 * @ac97: Device that will be interacted with
616 * @config: Configuration for register map 631 * @config: Configuration for register map
@@ -624,7 +639,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
624bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); 639bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
625 640
626/** 641/**
627 * devm_regmap_init(): Initialise managed register map 642 * devm_regmap_init() - Initialise managed register map
628 * 643 *
629 * @dev: Device that will be interacted with 644 * @dev: Device that will be interacted with
630 * @bus: Bus-specific callbacks to use with device 645 * @bus: Bus-specific callbacks to use with device
@@ -641,7 +656,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
641 dev, bus, bus_context, config) 656 dev, bus, bus_context, config)
642 657
643/** 658/**
644 * devm_regmap_init_i2c(): Initialise managed register map 659 * devm_regmap_init_i2c() - Initialise managed register map
645 * 660 *
646 * @i2c: Device that will be interacted with 661 * @i2c: Device that will be interacted with
647 * @config: Configuration for register map 662 * @config: Configuration for register map
@@ -655,9 +670,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
655 i2c, config) 670 i2c, config)
656 671
657/** 672/**
658 * devm_regmap_init_spi(): Initialise register map 673 * devm_regmap_init_spi() - Initialise register map
659 * 674 *
660 * @spi: Device that will be interacted with 675 * @dev: Device that will be interacted with
661 * @config: Configuration for register map 676 * @config: Configuration for register map
662 * 677 *
663 * The return value will be an ERR_PTR() on error or a valid pointer 678 * The return value will be an ERR_PTR() on error or a valid pointer
@@ -669,8 +684,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
669 dev, config) 684 dev, config)
670 685
671/** 686/**
672 * devm_regmap_init_spmi_base(): Create managed regmap for Base register space 687 * devm_regmap_init_spmi_base() - Create managed regmap for Base register space
673 * @sdev: SPMI device that will be interacted with 688 *
689 * @dev: SPMI device that will be interacted with
674 * @config: Configuration for register map 690 * @config: Configuration for register map
675 * 691 *
676 * The return value will be an ERR_PTR() on error or a valid pointer 692 * The return value will be an ERR_PTR() on error or a valid pointer
@@ -682,8 +698,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
682 dev, config) 698 dev, config)
683 699
684/** 700/**
685 * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space 701 * devm_regmap_init_spmi_ext() - Create managed regmap for Ext register space
686 * @sdev: SPMI device that will be interacted with 702 *
703 * @dev: SPMI device that will be interacted with
687 * @config: Configuration for register map 704 * @config: Configuration for register map
688 * 705 *
689 * The return value will be an ERR_PTR() on error or a valid pointer 706 * The return value will be an ERR_PTR() on error or a valid pointer
@@ -695,7 +712,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
695 dev, config) 712 dev, config)
696 713
697/** 714/**
698 * devm_regmap_init_mmio_clk(): Initialise managed register map with clock 715 * devm_regmap_init_mmio_clk() - Initialise managed register map with clock
699 * 716 *
700 * @dev: Device that will be interacted with 717 * @dev: Device that will be interacted with
701 * @clk_id: register clock consumer ID 718 * @clk_id: register clock consumer ID
@@ -711,7 +728,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
711 dev, clk_id, regs, config) 728 dev, clk_id, regs, config)
712 729
713/** 730/**
714 * devm_regmap_init_mmio(): Initialise managed register map 731 * devm_regmap_init_mmio() - Initialise managed register map
715 * 732 *
716 * @dev: Device that will be interacted with 733 * @dev: Device that will be interacted with
717 * @regs: Pointer to memory-mapped IO region 734 * @regs: Pointer to memory-mapped IO region
@@ -725,7 +742,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
725 devm_regmap_init_mmio_clk(dev, NULL, regs, config) 742 devm_regmap_init_mmio_clk(dev, NULL, regs, config)
726 743
727/** 744/**
728 * devm_regmap_init_ac97(): Initialise AC'97 register map 745 * devm_regmap_init_ac97() - Initialise AC'97 register map
729 * 746 *
730 * @ac97: Device that will be interacted with 747 * @ac97: Device that will be interacted with
731 * @config: Configuration for register map 748 * @config: Configuration for register map
@@ -800,7 +817,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
800 unsigned int nranges); 817 unsigned int nranges);
801 818
802/** 819/**
803 * Description of an register field 820 * struct reg_field - Description of an register field
804 * 821 *
805 * @reg: Offset of the register within the regmap bank 822 * @reg: Offset of the register within the regmap bank
806 * @lsb: lsb of the register field. 823 * @lsb: lsb of the register field.
@@ -841,7 +858,7 @@ int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
841 bool *change, bool async, bool force); 858 bool *change, bool async, bool force);
842 859
843/** 860/**
844 * Description of an IRQ for the generic regmap irq_chip. 861 * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
845 * 862 *
846 * @reg_offset: Offset of the status/mask register within the bank 863 * @reg_offset: Offset of the status/mask register within the bank
847 * @mask: Mask used to flag/control the register. 864 * @mask: Mask used to flag/control the register.
@@ -861,9 +878,7 @@ struct regmap_irq {
861 [_irq] = { .reg_offset = (_off), .mask = (_mask) } 878 [_irq] = { .reg_offset = (_off), .mask = (_mask) }
862 879
863/** 880/**
864 * Description of a generic regmap irq_chip. This is not intended to 881 * struct regmap_irq_chip - Description of a generic regmap irq_chip.
865 * handle every possible interrupt controller, but it should handle a
866 * substantial proportion of those that are found in the wild.
867 * 882 *
868 * @name: Descriptive name for IRQ controller. 883 * @name: Descriptive name for IRQ controller.
869 * 884 *
@@ -897,6 +912,10 @@ struct regmap_irq {
897 * after handling the interrupts in regmap_irq_handler(). 912 * after handling the interrupts in regmap_irq_handler().
898 * @irq_drv_data: Driver specific IRQ data which is passed as parameter when 913 * @irq_drv_data: Driver specific IRQ data which is passed as parameter when
899 * driver specific pre/post interrupt handler is called. 914 * driver specific pre/post interrupt handler is called.
915 *
916 * This is not intended to handle every possible interrupt controller, but
917 * it should handle a substantial proportion of those that are found in the
918 * wild.
900 */ 919 */
901struct regmap_irq_chip { 920struct regmap_irq_chip {
902 const char *name; 921 const char *name;
diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h
deleted file mode 100644
index aca36bc83315..000000000000
--- a/include/linux/rfkill-regulator.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * rfkill-regulator.c - Regulator consumer driver for rfkill
3 *
4 * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com>
5 * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#ifndef __LINUX_RFKILL_REGULATOR_H
14#define __LINUX_RFKILL_REGULATOR_H
15
16/*
17 * Use "vrfkill" as supply id when declaring the regulator consumer:
18 *
19 * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = {
20 * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" },
21 * };
22 *
23 * If you have several regulator driven rfkill, you can append a numerical id to
24 * .dev_name as done above, and use the same id when declaring the platform
25 * device:
26 *
27 * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = {
28 * .name = "ezx-bluetooth",
29 * .type = RFKILL_TYPE_BLUETOOTH,
30 * };
31 *
32 * static struct platform_device a910_rfkill = {
33 * .name = "rfkill-regulator",
34 * .id = 0,
35 * .dev = {
36 * .platform_data = &ezx_rfkill_bt_data,
37 * },
38 * };
39 */
40
41#include <linux/rfkill.h>
42
43struct rfkill_regulator_platform_data {
44 char *name; /* the name for the rfkill switch */
45 enum rfkill_type type; /* the type as specified in rfkill.h */
46};
47
48#endif /* __LINUX_RFKILL_REGULATOR_H */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5c132d3188be..f2e12a845910 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -61,6 +61,7 @@ struct rhlist_head {
61/** 61/**
62 * struct bucket_table - Table of hash buckets 62 * struct bucket_table - Table of hash buckets
63 * @size: Number of hash buckets 63 * @size: Number of hash buckets
64 * @nest: Number of bits of first-level nested table.
64 * @rehash: Current bucket being rehashed 65 * @rehash: Current bucket being rehashed
65 * @hash_rnd: Random seed to fold into hash 66 * @hash_rnd: Random seed to fold into hash
66 * @locks_mask: Mask to apply before accessing locks[] 67 * @locks_mask: Mask to apply before accessing locks[]
@@ -68,10 +69,12 @@ struct rhlist_head {
68 * @walkers: List of active walkers 69 * @walkers: List of active walkers
69 * @rcu: RCU structure for freeing the table 70 * @rcu: RCU structure for freeing the table
70 * @future_tbl: Table under construction during rehashing 71 * @future_tbl: Table under construction during rehashing
72 * @ntbl: Nested table used when out of memory.
71 * @buckets: size * hash buckets 73 * @buckets: size * hash buckets
72 */ 74 */
73struct bucket_table { 75struct bucket_table {
74 unsigned int size; 76 unsigned int size;
77 unsigned int nest;
75 unsigned int rehash; 78 unsigned int rehash;
76 u32 hash_rnd; 79 u32 hash_rnd;
77 unsigned int locks_mask; 80 unsigned int locks_mask;
@@ -81,7 +84,7 @@ struct bucket_table {
81 84
82 struct bucket_table __rcu *future_tbl; 85 struct bucket_table __rcu *future_tbl;
83 86
84 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; 87 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
85}; 88};
86 89
87/** 90/**
@@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
374 void *arg); 377 void *arg);
375void rhashtable_destroy(struct rhashtable *ht); 378void rhashtable_destroy(struct rhashtable *ht);
376 379
380struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
381 unsigned int hash);
382struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
383 struct bucket_table *tbl,
384 unsigned int hash);
385
377#define rht_dereference(p, ht) \ 386#define rht_dereference(p, ht) \
378 rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) 387 rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
379 388
@@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht);
389#define rht_entry(tpos, pos, member) \ 398#define rht_entry(tpos, pos, member) \
390 ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) 399 ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
391 400
401static inline struct rhash_head __rcu *const *rht_bucket(
402 const struct bucket_table *tbl, unsigned int hash)
403{
404 return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
405 &tbl->buckets[hash];
406}
407
408static inline struct rhash_head __rcu **rht_bucket_var(
409 struct bucket_table *tbl, unsigned int hash)
410{
411 return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
412 &tbl->buckets[hash];
413}
414
415static inline struct rhash_head __rcu **rht_bucket_insert(
416 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
417{
418 return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
419 &tbl->buckets[hash];
420}
421
392/** 422/**
393 * rht_for_each_continue - continue iterating over hash chain 423 * rht_for_each_continue - continue iterating over hash chain
394 * @pos: the &struct rhash_head to use as a loop cursor. 424 * @pos: the &struct rhash_head to use as a loop cursor.
@@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht);
408 * @hash: the hash value / bucket index 438 * @hash: the hash value / bucket index
409 */ 439 */
410#define rht_for_each(pos, tbl, hash) \ 440#define rht_for_each(pos, tbl, hash) \
411 rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) 441 rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
412 442
413/** 443/**
414 * rht_for_each_entry_continue - continue iterating over hash chain 444 * rht_for_each_entry_continue - continue iterating over hash chain
@@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht);
433 * @member: name of the &struct rhash_head within the hashable struct. 463 * @member: name of the &struct rhash_head within the hashable struct.
434 */ 464 */
435#define rht_for_each_entry(tpos, pos, tbl, hash, member) \ 465#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
436 rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ 466 rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
437 tbl, hash, member) 467 tbl, hash, member)
438 468
439/** 469/**
@@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht);
448 * This hash chain list-traversal primitive allows for the looped code to 478 * This hash chain list-traversal primitive allows for the looped code to
449 * remove the loop cursor from the list. 479 * remove the loop cursor from the list.
450 */ 480 */
451#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ 481#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
452 for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ 482 for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
453 next = !rht_is_a_nulls(pos) ? \ 483 next = !rht_is_a_nulls(pos) ? \
454 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ 484 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
455 (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ 485 (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
456 pos = next, \ 486 pos = next, \
457 next = !rht_is_a_nulls(pos) ? \ 487 next = !rht_is_a_nulls(pos) ? \
458 rht_dereference_bucket(pos->next, tbl, hash) : NULL) 488 rht_dereference_bucket(pos->next, tbl, hash) : NULL)
459 489
460/** 490/**
@@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht);
485 * traversal is guarded by rcu_read_lock(). 515 * traversal is guarded by rcu_read_lock().
486 */ 516 */
487#define rht_for_each_rcu(pos, tbl, hash) \ 517#define rht_for_each_rcu(pos, tbl, hash) \
488 rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) 518 rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
489 519
490/** 520/**
491 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain 521 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht);
518 * the _rcu mutation primitives such as rhashtable_insert() as long as the 548 * the _rcu mutation primitives such as rhashtable_insert() as long as the
519 * traversal is guarded by rcu_read_lock(). 549 * traversal is guarded by rcu_read_lock().
520 */ 550 */
521#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ 551#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
522 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ 552 rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
523 tbl, hash, member) 553 tbl, hash, member)
524 554
525/** 555/**
@@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup(
565 .ht = ht, 595 .ht = ht,
566 .key = key, 596 .key = key,
567 }; 597 };
568 const struct bucket_table *tbl; 598 struct bucket_table *tbl;
569 struct rhash_head *he; 599 struct rhash_head *he;
570 unsigned int hash; 600 unsigned int hash;
571 601
@@ -697,8 +727,12 @@ slow_path:
697 } 727 }
698 728
699 elasticity = ht->elasticity; 729 elasticity = ht->elasticity;
700 pprev = &tbl->buckets[hash]; 730 pprev = rht_bucket_insert(ht, tbl, hash);
701 rht_for_each(head, tbl, hash) { 731 data = ERR_PTR(-ENOMEM);
732 if (!pprev)
733 goto out;
734
735 rht_for_each_continue(head, *pprev, tbl, hash) {
702 struct rhlist_head *plist; 736 struct rhlist_head *plist;
703 struct rhlist_head *list; 737 struct rhlist_head *list;
704 738
@@ -736,7 +770,7 @@ slow_path:
736 if (unlikely(rht_grow_above_100(ht, tbl))) 770 if (unlikely(rht_grow_above_100(ht, tbl)))
737 goto slow_path; 771 goto slow_path;
738 772
739 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); 773 head = rht_dereference_bucket(*pprev, tbl, hash);
740 774
741 RCU_INIT_POINTER(obj->next, head); 775 RCU_INIT_POINTER(obj->next, head);
742 if (rhlist) { 776 if (rhlist) {
@@ -746,7 +780,7 @@ slow_path:
746 RCU_INIT_POINTER(list->next, NULL); 780 RCU_INIT_POINTER(list->next, NULL);
747 } 781 }
748 782
749 rcu_assign_pointer(tbl->buckets[hash], obj); 783 rcu_assign_pointer(*pprev, obj);
750 784
751 atomic_inc(&ht->nelems); 785 atomic_inc(&ht->nelems);
752 if (rht_grow_above_75(ht, tbl)) 786 if (rht_grow_above_75(ht, tbl))
@@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one(
955 989
956 spin_lock_bh(lock); 990 spin_lock_bh(lock);
957 991
958 pprev = &tbl->buckets[hash]; 992 pprev = rht_bucket_var(tbl, hash);
959 rht_for_each(he, tbl, hash) { 993 rht_for_each_continue(he, *pprev, tbl, hash) {
960 struct rhlist_head *list; 994 struct rhlist_head *list;
961 995
962 list = container_of(he, struct rhlist_head, rhead); 996 list = container_of(he, struct rhlist_head, rhead);
@@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast(
1107 1141
1108 spin_lock_bh(lock); 1142 spin_lock_bh(lock);
1109 1143
1110 pprev = &tbl->buckets[hash]; 1144 pprev = rht_bucket_var(tbl, hash);
1111 rht_for_each(he, tbl, hash) { 1145 rht_for_each_continue(he, *pprev, tbl, hash) {
1112 if (he != obj_old) { 1146 if (he != obj_old) {
1113 pprev = &he->next; 1147 pprev = &he->next;
1114 continue; 1148 continue;
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index f017fd6e69c4..d4e0a204c118 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -259,6 +259,26 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
259unsigned int sbitmap_weight(const struct sbitmap *sb); 259unsigned int sbitmap_weight(const struct sbitmap *sb);
260 260
261/** 261/**
262 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
263 * @sb: Bitmap to show.
264 * @m: struct seq_file to write to.
265 *
266 * This is intended for debugging. The format may change at any time.
267 */
268void sbitmap_show(struct sbitmap *sb, struct seq_file *m);
269
270/**
271 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
272 * seq_file.
273 * @sb: Bitmap to show.
274 * @m: struct seq_file to write to.
275 *
276 * This is intended for debugging. The output isn't guaranteed to be internally
277 * consistent.
278 */
279void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);
280
281/**
262 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific 282 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
263 * memory node. 283 * memory node.
264 * @sbq: Bitmap queue to initialize. 284 * @sbq: Bitmap queue to initialize.
@@ -370,4 +390,14 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
370 */ 390 */
371void sbitmap_queue_wake_all(struct sbitmap_queue *sbq); 391void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
372 392
393/**
394 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
395 * seq_file.
396 * @sbq: Bitmap queue to show.
397 * @m: struct seq_file to write to.
398 *
399 * This is intended for debugging. The format may change at any time.
400 */
401void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
402
373#endif /* __LINUX_SCALE_BITMAP_H */ 403#endif /* __LINUX_SCALE_BITMAP_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ad3ec9ec61f7..c8e519d0b4a3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -29,7 +29,6 @@ struct sched_param {
29 29
30#include <asm/page.h> 30#include <asm/page.h>
31#include <asm/ptrace.h> 31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33 32
34#include <linux/smp.h> 33#include <linux/smp.h>
35#include <linux/sem.h> 34#include <linux/sem.h>
@@ -227,7 +226,7 @@ extern void proc_sched_set_task(struct task_struct *p);
227extern char ___assert_task_state[1 - 2*!!( 226extern char ___assert_task_state[1 - 2*!!(
228 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 227 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
229 228
230/* Convenience macros for the sake of set_task_state */ 229/* Convenience macros for the sake of set_current_state */
231#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 230#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
232#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) 231#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
233#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) 232#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
@@ -254,17 +253,6 @@ extern char ___assert_task_state[1 - 2*!!(
254 253
255#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 254#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
256 255
257#define __set_task_state(tsk, state_value) \
258 do { \
259 (tsk)->task_state_change = _THIS_IP_; \
260 (tsk)->state = (state_value); \
261 } while (0)
262#define set_task_state(tsk, state_value) \
263 do { \
264 (tsk)->task_state_change = _THIS_IP_; \
265 smp_store_mb((tsk)->state, (state_value)); \
266 } while (0)
267
268#define __set_current_state(state_value) \ 256#define __set_current_state(state_value) \
269 do { \ 257 do { \
270 current->task_state_change = _THIS_IP_; \ 258 current->task_state_change = _THIS_IP_; \
@@ -277,20 +265,6 @@ extern char ___assert_task_state[1 - 2*!!(
277 } while (0) 265 } while (0)
278 266
279#else 267#else
280
281/*
282 * @tsk had better be current, or you get to keep the pieces.
283 *
284 * The only reason is that computing current can be more expensive than
285 * using a pointer that's already available.
286 *
287 * Therefore, see set_current_state().
288 */
289#define __set_task_state(tsk, state_value) \
290 do { (tsk)->state = (state_value); } while (0)
291#define set_task_state(tsk, state_value) \
292 smp_store_mb((tsk)->state, (state_value))
293
294/* 268/*
295 * set_current_state() includes a barrier so that the write of current->state 269 * set_current_state() includes a barrier so that the write of current->state
296 * is correctly serialised wrt the caller's subsequent test of whether to 270 * is correctly serialised wrt the caller's subsequent test of whether to
@@ -461,12 +435,10 @@ extern signed long schedule_timeout_idle(signed long timeout);
461asmlinkage void schedule(void); 435asmlinkage void schedule(void);
462extern void schedule_preempt_disabled(void); 436extern void schedule_preempt_disabled(void);
463 437
438extern int __must_check io_schedule_prepare(void);
439extern void io_schedule_finish(int token);
464extern long io_schedule_timeout(long timeout); 440extern long io_schedule_timeout(long timeout);
465 441extern void io_schedule(void);
466static inline void io_schedule(void)
467{
468 io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
469}
470 442
471void __noreturn do_task_dead(void); 443void __noreturn do_task_dead(void);
472 444
@@ -565,15 +537,13 @@ struct pacct_struct {
565 int ac_flag; 537 int ac_flag;
566 long ac_exitcode; 538 long ac_exitcode;
567 unsigned long ac_mem; 539 unsigned long ac_mem;
568 cputime_t ac_utime, ac_stime; 540 u64 ac_utime, ac_stime;
569 unsigned long ac_minflt, ac_majflt; 541 unsigned long ac_minflt, ac_majflt;
570}; 542};
571 543
572struct cpu_itimer { 544struct cpu_itimer {
573 cputime_t expires; 545 u64 expires;
574 cputime_t incr; 546 u64 incr;
575 u32 error;
576 u32 incr_error;
577}; 547};
578 548
579/** 549/**
@@ -587,8 +557,8 @@ struct cpu_itimer {
587 */ 557 */
588struct prev_cputime { 558struct prev_cputime {
589#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 559#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
590 cputime_t utime; 560 u64 utime;
591 cputime_t stime; 561 u64 stime;
592 raw_spinlock_t lock; 562 raw_spinlock_t lock;
593#endif 563#endif
594}; 564};
@@ -603,8 +573,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
603 573
604/** 574/**
605 * struct task_cputime - collected CPU time counts 575 * struct task_cputime - collected CPU time counts
606 * @utime: time spent in user mode, in &cputime_t units 576 * @utime: time spent in user mode, in nanoseconds
607 * @stime: time spent in kernel mode, in &cputime_t units 577 * @stime: time spent in kernel mode, in nanoseconds
608 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds 578 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
609 * 579 *
610 * This structure groups together three kinds of CPU time that are tracked for 580 * This structure groups together three kinds of CPU time that are tracked for
@@ -612,8 +582,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
612 * these counts together and treat all three of them in parallel. 582 * these counts together and treat all three of them in parallel.
613 */ 583 */
614struct task_cputime { 584struct task_cputime {
615 cputime_t utime; 585 u64 utime;
616 cputime_t stime; 586 u64 stime;
617 unsigned long long sum_exec_runtime; 587 unsigned long long sum_exec_runtime;
618}; 588};
619 589
@@ -622,13 +592,6 @@ struct task_cputime {
622#define prof_exp stime 592#define prof_exp stime
623#define sched_exp sum_exec_runtime 593#define sched_exp sum_exec_runtime
624 594
625#define INIT_CPUTIME \
626 (struct task_cputime) { \
627 .utime = 0, \
628 .stime = 0, \
629 .sum_exec_runtime = 0, \
630 }
631
632/* 595/*
633 * This is the atomic variant of task_cputime, which can be used for 596 * This is the atomic variant of task_cputime, which can be used for
634 * storing and updating task_cputime statistics without locking. 597 * storing and updating task_cputime statistics without locking.
@@ -734,13 +697,14 @@ struct signal_struct {
734 unsigned int is_child_subreaper:1; 697 unsigned int is_child_subreaper:1;
735 unsigned int has_child_subreaper:1; 698 unsigned int has_child_subreaper:1;
736 699
700#ifdef CONFIG_POSIX_TIMERS
701
737 /* POSIX.1b Interval Timers */ 702 /* POSIX.1b Interval Timers */
738 int posix_timer_id; 703 int posix_timer_id;
739 struct list_head posix_timers; 704 struct list_head posix_timers;
740 705
741 /* ITIMER_REAL timer for the process */ 706 /* ITIMER_REAL timer for the process */
742 struct hrtimer real_timer; 707 struct hrtimer real_timer;
743 struct pid *leader_pid;
744 ktime_t it_real_incr; 708 ktime_t it_real_incr;
745 709
746 /* 710 /*
@@ -759,12 +723,16 @@ struct signal_struct {
759 /* Earliest-expiration cache. */ 723 /* Earliest-expiration cache. */
760 struct task_cputime cputime_expires; 724 struct task_cputime cputime_expires;
761 725
726 struct list_head cpu_timers[3];
727
728#endif
729
730 struct pid *leader_pid;
731
762#ifdef CONFIG_NO_HZ_FULL 732#ifdef CONFIG_NO_HZ_FULL
763 atomic_t tick_dep_mask; 733 atomic_t tick_dep_mask;
764#endif 734#endif
765 735
766 struct list_head cpu_timers[3];
767
768 struct pid *tty_old_pgrp; 736 struct pid *tty_old_pgrp;
769 737
770 /* boolean value for session group leader */ 738 /* boolean value for session group leader */
@@ -782,9 +750,9 @@ struct signal_struct {
782 * in __exit_signal, except for the group leader. 750 * in __exit_signal, except for the group leader.
783 */ 751 */
784 seqlock_t stats_lock; 752 seqlock_t stats_lock;
785 cputime_t utime, stime, cutime, cstime; 753 u64 utime, stime, cutime, cstime;
786 cputime_t gtime; 754 u64 gtime;
787 cputime_t cgtime; 755 u64 cgtime;
788 struct prev_cputime prev_cputime; 756 struct prev_cputime prev_cputime;
789 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 757 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
790 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 758 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1025,8 +993,8 @@ enum cpu_idle_type {
1025 * 993 *
1026 * The DEFINE_WAKE_Q macro declares and initializes the list head. 994 * The DEFINE_WAKE_Q macro declares and initializes the list head.
1027 * wake_up_q() does NOT reinitialize the list; it's expected to be 995 * wake_up_q() does NOT reinitialize the list; it's expected to be
1028 * called near the end of a function, where the fact that the queue is 996 * called near the end of a function. Otherwise, the list can be
1029 * not used again will be easy to see by inspection. 997 * re-initialized for later re-use by wake_q_init().
1030 * 998 *
1031 * Note that this can cause spurious wakeups. schedule() callers 999 * Note that this can cause spurious wakeups. schedule() callers
1032 * must ensure the call is done inside a loop, confirming that the 1000 * must ensure the call is done inside a loop, confirming that the
@@ -1046,6 +1014,12 @@ struct wake_q_head {
1046#define DEFINE_WAKE_Q(name) \ 1014#define DEFINE_WAKE_Q(name) \
1047 struct wake_q_head name = { WAKE_Q_TAIL, &name.first } 1015 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
1048 1016
1017static inline void wake_q_init(struct wake_q_head *head)
1018{
1019 head->first = WAKE_Q_TAIL;
1020 head->lastp = &head->first;
1021}
1022
1049extern void wake_q_add(struct wake_q_head *head, 1023extern void wake_q_add(struct wake_q_head *head,
1050 struct task_struct *task); 1024 struct task_struct *task);
1051extern void wake_up_q(struct wake_q_head *head); 1025extern void wake_up_q(struct wake_q_head *head);
@@ -1663,11 +1637,11 @@ struct task_struct {
1663 int __user *set_child_tid; /* CLONE_CHILD_SETTID */ 1637 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
1664 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ 1638 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
1665 1639
1666 cputime_t utime, stime; 1640 u64 utime, stime;
1667#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 1641#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1668 cputime_t utimescaled, stimescaled; 1642 u64 utimescaled, stimescaled;
1669#endif 1643#endif
1670 cputime_t gtime; 1644 u64 gtime;
1671 struct prev_cputime prev_cputime; 1645 struct prev_cputime prev_cputime;
1672#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1646#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1673 seqcount_t vtime_seqcount; 1647 seqcount_t vtime_seqcount;
@@ -1691,8 +1665,10 @@ struct task_struct {
1691/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1665/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1692 unsigned long min_flt, maj_flt; 1666 unsigned long min_flt, maj_flt;
1693 1667
1668#ifdef CONFIG_POSIX_TIMERS
1694 struct task_cputime cputime_expires; 1669 struct task_cputime cputime_expires;
1695 struct list_head cpu_timers[3]; 1670 struct list_head cpu_timers[3];
1671#endif
1696 1672
1697/* process credentials */ 1673/* process credentials */
1698 const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ 1674 const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
@@ -1817,7 +1793,7 @@ struct task_struct {
1817#if defined(CONFIG_TASK_XACCT) 1793#if defined(CONFIG_TASK_XACCT)
1818 u64 acct_rss_mem1; /* accumulated rss usage */ 1794 u64 acct_rss_mem1; /* accumulated rss usage */
1819 u64 acct_vm_mem1; /* accumulated virtual memory usage */ 1795 u64 acct_vm_mem1; /* accumulated virtual memory usage */
1820 cputime_t acct_timexpd; /* stime + utime since last update */ 1796 u64 acct_timexpd; /* stime + utime since last update */
1821#endif 1797#endif
1822#ifdef CONFIG_CPUSETS 1798#ifdef CONFIG_CPUSETS
1823 nodemask_t mems_allowed; /* Protected by alloc_lock */ 1799 nodemask_t mems_allowed; /* Protected by alloc_lock */
@@ -2262,17 +2238,17 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
2262 2238
2263#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 2239#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2264extern void task_cputime(struct task_struct *t, 2240extern void task_cputime(struct task_struct *t,
2265 cputime_t *utime, cputime_t *stime); 2241 u64 *utime, u64 *stime);
2266extern cputime_t task_gtime(struct task_struct *t); 2242extern u64 task_gtime(struct task_struct *t);
2267#else 2243#else
2268static inline void task_cputime(struct task_struct *t, 2244static inline void task_cputime(struct task_struct *t,
2269 cputime_t *utime, cputime_t *stime) 2245 u64 *utime, u64 *stime)
2270{ 2246{
2271 *utime = t->utime; 2247 *utime = t->utime;
2272 *stime = t->stime; 2248 *stime = t->stime;
2273} 2249}
2274 2250
2275static inline cputime_t task_gtime(struct task_struct *t) 2251static inline u64 task_gtime(struct task_struct *t)
2276{ 2252{
2277 return t->gtime; 2253 return t->gtime;
2278} 2254}
@@ -2280,23 +2256,23 @@ static inline cputime_t task_gtime(struct task_struct *t)
2280 2256
2281#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME 2257#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
2282static inline void task_cputime_scaled(struct task_struct *t, 2258static inline void task_cputime_scaled(struct task_struct *t,
2283 cputime_t *utimescaled, 2259 u64 *utimescaled,
2284 cputime_t *stimescaled) 2260 u64 *stimescaled)
2285{ 2261{
2286 *utimescaled = t->utimescaled; 2262 *utimescaled = t->utimescaled;
2287 *stimescaled = t->stimescaled; 2263 *stimescaled = t->stimescaled;
2288} 2264}
2289#else 2265#else
2290static inline void task_cputime_scaled(struct task_struct *t, 2266static inline void task_cputime_scaled(struct task_struct *t,
2291 cputime_t *utimescaled, 2267 u64 *utimescaled,
2292 cputime_t *stimescaled) 2268 u64 *stimescaled)
2293{ 2269{
2294 task_cputime(t, utimescaled, stimescaled); 2270 task_cputime(t, utimescaled, stimescaled);
2295} 2271}
2296#endif 2272#endif
2297 2273
2298extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2274extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
2299extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2275extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
2300 2276
2301/* 2277/*
2302 * Per process flags 2278 * Per process flags
@@ -2515,10 +2491,18 @@ extern u64 sched_clock_cpu(int cpu);
2515extern void sched_clock_init(void); 2491extern void sched_clock_init(void);
2516 2492
2517#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 2493#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2494static inline void sched_clock_init_late(void)
2495{
2496}
2497
2518static inline void sched_clock_tick(void) 2498static inline void sched_clock_tick(void)
2519{ 2499{
2520} 2500}
2521 2501
2502static inline void clear_sched_clock_stable(void)
2503{
2504}
2505
2522static inline void sched_clock_idle_sleep_event(void) 2506static inline void sched_clock_idle_sleep_event(void)
2523{ 2507{
2524} 2508}
@@ -2537,6 +2521,7 @@ static inline u64 local_clock(void)
2537 return sched_clock(); 2521 return sched_clock();
2538} 2522}
2539#else 2523#else
2524extern void sched_clock_init_late(void);
2540/* 2525/*
2541 * Architectures can set this to 1 if they have specified 2526 * Architectures can set this to 1 if they have specified
2542 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, 2527 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
@@ -2544,7 +2529,6 @@ static inline u64 local_clock(void)
2544 * is reliable after all: 2529 * is reliable after all:
2545 */ 2530 */
2546extern int sched_clock_stable(void); 2531extern int sched_clock_stable(void);
2547extern void set_sched_clock_stable(void);
2548extern void clear_sched_clock_stable(void); 2532extern void clear_sched_clock_stable(void);
2549 2533
2550extern void sched_clock_tick(void); 2534extern void sched_clock_tick(void);
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 441145351301..49308e142aae 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -59,6 +59,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
59extern unsigned int sysctl_sched_autogroup_enabled; 59extern unsigned int sysctl_sched_autogroup_enabled;
60#endif 60#endif
61 61
62extern int sysctl_sched_rr_timeslice;
62extern int sched_rr_timeslice; 63extern int sched_rr_timeslice;
63 64
64extern int sched_rr_handler(struct ctl_table *table, int write, 65extern int sched_rr_handler(struct ctl_table *table, int write,
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index fcb4c3646173..7a4804c4a593 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -62,7 +62,7 @@ typedef struct sctphdr {
62 __be16 dest; 62 __be16 dest;
63 __be32 vtag; 63 __be32 vtag;
64 __le32 checksum; 64 __le32 checksum;
65} __packed sctp_sctphdr_t; 65} sctp_sctphdr_t;
66 66
67static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb) 67static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
68{ 68{
@@ -74,7 +74,7 @@ typedef struct sctp_chunkhdr {
74 __u8 type; 74 __u8 type;
75 __u8 flags; 75 __u8 flags;
76 __be16 length; 76 __be16 length;
77} __packed sctp_chunkhdr_t; 77} sctp_chunkhdr_t;
78 78
79 79
80/* Section 3.2. Chunk Type Values. 80/* Section 3.2. Chunk Type Values.
@@ -108,6 +108,7 @@ typedef enum {
108 /* Use hex, as defined in ADDIP sec. 3.1 */ 108 /* Use hex, as defined in ADDIP sec. 3.1 */
109 SCTP_CID_ASCONF = 0xC1, 109 SCTP_CID_ASCONF = 0xC1,
110 SCTP_CID_ASCONF_ACK = 0x80, 110 SCTP_CID_ASCONF_ACK = 0x80,
111 SCTP_CID_RECONF = 0x82,
111} sctp_cid_t; /* enum */ 112} sctp_cid_t; /* enum */
112 113
113 114
@@ -164,7 +165,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 };
164typedef struct sctp_paramhdr { 165typedef struct sctp_paramhdr {
165 __be16 type; 166 __be16 type;
166 __be16 length; 167 __be16 length;
167} __packed sctp_paramhdr_t; 168} sctp_paramhdr_t;
168 169
169typedef enum { 170typedef enum {
170 171
@@ -199,6 +200,13 @@ typedef enum {
199 SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), 200 SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005),
200 SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), 201 SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
201 202
203 /* RE-CONFIG. Section 4 */
204 SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d),
205 SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e),
206 SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f),
207 SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010),
208 SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011),
209 SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012),
202} sctp_param_t; /* enum */ 210} sctp_param_t; /* enum */
203 211
204 212
@@ -225,12 +233,12 @@ typedef struct sctp_datahdr {
225 __be16 ssn; 233 __be16 ssn;
226 __be32 ppid; 234 __be32 ppid;
227 __u8 payload[0]; 235 __u8 payload[0];
228} __packed sctp_datahdr_t; 236} sctp_datahdr_t;
229 237
230typedef struct sctp_data_chunk { 238typedef struct sctp_data_chunk {
231 sctp_chunkhdr_t chunk_hdr; 239 sctp_chunkhdr_t chunk_hdr;
232 sctp_datahdr_t data_hdr; 240 sctp_datahdr_t data_hdr;
233} __packed sctp_data_chunk_t; 241} sctp_data_chunk_t;
234 242
235/* DATA Chuck Specific Flags */ 243/* DATA Chuck Specific Flags */
236enum { 244enum {
@@ -256,78 +264,78 @@ typedef struct sctp_inithdr {
256 __be16 num_inbound_streams; 264 __be16 num_inbound_streams;
257 __be32 initial_tsn; 265 __be32 initial_tsn;
258 __u8 params[0]; 266 __u8 params[0];
259} __packed sctp_inithdr_t; 267} sctp_inithdr_t;
260 268
261typedef struct sctp_init_chunk { 269typedef struct sctp_init_chunk {
262 sctp_chunkhdr_t chunk_hdr; 270 sctp_chunkhdr_t chunk_hdr;
263 sctp_inithdr_t init_hdr; 271 sctp_inithdr_t init_hdr;
264} __packed sctp_init_chunk_t; 272} sctp_init_chunk_t;
265 273
266 274
267/* Section 3.3.2.1. IPv4 Address Parameter (5) */ 275/* Section 3.3.2.1. IPv4 Address Parameter (5) */
268typedef struct sctp_ipv4addr_param { 276typedef struct sctp_ipv4addr_param {
269 sctp_paramhdr_t param_hdr; 277 sctp_paramhdr_t param_hdr;
270 struct in_addr addr; 278 struct in_addr addr;
271} __packed sctp_ipv4addr_param_t; 279} sctp_ipv4addr_param_t;
272 280
273/* Section 3.3.2.1. IPv6 Address Parameter (6) */ 281/* Section 3.3.2.1. IPv6 Address Parameter (6) */
274typedef struct sctp_ipv6addr_param { 282typedef struct sctp_ipv6addr_param {
275 sctp_paramhdr_t param_hdr; 283 sctp_paramhdr_t param_hdr;
276 struct in6_addr addr; 284 struct in6_addr addr;
277} __packed sctp_ipv6addr_param_t; 285} sctp_ipv6addr_param_t;
278 286
279/* Section 3.3.2.1 Cookie Preservative (9) */ 287/* Section 3.3.2.1 Cookie Preservative (9) */
280typedef struct sctp_cookie_preserve_param { 288typedef struct sctp_cookie_preserve_param {
281 sctp_paramhdr_t param_hdr; 289 sctp_paramhdr_t param_hdr;
282 __be32 lifespan_increment; 290 __be32 lifespan_increment;
283} __packed sctp_cookie_preserve_param_t; 291} sctp_cookie_preserve_param_t;
284 292
285/* Section 3.3.2.1 Host Name Address (11) */ 293/* Section 3.3.2.1 Host Name Address (11) */
286typedef struct sctp_hostname_param { 294typedef struct sctp_hostname_param {
287 sctp_paramhdr_t param_hdr; 295 sctp_paramhdr_t param_hdr;
288 uint8_t hostname[0]; 296 uint8_t hostname[0];
289} __packed sctp_hostname_param_t; 297} sctp_hostname_param_t;
290 298
291/* Section 3.3.2.1 Supported Address Types (12) */ 299/* Section 3.3.2.1 Supported Address Types (12) */
292typedef struct sctp_supported_addrs_param { 300typedef struct sctp_supported_addrs_param {
293 sctp_paramhdr_t param_hdr; 301 sctp_paramhdr_t param_hdr;
294 __be16 types[0]; 302 __be16 types[0];
295} __packed sctp_supported_addrs_param_t; 303} sctp_supported_addrs_param_t;
296 304
297/* Appendix A. ECN Capable (32768) */ 305/* Appendix A. ECN Capable (32768) */
298typedef struct sctp_ecn_capable_param { 306typedef struct sctp_ecn_capable_param {
299 sctp_paramhdr_t param_hdr; 307 sctp_paramhdr_t param_hdr;
300} __packed sctp_ecn_capable_param_t; 308} sctp_ecn_capable_param_t;
301 309
302/* ADDIP Section 3.2.6 Adaptation Layer Indication */ 310/* ADDIP Section 3.2.6 Adaptation Layer Indication */
303typedef struct sctp_adaptation_ind_param { 311typedef struct sctp_adaptation_ind_param {
304 struct sctp_paramhdr param_hdr; 312 struct sctp_paramhdr param_hdr;
305 __be32 adaptation_ind; 313 __be32 adaptation_ind;
306} __packed sctp_adaptation_ind_param_t; 314} sctp_adaptation_ind_param_t;
307 315
308/* ADDIP Section 4.2.7 Supported Extensions Parameter */ 316/* ADDIP Section 4.2.7 Supported Extensions Parameter */
309typedef struct sctp_supported_ext_param { 317typedef struct sctp_supported_ext_param {
310 struct sctp_paramhdr param_hdr; 318 struct sctp_paramhdr param_hdr;
311 __u8 chunks[0]; 319 __u8 chunks[0];
312} __packed sctp_supported_ext_param_t; 320} sctp_supported_ext_param_t;
313 321
314/* AUTH Section 3.1 Random */ 322/* AUTH Section 3.1 Random */
315typedef struct sctp_random_param { 323typedef struct sctp_random_param {
316 sctp_paramhdr_t param_hdr; 324 sctp_paramhdr_t param_hdr;
317 __u8 random_val[0]; 325 __u8 random_val[0];
318} __packed sctp_random_param_t; 326} sctp_random_param_t;
319 327
320/* AUTH Section 3.2 Chunk List */ 328/* AUTH Section 3.2 Chunk List */
321typedef struct sctp_chunks_param { 329typedef struct sctp_chunks_param {
322 sctp_paramhdr_t param_hdr; 330 sctp_paramhdr_t param_hdr;
323 __u8 chunks[0]; 331 __u8 chunks[0];
324} __packed sctp_chunks_param_t; 332} sctp_chunks_param_t;
325 333
326/* AUTH Section 3.3 HMAC Algorithm */ 334/* AUTH Section 3.3 HMAC Algorithm */
327typedef struct sctp_hmac_algo_param { 335typedef struct sctp_hmac_algo_param {
328 sctp_paramhdr_t param_hdr; 336 sctp_paramhdr_t param_hdr;
329 __be16 hmac_ids[0]; 337 __be16 hmac_ids[0];
330} __packed sctp_hmac_algo_param_t; 338} sctp_hmac_algo_param_t;
331 339
332/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): 340/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
333 * The INIT ACK chunk is used to acknowledge the initiation of an SCTP 341 * The INIT ACK chunk is used to acknowledge the initiation of an SCTP
@@ -339,13 +347,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t;
339typedef struct sctp_cookie_param { 347typedef struct sctp_cookie_param {
340 sctp_paramhdr_t p; 348 sctp_paramhdr_t p;
341 __u8 body[0]; 349 __u8 body[0];
342} __packed sctp_cookie_param_t; 350} sctp_cookie_param_t;
343 351
344/* Section 3.3.3.1 Unrecognized Parameters (8) */ 352/* Section 3.3.3.1 Unrecognized Parameters (8) */
345typedef struct sctp_unrecognized_param { 353typedef struct sctp_unrecognized_param {
346 sctp_paramhdr_t param_hdr; 354 sctp_paramhdr_t param_hdr;
347 sctp_paramhdr_t unrecognized; 355 sctp_paramhdr_t unrecognized;
348} __packed sctp_unrecognized_param_t; 356} sctp_unrecognized_param_t;
349 357
350 358
351 359
@@ -360,7 +368,7 @@ typedef struct sctp_unrecognized_param {
360typedef struct sctp_gap_ack_block { 368typedef struct sctp_gap_ack_block {
361 __be16 start; 369 __be16 start;
362 __be16 end; 370 __be16 end;
363} __packed sctp_gap_ack_block_t; 371} sctp_gap_ack_block_t;
364 372
365typedef __be32 sctp_dup_tsn_t; 373typedef __be32 sctp_dup_tsn_t;
366 374
@@ -375,12 +383,12 @@ typedef struct sctp_sackhdr {
375 __be16 num_gap_ack_blocks; 383 __be16 num_gap_ack_blocks;
376 __be16 num_dup_tsns; 384 __be16 num_dup_tsns;
377 sctp_sack_variable_t variable[0]; 385 sctp_sack_variable_t variable[0];
378} __packed sctp_sackhdr_t; 386} sctp_sackhdr_t;
379 387
380typedef struct sctp_sack_chunk { 388typedef struct sctp_sack_chunk {
381 sctp_chunkhdr_t chunk_hdr; 389 sctp_chunkhdr_t chunk_hdr;
382 sctp_sackhdr_t sack_hdr; 390 sctp_sackhdr_t sack_hdr;
383} __packed sctp_sack_chunk_t; 391} sctp_sack_chunk_t;
384 392
385 393
386/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): 394/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -392,12 +400,12 @@ typedef struct sctp_sack_chunk {
392 400
393typedef struct sctp_heartbeathdr { 401typedef struct sctp_heartbeathdr {
394 sctp_paramhdr_t info; 402 sctp_paramhdr_t info;
395} __packed sctp_heartbeathdr_t; 403} sctp_heartbeathdr_t;
396 404
397typedef struct sctp_heartbeat_chunk { 405typedef struct sctp_heartbeat_chunk {
398 sctp_chunkhdr_t chunk_hdr; 406 sctp_chunkhdr_t chunk_hdr;
399 sctp_heartbeathdr_t hb_hdr; 407 sctp_heartbeathdr_t hb_hdr;
400} __packed sctp_heartbeat_chunk_t; 408} sctp_heartbeat_chunk_t;
401 409
402 410
403/* For the abort and shutdown ACK we must carry the init tag in the 411/* For the abort and shutdown ACK we must carry the init tag in the
@@ -406,7 +414,7 @@ typedef struct sctp_heartbeat_chunk {
406 */ 414 */
407typedef struct sctp_abort_chunk { 415typedef struct sctp_abort_chunk {
408 sctp_chunkhdr_t uh; 416 sctp_chunkhdr_t uh;
409} __packed sctp_abort_chunk_t; 417} sctp_abort_chunk_t;
410 418
411 419
412/* For the graceful shutdown we must carry the tag (in common header) 420/* For the graceful shutdown we must carry the tag (in common header)
@@ -414,12 +422,12 @@ typedef struct sctp_abort_chunk {
414 */ 422 */
415typedef struct sctp_shutdownhdr { 423typedef struct sctp_shutdownhdr {
416 __be32 cum_tsn_ack; 424 __be32 cum_tsn_ack;
417} __packed sctp_shutdownhdr_t; 425} sctp_shutdownhdr_t;
418 426
419struct sctp_shutdown_chunk_t { 427struct sctp_shutdown_chunk_t {
420 sctp_chunkhdr_t chunk_hdr; 428 sctp_chunkhdr_t chunk_hdr;
421 sctp_shutdownhdr_t shutdown_hdr; 429 sctp_shutdownhdr_t shutdown_hdr;
422} __packed; 430};
423 431
424/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */ 432/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
425 433
@@ -427,12 +435,12 @@ typedef struct sctp_errhdr {
427 __be16 cause; 435 __be16 cause;
428 __be16 length; 436 __be16 length;
429 __u8 variable[0]; 437 __u8 variable[0];
430} __packed sctp_errhdr_t; 438} sctp_errhdr_t;
431 439
432typedef struct sctp_operr_chunk { 440typedef struct sctp_operr_chunk {
433 sctp_chunkhdr_t chunk_hdr; 441 sctp_chunkhdr_t chunk_hdr;
434 sctp_errhdr_t err_hdr; 442 sctp_errhdr_t err_hdr;
435} __packed sctp_operr_chunk_t; 443} sctp_operr_chunk_t;
436 444
437/* RFC 2960 3.3.10 - Operation Error 445/* RFC 2960 3.3.10 - Operation Error
438 * 446 *
@@ -522,7 +530,7 @@ typedef struct sctp_ecnehdr {
522typedef struct sctp_ecne_chunk { 530typedef struct sctp_ecne_chunk {
523 sctp_chunkhdr_t chunk_hdr; 531 sctp_chunkhdr_t chunk_hdr;
524 sctp_ecnehdr_t ence_hdr; 532 sctp_ecnehdr_t ence_hdr;
525} __packed sctp_ecne_chunk_t; 533} sctp_ecne_chunk_t;
526 534
527/* RFC 2960. Appendix A. Explicit Congestion Notification. 535/* RFC 2960. Appendix A. Explicit Congestion Notification.
528 * Congestion Window Reduced (CWR) (13) 536 * Congestion Window Reduced (CWR) (13)
@@ -534,7 +542,7 @@ typedef struct sctp_cwrhdr {
534typedef struct sctp_cwr_chunk { 542typedef struct sctp_cwr_chunk {
535 sctp_chunkhdr_t chunk_hdr; 543 sctp_chunkhdr_t chunk_hdr;
536 sctp_cwrhdr_t cwr_hdr; 544 sctp_cwrhdr_t cwr_hdr;
537} __packed sctp_cwr_chunk_t; 545} sctp_cwr_chunk_t;
538 546
539/* PR-SCTP 547/* PR-SCTP
540 * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) 548 * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -585,17 +593,17 @@ typedef struct sctp_cwr_chunk {
585struct sctp_fwdtsn_skip { 593struct sctp_fwdtsn_skip {
586 __be16 stream; 594 __be16 stream;
587 __be16 ssn; 595 __be16 ssn;
588} __packed; 596};
589 597
590struct sctp_fwdtsn_hdr { 598struct sctp_fwdtsn_hdr {
591 __be32 new_cum_tsn; 599 __be32 new_cum_tsn;
592 struct sctp_fwdtsn_skip skip[0]; 600 struct sctp_fwdtsn_skip skip[0];
593} __packed; 601};
594 602
595struct sctp_fwdtsn_chunk { 603struct sctp_fwdtsn_chunk {
596 struct sctp_chunkhdr chunk_hdr; 604 struct sctp_chunkhdr chunk_hdr;
597 struct sctp_fwdtsn_hdr fwdtsn_hdr; 605 struct sctp_fwdtsn_hdr fwdtsn_hdr;
598} __packed; 606};
599 607
600 608
601/* ADDIP 609/* ADDIP
@@ -633,17 +641,17 @@ struct sctp_fwdtsn_chunk {
633typedef struct sctp_addip_param { 641typedef struct sctp_addip_param {
634 sctp_paramhdr_t param_hdr; 642 sctp_paramhdr_t param_hdr;
635 __be32 crr_id; 643 __be32 crr_id;
636} __packed sctp_addip_param_t; 644} sctp_addip_param_t;
637 645
638typedef struct sctp_addiphdr { 646typedef struct sctp_addiphdr {
639 __be32 serial; 647 __be32 serial;
640 __u8 params[0]; 648 __u8 params[0];
641} __packed sctp_addiphdr_t; 649} sctp_addiphdr_t;
642 650
643typedef struct sctp_addip_chunk { 651typedef struct sctp_addip_chunk {
644 sctp_chunkhdr_t chunk_hdr; 652 sctp_chunkhdr_t chunk_hdr;
645 sctp_addiphdr_t addip_hdr; 653 sctp_addiphdr_t addip_hdr;
646} __packed sctp_addip_chunk_t; 654} sctp_addip_chunk_t;
647 655
648/* AUTH 656/* AUTH
649 * Section 4.1 Authentication Chunk (AUTH) 657 * Section 4.1 Authentication Chunk (AUTH)
@@ -698,16 +706,71 @@ typedef struct sctp_authhdr {
698 __be16 shkey_id; 706 __be16 shkey_id;
699 __be16 hmac_id; 707 __be16 hmac_id;
700 __u8 hmac[0]; 708 __u8 hmac[0];
701} __packed sctp_authhdr_t; 709} sctp_authhdr_t;
702 710
703typedef struct sctp_auth_chunk { 711typedef struct sctp_auth_chunk {
704 sctp_chunkhdr_t chunk_hdr; 712 sctp_chunkhdr_t chunk_hdr;
705 sctp_authhdr_t auth_hdr; 713 sctp_authhdr_t auth_hdr;
706} __packed sctp_auth_chunk_t; 714} sctp_auth_chunk_t;
707 715
708struct sctp_infox { 716struct sctp_infox {
709 struct sctp_info *sctpinfo; 717 struct sctp_info *sctpinfo;
710 struct sctp_association *asoc; 718 struct sctp_association *asoc;
711}; 719};
712 720
721struct sctp_reconf_chunk {
722 sctp_chunkhdr_t chunk_hdr;
723 __u8 params[0];
724};
725
726struct sctp_strreset_outreq {
727 sctp_paramhdr_t param_hdr;
728 __u32 request_seq;
729 __u32 response_seq;
730 __u32 send_reset_at_tsn;
731 __u16 list_of_streams[0];
732};
733
734struct sctp_strreset_inreq {
735 sctp_paramhdr_t param_hdr;
736 __u32 request_seq;
737 __u16 list_of_streams[0];
738};
739
740struct sctp_strreset_tsnreq {
741 sctp_paramhdr_t param_hdr;
742 __u32 request_seq;
743};
744
745struct sctp_strreset_addstrm {
746 sctp_paramhdr_t param_hdr;
747 __u32 request_seq;
748 __u16 number_of_streams;
749 __u16 reserved;
750};
751
752enum {
753 SCTP_STRRESET_NOTHING_TO_DO = 0x00,
754 SCTP_STRRESET_PERFORMED = 0x01,
755 SCTP_STRRESET_DENIED = 0x02,
756 SCTP_STRRESET_ERR_WRONG_SSN = 0x03,
757 SCTP_STRRESET_ERR_IN_PROGRESS = 0x04,
758 SCTP_STRRESET_ERR_BAD_SEQNO = 0x05,
759 SCTP_STRRESET_IN_PROGRESS = 0x06,
760};
761
762struct sctp_strreset_resp {
763 sctp_paramhdr_t param_hdr;
764 __u32 response_seq;
765 __u32 result;
766};
767
768struct sctp_strreset_resptsn {
769 sctp_paramhdr_t param_hdr;
770 __u32 response_seq;
771 __u32 result;
772 __u32 senders_next_tsn;
773 __u32 receivers_next_tsn;
774};
775
713#endif /* __LINUX_SCTP_H__ */ 776#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/security.h b/include/linux/security.h
index c2125e9093e8..d3868f2ebada 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -332,7 +332,6 @@ int security_task_getscheduler(struct task_struct *p);
332int security_task_movememory(struct task_struct *p); 332int security_task_movememory(struct task_struct *p);
333int security_task_kill(struct task_struct *p, struct siginfo *info, 333int security_task_kill(struct task_struct *p, struct siginfo *info,
334 int sig, u32 secid); 334 int sig, u32 secid);
335int security_task_wait(struct task_struct *p);
336int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, 335int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
337 unsigned long arg4, unsigned long arg5); 336 unsigned long arg4, unsigned long arg5);
338void security_task_to_inode(struct task_struct *p, struct inode *inode); 337void security_task_to_inode(struct task_struct *p, struct inode *inode);
@@ -361,7 +360,7 @@ int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
361 unsigned nsops, int alter); 360 unsigned nsops, int alter);
362void security_d_instantiate(struct dentry *dentry, struct inode *inode); 361void security_d_instantiate(struct dentry *dentry, struct inode *inode);
363int security_getprocattr(struct task_struct *p, char *name, char **value); 362int security_getprocattr(struct task_struct *p, char *name, char **value);
364int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size); 363int security_setprocattr(const char *name, void *value, size_t size);
365int security_netlink_send(struct sock *sk, struct sk_buff *skb); 364int security_netlink_send(struct sock *sk, struct sk_buff *skb);
366int security_ismaclabel(const char *name); 365int security_ismaclabel(const char *name);
367int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); 366int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -980,11 +979,6 @@ static inline int security_task_kill(struct task_struct *p,
980 return 0; 979 return 0;
981} 980}
982 981
983static inline int security_task_wait(struct task_struct *p)
984{
985 return 0;
986}
987
988static inline int security_task_prctl(int option, unsigned long arg2, 982static inline int security_task_prctl(int option, unsigned long arg2,
989 unsigned long arg3, 983 unsigned long arg3,
990 unsigned long arg4, 984 unsigned long arg4,
@@ -1106,7 +1100,7 @@ static inline int security_getprocattr(struct task_struct *p, char *name, char *
1106 return -EINVAL; 1100 return -EINVAL;
1107} 1101}
1108 1102
1109static inline int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size) 1103static inline int security_setprocattr(char *name, void *value, size_t size)
1110{ 1104{
1111 return -EINVAL; 1105 return -EINVAL;
1112} 1106}
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
new file mode 100644
index 000000000000..deee23d012e7
--- /dev/null
+++ b/include/linux/sed-opal.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Authors:
5 * Rafael Antognolli <rafael.antognolli@intel.com>
6 * Scott Bauer <scott.bauer@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 */
17
18#ifndef LINUX_OPAL_H
19#define LINUX_OPAL_H
20
21#include <uapi/linux/sed-opal.h>
22#include <linux/kernel.h>
23
24struct opal_dev;
25
26typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer,
27 size_t len, bool send);
28
29#ifdef CONFIG_BLK_SED_OPAL
30bool opal_unlock_from_suspend(struct opal_dev *dev);
31struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
32int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
33
34static inline bool is_sed_ioctl(unsigned int cmd)
35{
36 switch (cmd) {
37 case IOC_OPAL_SAVE:
38 case IOC_OPAL_LOCK_UNLOCK:
39 case IOC_OPAL_TAKE_OWNERSHIP:
40 case IOC_OPAL_ACTIVATE_LSP:
41 case IOC_OPAL_SET_PW:
42 case IOC_OPAL_ACTIVATE_USR:
43 case IOC_OPAL_REVERT_TPR:
44 case IOC_OPAL_LR_SETUP:
45 case IOC_OPAL_ADD_USR_TO_LR:
46 case IOC_OPAL_ENABLE_DISABLE_MBR:
47 case IOC_OPAL_ERASE_LR:
48 case IOC_OPAL_SECURE_ERASE_LR:
49 return true;
50 }
51 return false;
52}
53#else
54static inline bool is_sed_ioctl(unsigned int cmd)
55{
56 return false;
57}
58
59static inline int sed_ioctl(struct opal_dev *dev, unsigned int cmd,
60 void __user *ioctl_ptr)
61{
62 return 0;
63}
64static inline bool opal_unlock_from_suspend(struct opal_dev *dev)
65{
66 return false;
67}
68#define init_opal_dev(data, send_recv) NULL
69#endif /* CONFIG_BLK_SED_OPAL */
70#endif /* LINUX_OPAL_H */
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
new file mode 100644
index 000000000000..fa7a6b9cedbf
--- /dev/null
+++ b/include/linux/siphash.h
@@ -0,0 +1,140 @@
1/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
2 *
3 * This file is provided under a dual BSD/GPLv2 license.
4 *
5 * SipHash: a fast short-input PRF
6 * https://131002.net/siphash/
7 *
8 * This implementation is specifically for SipHash2-4 for a secure PRF
9 * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
10 * hashtables.
11 */
12
13#ifndef _LINUX_SIPHASH_H
14#define _LINUX_SIPHASH_H
15
16#include <linux/types.h>
17#include <linux/kernel.h>
18
19#define SIPHASH_ALIGNMENT __alignof__(u64)
20typedef struct {
21 u64 key[2];
22} siphash_key_t;
23
24u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
25#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
26u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
27#endif
28
29u64 siphash_1u64(const u64 a, const siphash_key_t *key);
30u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
31u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
32 const siphash_key_t *key);
33u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
34 const siphash_key_t *key);
35u64 siphash_1u32(const u32 a, const siphash_key_t *key);
36u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
37 const siphash_key_t *key);
38
39static inline u64 siphash_2u32(const u32 a, const u32 b,
40 const siphash_key_t *key)
41{
42 return siphash_1u64((u64)b << 32 | a, key);
43}
44static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
45 const u32 d, const siphash_key_t *key)
46{
47 return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
48}
49
50
51static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
52 const siphash_key_t *key)
53{
54 if (__builtin_constant_p(len) && len == 4)
55 return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
56 if (__builtin_constant_p(len) && len == 8)
57 return siphash_1u64(le64_to_cpu(data[0]), key);
58 if (__builtin_constant_p(len) && len == 16)
59 return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
60 key);
61 if (__builtin_constant_p(len) && len == 24)
62 return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
63 le64_to_cpu(data[2]), key);
64 if (__builtin_constant_p(len) && len == 32)
65 return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
66 le64_to_cpu(data[2]), le64_to_cpu(data[3]),
67 key);
68 return __siphash_aligned(data, len, key);
69}
70
71/**
72 * siphash - compute 64-bit siphash PRF value
73 * @data: buffer to hash
74 * @size: size of @data
75 * @key: the siphash key
76 */
77static inline u64 siphash(const void *data, size_t len,
78 const siphash_key_t *key)
79{
80#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
81 if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
82 return __siphash_unaligned(data, len, key);
83#endif
84 return ___siphash_aligned(data, len, key);
85}
86
87#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
88typedef struct {
89 unsigned long key[2];
90} hsiphash_key_t;
91
92u32 __hsiphash_aligned(const void *data, size_t len,
93 const hsiphash_key_t *key);
94#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
95u32 __hsiphash_unaligned(const void *data, size_t len,
96 const hsiphash_key_t *key);
97#endif
98
99u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
100u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
101u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
102 const hsiphash_key_t *key);
103u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
104 const hsiphash_key_t *key);
105
106static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
107 const hsiphash_key_t *key)
108{
109 if (__builtin_constant_p(len) && len == 4)
110 return hsiphash_1u32(le32_to_cpu(data[0]), key);
111 if (__builtin_constant_p(len) && len == 8)
112 return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
113 key);
114 if (__builtin_constant_p(len) && len == 12)
115 return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
116 le32_to_cpu(data[2]), key);
117 if (__builtin_constant_p(len) && len == 16)
118 return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
119 le32_to_cpu(data[2]), le32_to_cpu(data[3]),
120 key);
121 return __hsiphash_aligned(data, len, key);
122}
123
124/**
125 * hsiphash - compute 32-bit hsiphash PRF value
126 * @data: buffer to hash
127 * @size: size of @data
128 * @key: the hsiphash key
129 */
130static inline u32 hsiphash(const void *data, size_t len,
131 const hsiphash_key_t *key)
132{
133#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
134 if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
135 return __hsiphash_unaligned(data, len, key);
136#endif
137 return ___hsiphash_aligned(data, len, key);
138}
139
140#endif /* _LINUX_SIPHASH_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a410715bbef8..69ccd2636911 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -585,20 +585,22 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
585 * @cloned: Head may be cloned (check refcnt to be sure) 585 * @cloned: Head may be cloned (check refcnt to be sure)
586 * @ip_summed: Driver fed us an IP checksum 586 * @ip_summed: Driver fed us an IP checksum
587 * @nohdr: Payload reference only, must not modify header 587 * @nohdr: Payload reference only, must not modify header
588 * @nfctinfo: Relationship of this skb to the connection
589 * @pkt_type: Packet class 588 * @pkt_type: Packet class
590 * @fclone: skbuff clone status 589 * @fclone: skbuff clone status
591 * @ipvs_property: skbuff is owned by ipvs 590 * @ipvs_property: skbuff is owned by ipvs
591 * @tc_skip_classify: do not classify packet. set by IFB device
592 * @tc_at_ingress: used within tc_classify to distinguish in/egress
593 * @tc_redirected: packet was redirected by a tc action
594 * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
592 * @peeked: this packet has been seen already, so stats have been 595 * @peeked: this packet has been seen already, so stats have been
593 * done for it, don't do them again 596 * done for it, don't do them again
594 * @nf_trace: netfilter packet trace flag 597 * @nf_trace: netfilter packet trace flag
595 * @protocol: Packet protocol from driver 598 * @protocol: Packet protocol from driver
596 * @destructor: Destruct function 599 * @destructor: Destruct function
597 * @nfct: Associated connection, if any 600 * @_nfct: Associated connection, if any (with nfctinfo bits)
598 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 601 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
599 * @skb_iif: ifindex of device we arrived on 602 * @skb_iif: ifindex of device we arrived on
600 * @tc_index: Traffic control index 603 * @tc_index: Traffic control index
601 * @tc_verd: traffic control verdict
602 * @hash: the packet hash 604 * @hash: the packet hash
603 * @queue_mapping: Queue mapping for multiqueue devices 605 * @queue_mapping: Queue mapping for multiqueue devices
604 * @xmit_more: More SKBs are pending for this queue 606 * @xmit_more: More SKBs are pending for this queue
@@ -610,6 +612,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
610 * @wifi_acked_valid: wifi_acked was set 612 * @wifi_acked_valid: wifi_acked was set
611 * @wifi_acked: whether frame was acked on wifi or not 613 * @wifi_acked: whether frame was acked on wifi or not
612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 614 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
615 * @dst_pending_confirm: need to confirm neighbour
613 * @napi_id: id of the NAPI struct this skb came from 616 * @napi_id: id of the NAPI struct this skb came from
614 * @secmark: security marking 617 * @secmark: security marking
615 * @mark: Generic packet mark 618 * @mark: Generic packet mark
@@ -668,7 +671,7 @@ struct sk_buff {
668 struct sec_path *sp; 671 struct sec_path *sp;
669#endif 672#endif
670#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 673#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
671 struct nf_conntrack *nfct; 674 unsigned long _nfct;
672#endif 675#endif
673#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 676#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
674 struct nf_bridge_info *nf_bridge; 677 struct nf_bridge_info *nf_bridge;
@@ -721,7 +724,6 @@ struct sk_buff {
721 __u8 pkt_type:3; 724 __u8 pkt_type:3;
722 __u8 pfmemalloc:1; 725 __u8 pfmemalloc:1;
723 __u8 ignore_df:1; 726 __u8 ignore_df:1;
724 __u8 nfctinfo:3;
725 727
726 __u8 nf_trace:1; 728 __u8 nf_trace:1;
727 __u8 ip_summed:2; 729 __u8 ip_summed:2;
@@ -740,6 +742,7 @@ struct sk_buff {
740 __u8 csum_level:2; 742 __u8 csum_level:2;
741 __u8 csum_bad:1; 743 __u8 csum_bad:1;
742 744
745 __u8 dst_pending_confirm:1;
743#ifdef CONFIG_IPV6_NDISC_NODETYPE 746#ifdef CONFIG_IPV6_NDISC_NODETYPE
744 __u8 ndisc_nodetype:2; 747 __u8 ndisc_nodetype:2;
745#endif 748#endif
@@ -749,13 +752,15 @@ struct sk_buff {
749#ifdef CONFIG_NET_SWITCHDEV 752#ifdef CONFIG_NET_SWITCHDEV
750 __u8 offload_fwd_mark:1; 753 __u8 offload_fwd_mark:1;
751#endif 754#endif
752 /* 2, 4 or 5 bit hole */ 755#ifdef CONFIG_NET_CLS_ACT
756 __u8 tc_skip_classify:1;
757 __u8 tc_at_ingress:1;
758 __u8 tc_redirected:1;
759 __u8 tc_from_ingress:1;
760#endif
753 761
754#ifdef CONFIG_NET_SCHED 762#ifdef CONFIG_NET_SCHED
755 __u16 tc_index; /* traffic control index */ 763 __u16 tc_index; /* traffic control index */
756#ifdef CONFIG_NET_CLS_ACT
757 __u16 tc_verd; /* traffic control verdict */
758#endif
759#endif 764#endif
760 765
761 union { 766 union {
@@ -836,6 +841,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
836#define SKB_DST_NOREF 1UL 841#define SKB_DST_NOREF 1UL
837#define SKB_DST_PTRMASK ~(SKB_DST_NOREF) 842#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
838 843
844#define SKB_NFCT_PTRMASK ~(7UL)
839/** 845/**
840 * skb_dst - returns skb dst_entry 846 * skb_dst - returns skb dst_entry
841 * @skb: buffer 847 * @skb: buffer
@@ -2178,6 +2184,11 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2178 return skb->head + skb->mac_header; 2184 return skb->head + skb->mac_header;
2179} 2185}
2180 2186
2187static inline int skb_mac_offset(const struct sk_buff *skb)
2188{
2189 return skb_mac_header(skb) - skb->data;
2190}
2191
2181static inline int skb_mac_header_was_set(const struct sk_buff *skb) 2192static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2182{ 2193{
2183 return skb->mac_header != (typeof(skb->mac_header))~0U; 2194 return skb->mac_header != (typeof(skb->mac_header))~0U;
@@ -3553,6 +3564,15 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3553 skb->csum = csum_add(skb->csum, delta); 3564 skb->csum = csum_add(skb->csum, delta);
3554} 3565}
3555 3566
3567static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
3568{
3569#if IS_ENABLED(CONFIG_NF_CONNTRACK)
3570 return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
3571#else
3572 return NULL;
3573#endif
3574}
3575
3556#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3576#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3557void nf_conntrack_destroy(struct nf_conntrack *nfct); 3577void nf_conntrack_destroy(struct nf_conntrack *nfct);
3558static inline void nf_conntrack_put(struct nf_conntrack *nfct) 3578static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -3581,8 +3601,8 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
3581static inline void nf_reset(struct sk_buff *skb) 3601static inline void nf_reset(struct sk_buff *skb)
3582{ 3602{
3583#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3603#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3584 nf_conntrack_put(skb->nfct); 3604 nf_conntrack_put(skb_nfct(skb));
3585 skb->nfct = NULL; 3605 skb->_nfct = 0;
3586#endif 3606#endif
3587#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3607#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3588 nf_bridge_put(skb->nf_bridge); 3608 nf_bridge_put(skb->nf_bridge);
@@ -3602,10 +3622,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3602 bool copy) 3622 bool copy)
3603{ 3623{
3604#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3624#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3605 dst->nfct = src->nfct; 3625 dst->_nfct = src->_nfct;
3606 nf_conntrack_get(src->nfct); 3626 nf_conntrack_get(skb_nfct(src));
3607 if (copy)
3608 dst->nfctinfo = src->nfctinfo;
3609#endif 3627#endif
3610#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3628#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3611 dst->nf_bridge = src->nf_bridge; 3629 dst->nf_bridge = src->nf_bridge;
@@ -3620,7 +3638,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3620static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) 3638static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
3621{ 3639{
3622#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3640#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3623 nf_conntrack_put(dst->nfct); 3641 nf_conntrack_put(skb_nfct(dst));
3624#endif 3642#endif
3625#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3643#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3626 nf_bridge_put(dst->nf_bridge); 3644 nf_bridge_put(dst->nf_bridge);
@@ -3652,9 +3670,7 @@ static inline bool skb_irq_freeable(const struct sk_buff *skb)
3652#if IS_ENABLED(CONFIG_XFRM) 3670#if IS_ENABLED(CONFIG_XFRM)
3653 !skb->sp && 3671 !skb->sp &&
3654#endif 3672#endif
3655#if IS_ENABLED(CONFIG_NF_CONNTRACK) 3673 !skb_nfct(skb) &&
3656 !skb->nfct &&
3657#endif
3658 !skb->_skb_refdst && 3674 !skb->_skb_refdst &&
3659 !skb_has_frag_list(skb); 3675 !skb_has_frag_list(skb);
3660} 3676}
@@ -3689,6 +3705,16 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3689 return skb->queue_mapping != 0; 3705 return skb->queue_mapping != 0;
3690} 3706}
3691 3707
3708static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
3709{
3710 skb->dst_pending_confirm = val;
3711}
3712
3713static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
3714{
3715 return skb->dst_pending_confirm != 0;
3716}
3717
3692static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 3718static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3693{ 3719{
3694#ifdef CONFIG_XFRM 3720#ifdef CONFIG_XFRM
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index 7b88697929e9..b8478ee7a71f 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -1,7 +1,7 @@
1#ifndef __QCOM_SMEM_STATE__ 1#ifndef __QCOM_SMEM_STATE__
2#define __QCOM_SMEM_STATE__ 2#define __QCOM_SMEM_STATE__
3 3
4#include <linux/errno.h> 4#include <linux/err.h>
5 5
6struct device_node; 6struct device_node;
7struct qcom_smem_state; 7struct qcom_smem_state;
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index e2e9de1acc5b..e57eb4b6cc5a 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -12,6 +12,8 @@
12#ifndef __LINUX_SOC_EXYNOS_PMU_H 12#ifndef __LINUX_SOC_EXYNOS_PMU_H
13#define __LINUX_SOC_EXYNOS_PMU_H 13#define __LINUX_SOC_EXYNOS_PMU_H
14 14
15struct regmap;
16
15enum sys_powerdown { 17enum sys_powerdown {
16 SYS_AFTR, 18 SYS_AFTR,
17 SYS_LPA, 19 SYS_LPA,
@@ -20,5 +22,13 @@ enum sys_powerdown {
20}; 22};
21 23
22extern void exynos_sys_powerdown_conf(enum sys_powerdown mode); 24extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
25#ifdef CONFIG_EXYNOS_PMU
26extern struct regmap *exynos_get_pmu_regmap(void);
27#else
28static inline struct regmap *exynos_get_pmu_regmap(void)
29{
30 return ERR_PTR(-ENODEV);
31}
32#endif
23 33
24#endif /* __LINUX_SOC_EXYNOS_PMU_H */ 34#endif /* __LINUX_SOC_EXYNOS_PMU_H */
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 35cb9264e0d5..2b7882666ef6 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -41,6 +41,8 @@
41#define KNAV_DMA_DESC_RETQ_SHIFT 0 41#define KNAV_DMA_DESC_RETQ_SHIFT 0
42#define KNAV_DMA_DESC_RETQ_MASK MASK(14) 42#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
43#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22) 43#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
44#define KNAV_DMA_DESC_EFLAGS_MASK MASK(4)
45#define KNAV_DMA_DESC_EFLAGS_SHIFT 20
44 46
45#define KNAV_DMA_NUM_EPIB_WORDS 4 47#define KNAV_DMA_NUM_EPIB_WORDS 4
46#define KNAV_DMA_NUM_PS_WORDS 16 48#define KNAV_DMA_NUM_PS_WORDS 16
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b5cc5a6d7011..082027457825 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -92,9 +92,9 @@ struct cmsghdr {
92 92
93#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) 93#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
94 94
95#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr)))) 95#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr)))
96#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len)) 96#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len))
97#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len)) 97#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len))
98 98
99#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \ 99#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
100 (struct cmsghdr *)(ctl) : \ 100 (struct cmsghdr *)(ctl) : \
@@ -202,8 +202,12 @@ struct ucred {
202#define AF_VSOCK 40 /* vSockets */ 202#define AF_VSOCK 40 /* vSockets */
203#define AF_KCM 41 /* Kernel Connection Multiplexor*/ 203#define AF_KCM 41 /* Kernel Connection Multiplexor*/
204#define AF_QIPCRTR 42 /* Qualcomm IPC Router */ 204#define AF_QIPCRTR 42 /* Qualcomm IPC Router */
205#define AF_SMC 43 /* smc sockets: reserve number for
206 * PF_SMC protocol family that
207 * reuses AF_INET address family
208 */
205 209
206#define AF_MAX 43 /* For now.. */ 210#define AF_MAX 44 /* For now.. */
207 211
208/* Protocol families, same as address families. */ 212/* Protocol families, same as address families. */
209#define PF_UNSPEC AF_UNSPEC 213#define PF_UNSPEC AF_UNSPEC
@@ -251,6 +255,7 @@ struct ucred {
251#define PF_VSOCK AF_VSOCK 255#define PF_VSOCK AF_VSOCK
252#define PF_KCM AF_KCM 256#define PF_KCM AF_KCM
253#define PF_QIPCRTR AF_QIPCRTR 257#define PF_QIPCRTR AF_QIPCRTR
258#define PF_SMC AF_SMC
254#define PF_MAX AF_MAX 259#define PF_MAX AF_MAX
255 260
256/* Maximum queue length specifiable by listen. */ 261/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 47dd0cebd204..59248dcc6ef3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -180,8 +180,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
180#ifdef CONFIG_DEBUG_LOCK_ALLOC 180#ifdef CONFIG_DEBUG_LOCK_ALLOC
181# define raw_spin_lock_nested(lock, subclass) \ 181# define raw_spin_lock_nested(lock, subclass) \
182 _raw_spin_lock_nested(lock, subclass) 182 _raw_spin_lock_nested(lock, subclass)
183# define raw_spin_lock_bh_nested(lock, subclass) \
184 _raw_spin_lock_bh_nested(lock, subclass)
185 183
186# define raw_spin_lock_nest_lock(lock, nest_lock) \ 184# define raw_spin_lock_nest_lock(lock, nest_lock) \
187 do { \ 185 do { \
@@ -197,7 +195,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
197# define raw_spin_lock_nested(lock, subclass) \ 195# define raw_spin_lock_nested(lock, subclass) \
198 _raw_spin_lock(((void)(subclass), (lock))) 196 _raw_spin_lock(((void)(subclass), (lock)))
199# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) 197# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
200# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
201#endif 198#endif
202 199
203#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 200#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -317,11 +314,6 @@ do { \
317 raw_spin_lock_nested(spinlock_check(lock), subclass); \ 314 raw_spin_lock_nested(spinlock_check(lock), subclass); \
318} while (0) 315} while (0)
319 316
320#define spin_lock_bh_nested(lock, subclass) \
321do { \
322 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
323} while (0)
324
325#define spin_lock_nest_lock(lock, nest_lock) \ 317#define spin_lock_nest_lock(lock, nest_lock) \
326do { \ 318do { \
327 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ 319 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 5344268e6e62..42dfab89e740 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -22,8 +22,6 @@ int in_lock_functions(unsigned long addr);
22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); 22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) 23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock); 24 __acquires(lock);
25void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
26 __acquires(lock);
27void __lockfunc 25void __lockfunc
28_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) 26_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
29 __acquires(lock); 27 __acquires(lock);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d3afef9d8dbe..d0d188861ad6 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -57,7 +57,6 @@
57 57
58#define _raw_spin_lock(lock) __LOCK(lock) 58#define _raw_spin_lock(lock) __LOCK(lock)
59#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) 59#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
60#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
61#define _raw_read_lock(lock) __LOCK(lock) 60#define _raw_read_lock(lock) __LOCK(lock)
62#define _raw_write_lock(lock) __LOCK(lock) 61#define _raw_write_lock(lock) __LOCK(lock)
63#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) 62#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/sram.h b/include/linux/sram.h
new file mode 100644
index 000000000000..c97dcbe8ce25
--- /dev/null
+++ b/include/linux/sram.h
@@ -0,0 +1,27 @@
1/*
2 * Generic SRAM Driver Interface
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __LINUX_SRAM_H__
14#define __LINUX_SRAM_H__
15
16struct gen_pool;
17
18#ifdef CONFIG_SRAM_EXEC
19int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
20#else
21static inline int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
22 size_t size)
23{
24 return -ENODEV;
25}
26#endif /* CONFIG_SRAM_EXEC */
27#endif /* __LINUX_SRAM_H__ */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index dc8eb63c6568..a598cf3ac70c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -33,9 +33,9 @@
33#include <linux/rcupdate.h> 33#include <linux/rcupdate.h>
34#include <linux/workqueue.h> 34#include <linux/workqueue.h>
35 35
36struct srcu_struct_array { 36struct srcu_array {
37 unsigned long c[2]; 37 unsigned long lock_count[2];
38 unsigned long seq[2]; 38 unsigned long unlock_count[2];
39}; 39};
40 40
41struct rcu_batch { 41struct rcu_batch {
@@ -46,7 +46,7 @@ struct rcu_batch {
46 46
47struct srcu_struct { 47struct srcu_struct {
48 unsigned long completed; 48 unsigned long completed;
49 struct srcu_struct_array __percpu *per_cpu_ref; 49 struct srcu_array __percpu *per_cpu_ref;
50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */ 50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */
51 bool running; 51 bool running;
52 /* callbacks just queued */ 52 /* callbacks just queued */
@@ -118,7 +118,7 @@ void process_srcu(struct work_struct *work);
118 * See include/linux/percpu-defs.h for the rules on per-CPU variables. 118 * See include/linux/percpu-defs.h for the rules on per-CPU variables.
119 */ 119 */
120#define __DEFINE_SRCU(name, is_static) \ 120#define __DEFINE_SRCU(name, is_static) \
121 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ 121 static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
122 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) 122 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
123#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) 123#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
124#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) 124#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 266dab9ad782..fc273e9d5f67 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -103,7 +103,6 @@ struct stmmac_axi {
103 u32 axi_wr_osr_lmt; 103 u32 axi_wr_osr_lmt;
104 u32 axi_rd_osr_lmt; 104 u32 axi_rd_osr_lmt;
105 bool axi_kbbe; 105 bool axi_kbbe;
106 bool axi_axi_all;
107 u32 axi_blen[AXI_BLEN]; 106 u32 axi_blen[AXI_BLEN];
108 bool axi_fb; 107 bool axi_fb;
109 bool axi_mb; 108 bool axi_mb;
@@ -135,13 +134,18 @@ struct plat_stmmacenet_data {
135 int tx_fifo_size; 134 int tx_fifo_size;
136 int rx_fifo_size; 135 int rx_fifo_size;
137 void (*fix_mac_speed)(void *priv, unsigned int speed); 136 void (*fix_mac_speed)(void *priv, unsigned int speed);
138 void (*bus_setup)(void __iomem *ioaddr);
139 int (*init)(struct platform_device *pdev, void *priv); 137 int (*init)(struct platform_device *pdev, void *priv);
140 void (*exit)(struct platform_device *pdev, void *priv); 138 void (*exit)(struct platform_device *pdev, void *priv);
141 void *bsp_priv; 139 void *bsp_priv;
140 struct clk *stmmac_clk;
141 struct clk *pclk;
142 struct clk *clk_ptp_ref;
143 unsigned int clk_ptp_rate;
144 struct reset_control *stmmac_rst;
142 struct stmmac_axi *axi; 145 struct stmmac_axi *axi;
143 int has_gmac4; 146 int has_gmac4;
144 bool tso_en; 147 bool tso_en;
145 int mac_port_sel_speed; 148 int mac_port_sel_speed;
149 bool en_tx_lpi_clockgating;
146}; 150};
147#endif 151#endif
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 62a60eeacb0a..8a511c0985aa 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -198,7 +198,7 @@ static inline struct cache_head *cache_get(struct cache_head *h)
198 198
199static inline void cache_put(struct cache_head *h, struct cache_detail *cd) 199static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
200{ 200{
201 if (atomic_read(&h->ref.refcount) <= 2 && 201 if (kref_read(&h->ref) <= 2 &&
202 h->expiry_time < cd->nextcheck) 202 h->expiry_time < cd->nextcheck)
203 cd->nextcheck = h->expiry_time; 203 cd->nextcheck = h->expiry_time;
204 kref_put(&h->ref, cd->cache_put); 204 kref_put(&h->ref, cd->cache_put);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 85cc819676e8..333ad11b3dd9 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
216void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); 216void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
217bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 217bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
218 const struct sockaddr *sap); 218 const struct sockaddr *sap);
219void rpc_cleanup_clids(void);
219#endif /* __KERNEL__ */ 220#endif /* __KERNEL__ */
220#endif /* _LINUX_SUNRPC_CLNT_H */ 221#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index e5d193440374..7440290f64ac 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -66,6 +66,7 @@ struct svc_xprt {
66#define XPT_LISTENER 10 /* listening endpoint */ 66#define XPT_LISTENER 10 /* listening endpoint */
67#define XPT_CACHE_AUTH 11 /* cache auth info */ 67#define XPT_CACHE_AUTH 11 /* cache auth info */
68#define XPT_LOCAL 12 /* connection from loopback interface */ 68#define XPT_LOCAL 12 /* connection from loopback interface */
69#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
69 70
70 struct svc_serv *xpt_server; /* service for transport */ 71 struct svc_serv *xpt_server; /* service for transport */
71 atomic_t xpt_reserved; /* space on outq that is rsvd */ 72 atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c729c3c8549..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
194}; 194};
195 195
196#ifdef CONFIG_SUSPEND 196#ifdef CONFIG_SUSPEND
197extern suspend_state_t mem_sleep_default;
198
199/** 197/**
200 * suspend_set_ops - set platform dependent suspend operations 198 * suspend_set_ops - set platform dependent suspend operations
201 * @ops: The new suspend operations to set. 199 * @ops: The new suspend operations to set.
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fc5848dad7a4..cfc2d9506ce8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
62 62
63/* TCP Fast Open Cookie as stored in memory */ 63/* TCP Fast Open Cookie as stored in memory */
64struct tcp_fastopen_cookie { 64struct tcp_fastopen_cookie {
65 union {
66 u8 val[TCP_FASTOPEN_COOKIE_MAX];
67#if IS_ENABLED(CONFIG_IPV6)
68 struct in6_addr addr;
69#endif
70 };
65 s8 len; 71 s8 len;
66 u8 val[TCP_FASTOPEN_COOKIE_MAX];
67 bool exp; /* In RFC6994 experimental option format */ 72 bool exp; /* In RFC6994 experimental option format */
68}; 73};
69 74
@@ -207,6 +212,8 @@ struct tcp_sock {
207 /* Information of the most recently (s)acked skb */ 212 /* Information of the most recently (s)acked skb */
208 struct tcp_rack { 213 struct tcp_rack {
209 struct skb_mstamp mstamp; /* (Re)sent time of the skb */ 214 struct skb_mstamp mstamp; /* (Re)sent time of the skb */
215 u32 rtt_us; /* Associated RTT */
216 u32 end_seq; /* Ending TCP sequence of the skb */
210 u8 advanced; /* mstamp advanced since last lost marking */ 217 u8 advanced; /* mstamp advanced since last lost marking */
211 u8 reord; /* reordering detected */ 218 u8 reord; /* reordering detected */
212 } rack; 219 } rack;
@@ -215,15 +222,15 @@ struct tcp_sock {
215 u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ 222 u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
216 u8 chrono_type:2, /* current chronograph type */ 223 u8 chrono_type:2, /* current chronograph type */
217 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ 224 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
218 unused:5; 225 fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
226 unused:4;
219 u8 nonagle : 4,/* Disable Nagle algorithm? */ 227 u8 nonagle : 4,/* Disable Nagle algorithm? */
220 thin_lto : 1,/* Use linear timeouts for thin streams */ 228 thin_lto : 1,/* Use linear timeouts for thin streams */
221 thin_dupack : 1,/* Fast retransmit on first dupack */ 229 unused1 : 1,
222 repair : 1, 230 repair : 1,
223 frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ 231 frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
224 u8 repair_queue; 232 u8 repair_queue;
225 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ 233 u8 syn_data:1, /* SYN includes data */
226 syn_data:1, /* SYN includes data */
227 syn_fastopen:1, /* SYN includes Fast Open option */ 234 syn_fastopen:1, /* SYN includes Fast Open option */
228 syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */ 235 syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
229 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ 236 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
@@ -305,7 +312,6 @@ struct tcp_sock {
305 */ 312 */
306 313
307 int lost_cnt_hint; 314 int lost_cnt_hint;
308 u32 retransmit_high; /* L-bits may be on up to this seqno */
309 315
310 u32 prior_ssthresh; /* ssthresh saved at recovery start */ 316 u32 prior_ssthresh; /* ssthresh saved at recovery start */
311 u32 high_seq; /* snd_nxt at onset of congestion */ 317 u32 high_seq; /* snd_nxt at onset of congestion */
@@ -439,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
439 445
440struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); 446struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
441 447
448static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
449{
450 /* We use READ_ONCE() here because socket might not be locked.
451 * This happens for listeners.
452 */
453 u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
454
455 return (user_mss && user_mss < mss) ? user_mss : mss;
456}
442#endif /* _LINUX_TCP_H */ 457#endif /* _LINUX_TCP_H */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 51d601f192d4..5a209b84fd9e 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -20,11 +20,6 @@ struct timer_list {
20 unsigned long data; 20 unsigned long data;
21 u32 flags; 21 u32 flags;
22 22
23#ifdef CONFIG_TIMER_STATS
24 int start_pid;
25 void *start_site;
26 char start_comm[16];
27#endif
28#ifdef CONFIG_LOCKDEP 23#ifdef CONFIG_LOCKDEP
29 struct lockdep_map lockdep_map; 24 struct lockdep_map lockdep_map;
30#endif 25#endif
@@ -197,46 +192,6 @@ extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
197 */ 192 */
198#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) 193#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
199 194
200/*
201 * Timer-statistics info:
202 */
203#ifdef CONFIG_TIMER_STATS
204
205extern int timer_stats_active;
206
207extern void init_timer_stats(void);
208
209extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
210 void *timerf, char *comm, u32 flags);
211
212extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
213 void *addr);
214
215static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
216{
217 if (likely(!timer_stats_active))
218 return;
219 __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
220}
221
222static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
223{
224 timer->start_site = NULL;
225}
226#else
227static inline void init_timer_stats(void)
228{
229}
230
231static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
232{
233}
234
235static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
236{
237}
238#endif
239
240extern void add_timer(struct timer_list *timer); 195extern void add_timer(struct timer_list *timer);
241 196
242extern int try_to_del_timer_sync(struct timer_list *timer); 197extern int try_to_del_timer_sync(struct timer_list *timer);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index be007610ceb0..0f165507495c 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -33,7 +33,8 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
33 unsigned int bitmask_size); 33 unsigned int bitmask_size);
34 34
35const char *trace_print_hex_seq(struct trace_seq *p, 35const char *trace_print_hex_seq(struct trace_seq *p,
36 const unsigned char *buf, int len); 36 const unsigned char *buf, int len,
37 bool concatenate);
37 38
38const char *trace_print_array_seq(struct trace_seq *p, 39const char *trace_print_array_seq(struct trace_seq *p,
39 const void *buf, int count, 40 const void *buf, int count,
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 5dd75fa47dd8..c5fdfcf99828 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -12,16 +12,18 @@ struct ci_hdrc;
12 12
13/** 13/**
14 * struct ci_hdrc_cable - structure for external connector cable state tracking 14 * struct ci_hdrc_cable - structure for external connector cable state tracking
15 * @state: current state of the line 15 * @connected: true if cable is connected, false otherwise
16 * @changed: set to true when extcon event happen 16 * @changed: set to true when extcon event happen
17 * @enabled: set to true if we've enabled the vbus or id interrupt
17 * @edev: device which generate events 18 * @edev: device which generate events
18 * @ci: driver state of the chipidea device 19 * @ci: driver state of the chipidea device
19 * @nb: hold event notification callback 20 * @nb: hold event notification callback
20 * @conn: used for notification registration 21 * @conn: used for notification registration
21 */ 22 */
22struct ci_hdrc_cable { 23struct ci_hdrc_cable {
23 bool state; 24 bool connected;
24 bool changed; 25 bool changed;
26 bool enabled;
25 struct extcon_dev *edev; 27 struct extcon_dev *edev;
26 struct ci_hdrc *ci; 28 struct ci_hdrc *ci;
27 struct notifier_block nb; 29 struct notifier_block nb;
@@ -55,10 +57,11 @@ struct ci_hdrc_platform_data {
55#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9) 57#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9)
56#define CI_HDRC_OVERRIDE_TX_BURST BIT(10) 58#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
57#define CI_HDRC_OVERRIDE_RX_BURST BIT(11) 59#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
60#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
58 enum usb_dr_mode dr_mode; 61 enum usb_dr_mode dr_mode;
59#define CI_HDRC_CONTROLLER_RESET_EVENT 0 62#define CI_HDRC_CONTROLLER_RESET_EVENT 0
60#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 63#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
61 void (*notify_event) (struct ci_hdrc *ci, unsigned event); 64 int (*notify_event) (struct ci_hdrc *ci, unsigned event);
62 struct regulator *reg_vbus; 65 struct regulator *reg_vbus;
63 struct usb_otg_caps ci_otg_caps; 66 struct usb_otg_caps ci_otg_caps;
64 bool tpl_support; 67 bool tpl_support;
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 2d095fc60204..4dff73a89758 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -19,6 +19,30 @@
19#include <uapi/linux/uuid.h> 19#include <uapi/linux/uuid.h>
20 20
21/* 21/*
22 * V1 (time-based) UUID definition [RFC 4122].
23 * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
24 * increments since midnight 15th October 1582
25 * - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
26 * time
27 * - the clock sequence is a 14-bit counter to avoid duplicate times
28 */
29struct uuid_v1 {
30 __be32 time_low; /* low part of timestamp */
31 __be16 time_mid; /* mid part of timestamp */
32 __be16 time_hi_and_version; /* high part of timestamp and version */
33#define UUID_TO_UNIX_TIME 0x01b21dd213814000ULL
34#define UUID_TIMEHI_MASK 0x0fff
35#define UUID_VERSION_TIME 0x1000 /* time-based UUID */
36#define UUID_VERSION_NAME 0x3000 /* name-based UUID */
37#define UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */
38 u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
39#define UUID_CLOCKHI_MASK 0x3f
40#define UUID_VARIANT_STD 0x80
41 u8 clock_seq_low; /* clock seq low */
42 u8 node[6]; /* spatially unique node ID (MAC addr) */
43};
44
45/*
22 * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") 46 * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
23 * not including trailing NUL. 47 * not including trailing NUL.
24 */ 48 */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d5eb5479a425..04b0d3f95043 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -132,12 +132,16 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev)
132 return container_of(_dev, struct virtio_device, dev); 132 return container_of(_dev, struct virtio_device, dev);
133} 133}
134 134
135void virtio_add_status(struct virtio_device *dev, unsigned int status);
135int register_virtio_device(struct virtio_device *dev); 136int register_virtio_device(struct virtio_device *dev);
136void unregister_virtio_device(struct virtio_device *dev); 137void unregister_virtio_device(struct virtio_device *dev);
137 138
138void virtio_break_device(struct virtio_device *dev); 139void virtio_break_device(struct virtio_device *dev);
139 140
140void virtio_config_changed(struct virtio_device *dev); 141void virtio_config_changed(struct virtio_device *dev);
142void virtio_config_disable(struct virtio_device *dev);
143void virtio_config_enable(struct virtio_device *dev);
144int virtio_finalize_features(struct virtio_device *dev);
141#ifdef CONFIG_PM_SLEEP 145#ifdef CONFIG_PM_SLEEP
142int virtio_device_freeze(struct virtio_device *dev); 146int virtio_device_freeze(struct virtio_device *dev);
143int virtio_device_restore(struct virtio_device *dev); 147int virtio_device_restore(struct virtio_device *dev);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 66204007d7ac..5209b5ed2a64 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
56 56
57static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, 57static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
58 struct virtio_net_hdr *hdr, 58 struct virtio_net_hdr *hdr,
59 bool little_endian) 59 bool little_endian,
60 bool has_data_valid)
60{ 61{
61 memset(hdr, 0, sizeof(*hdr)); /* no info leak */ 62 memset(hdr, 0, sizeof(*hdr)); /* no info leak */
62 63
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
91 skb_checksum_start_offset(skb)); 92 skb_checksum_start_offset(skb));
92 hdr->csum_offset = __cpu_to_virtio16(little_endian, 93 hdr->csum_offset = __cpu_to_virtio16(little_endian,
93 skb->csum_offset); 94 skb->csum_offset);
94 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 95 } else if (has_data_valid &&
96 skb->ip_summed == CHECKSUM_UNNECESSARY) {
95 hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; 97 hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
96 } /* else everything is zero */ 98 } /* else everything is zero */
97 99
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 8c589176c2f8..ec5e8bf6118e 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -108,7 +108,6 @@ struct vme_dev {
108}; 108};
109 109
110struct vme_driver { 110struct vme_driver {
111 struct list_head node;
112 const char *name; 111 const char *name;
113 int (*match)(struct vme_dev *); 112 int (*match)(struct vme_dev *);
114 int (*probe)(struct vme_dev *); 113 int (*probe)(struct vme_dev *);
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 1bd31a38c51e..b724ef7005de 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -54,13 +54,6 @@
54#define VMCI_IMR_DATAGRAM 0x1 54#define VMCI_IMR_DATAGRAM 0x1
55#define VMCI_IMR_NOTIFICATION 0x2 55#define VMCI_IMR_NOTIFICATION 0x2
56 56
57/* Interrupt type. */
58enum {
59 VMCI_INTR_TYPE_INTX = 0,
60 VMCI_INTR_TYPE_MSI = 1,
61 VMCI_INTR_TYPE_MSIX = 2,
62};
63
64/* Maximum MSI/MSI-X interrupt vectors in the device. */ 57/* Maximum MSI/MSI-X interrupt vectors in the device. */
65#define VMCI_MAX_INTRS 2 58#define VMCI_MAX_INTRS 2
66 59
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index aa9bfea8804a..0681fe25abeb 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -58,27 +58,28 @@ static inline void vtime_task_switch(struct task_struct *prev)
58 58
59extern void vtime_account_system(struct task_struct *tsk); 59extern void vtime_account_system(struct task_struct *tsk);
60extern void vtime_account_idle(struct task_struct *tsk); 60extern void vtime_account_idle(struct task_struct *tsk);
61extern void vtime_account_user(struct task_struct *tsk);
62 61
63#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ 62#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
64 63
65static inline void vtime_task_switch(struct task_struct *prev) { } 64static inline void vtime_task_switch(struct task_struct *prev) { }
66static inline void vtime_account_system(struct task_struct *tsk) { } 65static inline void vtime_account_system(struct task_struct *tsk) { }
67static inline void vtime_account_user(struct task_struct *tsk) { }
68#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ 66#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
69 67
70#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 68#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
71extern void arch_vtime_task_switch(struct task_struct *tsk); 69extern void arch_vtime_task_switch(struct task_struct *tsk);
70extern void vtime_account_user(struct task_struct *tsk);
72extern void vtime_user_enter(struct task_struct *tsk); 71extern void vtime_user_enter(struct task_struct *tsk);
73 72
74static inline void vtime_user_exit(struct task_struct *tsk) 73static inline void vtime_user_exit(struct task_struct *tsk)
75{ 74{
76 vtime_account_user(tsk); 75 vtime_account_user(tsk);
77} 76}
77
78extern void vtime_guest_enter(struct task_struct *tsk); 78extern void vtime_guest_enter(struct task_struct *tsk);
79extern void vtime_guest_exit(struct task_struct *tsk); 79extern void vtime_guest_exit(struct task_struct *tsk);
80extern void vtime_init_idle(struct task_struct *tsk, int cpu); 80extern void vtime_init_idle(struct task_struct *tsk, int cpu);
81#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ 81#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
82static inline void vtime_account_user(struct task_struct *tsk) { }
82static inline void vtime_user_enter(struct task_struct *tsk) { } 83static inline void vtime_user_enter(struct task_struct *tsk) { }
83static inline void vtime_user_exit(struct task_struct *tsk) { } 84static inline void vtime_user_exit(struct task_struct *tsk) { }
84static inline void vtime_guest_enter(struct task_struct *tsk) { } 85static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -93,9 +94,11 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk)
93 /* On hard|softirq exit we always account to hard|softirq cputime */ 94 /* On hard|softirq exit we always account to hard|softirq cputime */
94 vtime_account_system(tsk); 95 vtime_account_system(tsk);
95} 96}
97extern void vtime_flush(struct task_struct *tsk);
96#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ 98#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
97static inline void vtime_account_irq_enter(struct task_struct *tsk) { } 99static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
98static inline void vtime_account_irq_exit(struct task_struct *tsk) { } 100static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
101static inline void vtime_flush(struct task_struct *tsk) { }
99#endif 102#endif
100 103
101 104
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 7b0066814fa0..5dd9a7682227 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -51,10 +51,10 @@ struct ww_mutex {
51}; 51};
52 52
53#ifdef CONFIG_DEBUG_LOCK_ALLOC 53#ifdef CONFIG_DEBUG_LOCK_ALLOC
54# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \ 54# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
55 , .ww_class = &ww_class 55 , .ww_class = class
56#else 56#else
57# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) 57# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
58#endif 58#endif
59 59
60#define __WW_CLASS_INITIALIZER(ww_class) \ 60#define __WW_CLASS_INITIALIZER(ww_class) \
@@ -63,7 +63,7 @@ struct ww_mutex {
63 , .mutex_name = #ww_class "_mutex" } 63 , .mutex_name = #ww_class "_mutex" }
64 64
65#define __WW_MUTEX_INITIALIZER(lockname, class) \ 65#define __WW_MUTEX_INITIALIZER(lockname, class) \
66 { .base = { \__MUTEX_INITIALIZER(lockname) } \ 66 { .base = __MUTEX_INITIALIZER(lockname.base) \
67 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } 67 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
68 68
69#define DEFINE_WW_CLASS(classname) \ 69#define DEFINE_WW_CLASS(classname) \
@@ -186,11 +186,6 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
186#endif 186#endif
187} 187}
188 188
189extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
190 struct ww_acquire_ctx *ctx);
191extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
192 struct ww_acquire_ctx *ctx);
193
194/** 189/**
195 * ww_mutex_lock - acquire the w/w mutex 190 * ww_mutex_lock - acquire the w/w mutex
196 * @lock: the mutex to be acquired 191 * @lock: the mutex to be acquired
@@ -220,14 +215,7 @@ extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
220 * 215 *
221 * A mutex acquired with this function must be released with ww_mutex_unlock. 216 * A mutex acquired with this function must be released with ww_mutex_unlock.
222 */ 217 */
223static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 218extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
224{
225 if (ctx)
226 return __ww_mutex_lock(lock, ctx);
227
228 mutex_lock(&lock->base);
229 return 0;
230}
231 219
232/** 220/**
233 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible 221 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
@@ -259,14 +247,8 @@ static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ct
259 * 247 *
260 * A mutex acquired with this function must be released with ww_mutex_unlock. 248 * A mutex acquired with this function must be released with ww_mutex_unlock.
261 */ 249 */
262static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, 250extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
263 struct ww_acquire_ctx *ctx) 251 struct ww_acquire_ctx *ctx);
264{
265 if (ctx)
266 return __ww_mutex_lock_interruptible(lock, ctx);
267 else
268 return mutex_lock_interruptible(&lock->base);
269}
270 252
271/** 253/**
272 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex 254 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex