path: root/include
Diffstat (limited to 'include')
-rw-r--r--  include/acpi/video.h                          11
-rw-r--r--  include/dt-bindings/clock/vf610-clock.h        4
-rw-r--r--  include/dt-bindings/pinctrl/am33xx.h           2
-rw-r--r--  include/linux/acpi.h                           1
-rw-r--r--  include/linux/cgroup.h                         3
-rw-r--r--  include/linux/cgroup_subsys.h                 45
-rw-r--r--  include/linux/crc-t10dif.h                     4
-rw-r--r--  include/linux/drbd.h                           6
-rw-r--r--  include/linux/drbd_genl.h                      2
-rw-r--r--  include/linux/drbd_limits.h                    9
-rw-r--r--  include/linux/edac.h                           7
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h  137
-rw-r--r--  include/linux/platform_data/mmc-pxamci.h       2
-rw-r--r--  include/linux/shdma-base.h                     4
-rw-r--r--  include/linux/usb.h                           11
-rw-r--r--  include/trace/events/bcache.h                381
-rw-r--r--  include/trace/ftrace.h                         4
-rw-r--r--  include/uapi/linux/usb/ch11.h                 11
-rw-r--r--  include/xen/interface/io/blkif.h              53
-rw-r--r--  include/xen/interface/io/ring.h                5
20 files changed, 473 insertions, 229 deletions
diff --git a/include/acpi/video.h b/include/acpi/video.h
index b26dc4fb7ba8..61109f2609fc 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -17,21 +17,12 @@ struct acpi_device;
 #define ACPI_VIDEO_DISPLAY_LEGACY_TV	0x0200
 
 #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
-extern int __acpi_video_register(bool backlight_quirks);
-static inline int acpi_video_register(void)
-{
-	return __acpi_video_register(false);
-}
-static inline int acpi_video_register_with_quirks(void)
-{
-	return __acpi_video_register(true);
-}
+extern int acpi_video_register(void);
 extern void acpi_video_unregister(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
 			       int device_id, void **edid);
 #else
 static inline int acpi_video_register(void) { return 0; }
-static inline int acpi_video_register_with_quirks(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
 static inline int acpi_video_get_edid(struct acpi_device *device, int type,
 				      int device_id, void **edid)
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 15e997fa78f2..4aa2b48cd151 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -158,6 +158,8 @@
 #define VF610_CLK_GPU_SEL		145
 #define VF610_CLK_GPU_EN		146
 #define VF610_CLK_GPU2D			147
-#define VF610_CLK_END			148
+#define VF610_CLK_ENET0			148
+#define VF610_CLK_ENET1			149
+#define VF610_CLK_END			150
 
 #endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 469e0325e6f4..2fbc804e1a45 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -5,7 +5,7 @@
 #ifndef _DT_BINDINGS_PINCTRL_AM33XX_H
 #define _DT_BINDINGS_PINCTRL_AM33XX_H
 
-#include <include/dt-bindings/pinctrl/omap.h>
+#include <dt-bindings/pinctrl/omap.h>
 
 /* am33xx specific mux bit defines */
 #undef PULL_ENA
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6ad72f92469c..353ba256f368 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -191,7 +191,6 @@ extern bool wmi_has_guid(const char *guid);
 #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO		0x0200
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR	0x0400
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO	0x0800
-#define ACPI_VIDEO_SKIP_BACKLIGHT		0x1000
 
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 297462b9f41a..e9ac882868c0 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -542,8 +542,7 @@ int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
 
 int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
-int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id,
-				    char *buf, size_t buflen);
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 
 int cgroup_task_count(const struct cgroup *cgrp);
 
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 6e7ec64b69ab..b613ffd402d1 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -1,86 +1,55 @@
-/* Add subsystem definitions of the form SUBSYS(<name>) in this
- * file. Surround each one by a line of comment markers so that
- * patches don't collide
+/*
+ * List of cgroup subsystems.
+ *
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
  */
-
-/* */
-
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CPUSETS)
 SUBSYS(cpuset)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG)
 SUBSYS(debug)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED)
 SUBSYS(cpu_cgroup)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT)
 SUBSYS(cpuacct)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_MEMCG)
 SUBSYS(mem_cgroup)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE)
 SUBSYS(devices)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER)
 SUBSYS(freezer)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP)
 SUBSYS(net_cls)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP)
 SUBSYS(blkio)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
 SUBSYS(perf)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP)
 SUBSYS(net_prio)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB)
 SUBSYS(hugetlb)
 #endif
-
-/* */
-
-#ifdef CONFIG_CGROUP_BCACHE
-SUBSYS(bcache)
-#endif
-
-/* */
+/*
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
+ */
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index b3cb71f0d3b0..a9c96d865ee7 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -3,10 +3,6 @@
 
 #include <linux/types.h>
 
-#define CRC_T10DIF_DIGEST_SIZE 2
-#define CRC_T10DIF_BLOCK_SIZE 1
-
-__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
 __u16 crc_t10dif(unsigned char const *, size_t);
 
 #endif
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 1b4d4ee1168f..de7d74ab3de6 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -177,7 +177,11 @@ enum drbd_ret_code {
 	ERR_NEED_APV_100	= 163,
 	ERR_NEED_ALLOW_TWO_PRI	= 164,
 	ERR_MD_UNCLEAN		= 165,
-
+	ERR_MD_LAYOUT_CONNECTED	= 166,
+	ERR_MD_LAYOUT_TOO_BIG	= 167,
+	ERR_MD_LAYOUT_TOO_SMALL	= 168,
+	ERR_MD_LAYOUT_NO_FIT	= 169,
+	ERR_IMPLICIT_SHRINK	= 170,
 	/* insert new ones above this line */
 	AFTER_LAST_ERR_CODE
 };
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index d0d8fac8a6e4..e8c44572b8cb 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -181,6 +181,8 @@ GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms,
 	__u64_field(1, DRBD_GENLA_F_MANDATORY,	resize_size)
 	__flg_field(2, DRBD_GENLA_F_MANDATORY,	resize_force)
 	__flg_field(3, DRBD_GENLA_F_MANDATORY,	no_resync)
+	__u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF)
+	__u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF)
 )
 
 GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 1fedf2b17cc8..17e50bb00521 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -215,4 +215,13 @@
 #define DRBD_ALWAYS_ASBP_DEF	0
 #define DRBD_USE_RLE_DEF	1
 
+#define DRBD_AL_STRIPES_MIN	1
+#define DRBD_AL_STRIPES_MAX	1024
+#define DRBD_AL_STRIPES_DEF	1
+#define DRBD_AL_STRIPES_SCALE	'1'
+
+#define DRBD_AL_STRIPE_SIZE_MIN	  4
+#define DRBD_AL_STRIPE_SIZE_MAX	  16777216
+#define DRBD_AL_STRIPE_SIZE_DEF	  32
+#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
 #endif
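For orientation, the activity-log geometry these new limits describe is simply stripes times stripe size, with the stripe size scaled in KiB (scale 'k'). A minimal standalone sketch of that arithmetic, using the defaults quoted above (the macro values are repeated locally so the example compiles on its own):

```c
#include <stdio.h>

/* Mirrors the defaults added in drbd_limits.h above. */
#define DRBD_AL_STRIPES_DEF	1
#define DRBD_AL_STRIPE_SIZE_DEF	32	/* scale 'k', i.e. KiB */

int main(void)
{
	unsigned long long al_kib =
		(unsigned long long)DRBD_AL_STRIPES_DEF * DRBD_AL_STRIPE_SIZE_DEF;

	/* 1 stripe x 32 KiB = 32 KiB of on-disk activity log with the defaults. */
	printf("activity log: %llu KiB\n", al_kib);
	return 0;
}
```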
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 0b763276f619..5c6d7fbaf89e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -622,7 +622,7 @@ struct edac_raw_error_desc {
  */
 struct mem_ctl_info {
 	struct device			dev;
-	struct bus_type			bus;
+	struct bus_type			*bus;
 
 	struct list_head link;	/* for global list of mem_ctl_info structs */
 
@@ -742,4 +742,9 @@ struct mem_ctl_info {
 #endif
 };
 
+/*
+ * Maximum number of memory controllers in the coherent fabric.
+ */
+#define EDAC_MAX_MCS	16
+
 #endif
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index dab34a1deb2c..b6bdcd66c07d 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -103,15 +103,15 @@
 #define IMX6Q_GPR1_EXC_MON_MASK			BIT(22)
 #define IMX6Q_GPR1_EXC_MON_OKAY			0x0
 #define IMX6Q_GPR1_EXC_MON_SLVE			BIT(22)
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_MASK		BIT(21)
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_GASKET		0x0
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_IOMUX		BIT(21)
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK		BIT(20)
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET		0x0
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX		BIT(20)
-#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK		BIT(19)
-#define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET		0x0
-#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX		BIT(19)
+#define IMX6Q_GPR1_ENET_CLK_SEL_MASK		BIT(21)
+#define IMX6Q_GPR1_ENET_CLK_SEL_PAD		0
+#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP		BIT(21)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK		BIT(20)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET		0x0
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX		BIT(20)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK		BIT(19)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET		0x0
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX		BIT(19)
 #define IMX6Q_GPR1_PCIE_TEST_PD			BIT(18)
 #define IMX6Q_GPR1_IPU_VPU_MUX_MASK		BIT(17)
 #define IMX6Q_GPR1_IPU_VPU_MUX_IPU1		0x0
@@ -279,41 +279,88 @@
 #define IMX6Q_GPR13_CAN2_STOP_REQ		BIT(29)
 #define IMX6Q_GPR13_CAN1_STOP_REQ		BIT(28)
 #define IMX6Q_GPR13_ENET_STOP_REQ		BIT(27)
-#define IMX6Q_GPR13_SATA_PHY_8_MASK		(0x7 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_0_5_DB		(0x0 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_1_0_DB		(0x1 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_1_5_DB		(0x2 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_2_0_DB		(0x3 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_2_5_DB		(0x4 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_3_0_DB		(0x5 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_3_5_DB		(0x6 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_4_0_DB		(0x7 << 24)
-#define IMX6Q_GPR13_SATA_PHY_7_MASK		(0x1f << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1I		(0x10 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1M		(0x10 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1X		(0x1a << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2I		(0x12 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2M		(0x12 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2X		(0x1a << 19)
-#define IMX6Q_GPR13_SATA_PHY_6_MASK		(0x7 << 16)
-#define IMX6Q_GPR13_SATA_SPEED_MASK		BIT(15)
-#define IMX6Q_GPR13_SATA_SPEED_1P5G		0x0
-#define IMX6Q_GPR13_SATA_SPEED_3P0G		BIT(15)
-#define IMX6Q_GPR13_SATA_PHY_5			BIT(14)
-#define IMX6Q_GPR13_SATA_PHY_4_MASK		(0x7 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_16_16		(0x0 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_14_16		(0x1 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_12_16		(0x2 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_10_16		(0x3 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_9_16		(0x4 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_8_16		(0x5 << 11)
-#define IMX6Q_GPR13_SATA_PHY_3_MASK		(0xf << 7)
-#define IMX6Q_GPR13_SATA_PHY_3_OFF		0x7
-#define IMX6Q_GPR13_SATA_PHY_2_MASK		(0x1f << 2)
-#define IMX6Q_GPR13_SATA_PHY_2_OFF		0x2
-#define IMX6Q_GPR13_SATA_PHY_1_MASK		(0x3 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_FAST		(0x0 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_MED		(0x1 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_SLOW		(0x2 << 0)
-
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK		(0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB	(0x0 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB	(0x1 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB	(0x2 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB	(0x3 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB	(0x4 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB	(0x5 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB	(0x6 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB	(0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK	(0x1f << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I	(0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M	(0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X	(0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I	(0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M	(0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X	(0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK	(0x7 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F	(0x0 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F	(0x1 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F	(0x2 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F	(0x3 << 16)
+#define IMX6Q_GPR13_SATA_SPD_MODE_MASK		BIT(15)
+#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G		0x0
+#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G		BIT(15)
+#define IMX6Q_GPR13_SATA_MPLL_SS_EN		BIT(14)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK		(0x7 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16		(0x0 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16		(0x1 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16		(0x2 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16		(0x3 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16		(0x4 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16		(0x5 << 11)
+#define IMX6Q_GPR13_SATA_TX_BOOST_MASK		(0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB	(0x0 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB	(0x1 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB	(0x2 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB	(0x3 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB	(0x4 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB	(0x5 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB	(0x6 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB	(0x7 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB	(0x8 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB	(0x9 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB	(0xa << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB	(0xb << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB	(0xc << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB	(0xd << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB	(0xe << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB	(0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_LVL_MASK		(0x1f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V		(0x00 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V		(0x01 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V		(0x02 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V		(0x03 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V		(0x04 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V		(0x05 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V		(0x06 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V		(0x07 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V		(0x08 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V		(0x09 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V		(0x0a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V		(0x0b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V		(0x0c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V		(0x0d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V		(0x0e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V		(0x0f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V		(0x10 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V		(0x11 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V		(0x12 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_123_V		(0x13 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V		(0x14 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V		(0x15 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_152_V		(0x16 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V		(0x17 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V		(0x18 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V		(0x19 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V		(0x1a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V		(0x1b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V		(0x1c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V		(0x1d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V		(0x1e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V		(0x1f << 2)
+#define IMX6Q_GPR13_SATA_MPLL_CLK_EN		BIT(1)
+#define IMX6Q_GPR13_SATA_TX_EDGE_RATE		BIT(0)
 #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 9eb515bb799d..1706b3597ce0 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -12,7 +12,7 @@ struct pxamci_platform_data {
 	unsigned long detect_delay_ms;		/* delay in millisecond before detecting cards after interrupt */
 	int (*init)(struct device *, irq_handler_t , void *);
 	int (*get_ro)(struct device *);
-	void (*setpower)(struct device *, unsigned int);
+	int (*setpower)(struct device *, unsigned int);
 	void (*exit)(struct device *, void *);
 	int gpio_card_detect;			/* gpio detecting card insertion */
 	int gpio_card_ro;			/* gpio detecting read only toggle */
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 382cf710ca9a..5b1c9848124c 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -124,6 +124,10 @@ void shdma_chan_remove(struct shdma_chan *schan);
 int shdma_init(struct device *dev, struct shdma_dev *sdev,
 	       int chan_num);
 void shdma_cleanup(struct shdma_dev *sdev);
+#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
 bool shdma_chan_filter(struct dma_chan *chan, void *arg);
+#else
+#define shdma_chan_filter NULL
+#endif
 
 #endif
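For context on the new fallback: with CONFIG_SH_DMAE_BASE disabled, shdma_chan_filter becomes a NULL define, so client code built for both configurations can keep passing it to the generic dmaengine request helpers. A minimal sketch of such a caller, assuming a made-up slave ID purely for illustration:

```c
#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

#define EXAMPLE_SLAVE_ID	0	/* hypothetical; real IDs come from platform data or DT */

static struct dma_chan *example_request_shdma_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * When CONFIG_SH_DMAE_BASE=n, shdma_chan_filter is the NULL define
	 * added above, so this still compiles and simply requests any
	 * DMA_SLAVE-capable channel instead of filtering for shdma ones.
	 */
	return dma_request_channel(mask, shdma_chan_filter,
				   (void *)(unsigned long)EXAMPLE_SLAVE_ID);
}
```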
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a232b7ece1f6..0eec2689b955 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -367,17 +367,6 @@ struct usb_bus {
 
 /* ----------------------------------------------------------------------- */
 
-/* This is arbitrary.
- * From USB 2.0 spec Table 11-13, offset 7, a hub can
- * have up to 255 ports. The most yet reported is 10.
- *
- * Current Wireless USB host hardware (Intel i1480 for example) allows
- * up to 22 devices to connect. Upcoming hardware might raise that
- * limit. Because the arrays need to add a bit for hub status data, we
- * do 31, so plus one evens out to four bytes.
- */
-#define USB_MAXCHILDREN		(31)
-
 struct usb_tt;
 
 enum usb_device_removable {
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 3cc5a0b278c3..5ebda976ea93 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -9,9 +9,7 @@
 struct search;
 
 DECLARE_EVENT_CLASS(bcache_request,
-
 	TP_PROTO(struct search *s, struct bio *bio),
-
 	TP_ARGS(s, bio),
 
 	TP_STRUCT__entry(
@@ -22,7 +20,6 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__field(dev_t,		orig_sector		)
 		__field(unsigned int,	nr_sector		)
 		__array(char,		rwbs,	6		)
-		__array(char,		comm,	TASK_COMM_LEN	)
 	),
 
 	TP_fast_assign(
@@ -33,36 +30,66 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->orig_sector	= bio->bi_sector - 16;
 		__entry->nr_sector	= bio->bi_size >> 9;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
-	TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)",
+	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm,
-		  __entry->orig_major, __entry->orig_minor,
+		  __entry->rwbs, (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
 		  (unsigned long long)__entry->orig_sector)
 );
 
-DEFINE_EVENT(bcache_request, bcache_request_start,
+DECLARE_EVENT_CLASS(bkey,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k),
 
-	TP_PROTO(struct search *s, struct bio *bio),
+	TP_STRUCT__entry(
+		__field(u32,	size			)
+		__field(u32,	inode			)
+		__field(u64,	offset			)
+		__field(bool,	dirty			)
+	),
 
-	TP_ARGS(s, bio)
+	TP_fast_assign(
+		__entry->inode	= KEY_INODE(k);
+		__entry->offset	= KEY_OFFSET(k);
+		__entry->size	= KEY_SIZE(k);
+		__entry->dirty	= KEY_DIRTY(k);
+	),
+
+	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
+		  __entry->offset, __entry->size, __entry->dirty)
 );
 
-DEFINE_EVENT(bcache_request, bcache_request_end,
+DECLARE_EVENT_CLASS(btree_node,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b),
+
+	TP_STRUCT__entry(
+		__field(size_t,	bucket			)
+	),
 
+	TP_fast_assign(
+		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
+	),
+
+	TP_printk("bucket %zu", __entry->bucket)
+);
+
+/* request.c */
+
+DEFINE_EVENT(bcache_request, bcache_request_start,
 	TP_PROTO(struct search *s, struct bio *bio),
+	TP_ARGS(s, bio)
+);
 
+DEFINE_EVENT(bcache_request, bcache_request_end,
+	TP_PROTO(struct search *s, struct bio *bio),
 	TP_ARGS(s, bio)
 );
 
 DECLARE_EVENT_CLASS(bcache_bio,
-
 	TP_PROTO(struct bio *bio),
-
 	TP_ARGS(bio),
 
 	TP_STRUCT__entry(
@@ -70,7 +97,6 @@ DECLARE_EVENT_CLASS(bcache_bio,
 		__field(sector_t,	sector			)
 		__field(unsigned int,	nr_sector		)
 		__array(char,		rwbs,	6		)
-		__array(char,		comm,	TASK_COMM_LEN	)
 	),
 
 	TP_fast_assign(
@@ -78,191 +104,328 @@ DECLARE_EVENT_CLASS(bcache_bio,
 		__entry->sector		= bio->bi_sector;
 		__entry->nr_sector	= bio->bi_size >> 9;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
-	TP_printk("%d,%d %s %llu + %u [%s]",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm)
+	TP_printk("%d,%d %s %llu + %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+		  (unsigned long long)__entry->sector, __entry->nr_sector)
 );
 
-
-DEFINE_EVENT(bcache_bio, bcache_passthrough,
-
+DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
 	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
+);
 
+DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
+	TP_PROTO(struct bio *bio),
 	TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_cache_hit,
+TRACE_EVENT(bcache_read,
+	TP_PROTO(struct bio *bio, bool hit, bool bypass),
+	TP_ARGS(bio, hit, bypass),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(sector_t,	sector			)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__field(bool,		cache_hit		)
+		__field(bool,		bypass			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->cache_hit = hit;
+		__entry->bypass = bypass;
+	),
+
+	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs, (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_cache_miss,
+TRACE_EVENT(bcache_write,
+	TP_PROTO(struct bio *bio, bool writeback, bool bypass),
+	TP_ARGS(bio, writeback, bypass),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(sector_t,	sector			)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__field(bool,		writeback		)
+		__field(bool,		bypass			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->writeback = writeback;
+		__entry->bypass = bypass;
+	),
+
+	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs, (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->writeback, __entry->bypass)
 );
 
 DEFINE_EVENT(bcache_bio, bcache_read_retry,
-
 	TP_PROTO(struct bio *bio),
-
 	TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_writethrough,
+DEFINE_EVENT(bkey, bcache_cache_insert,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	TP_PROTO(struct bio *bio),
+/* Journal */
 
-	TP_ARGS(bio)
-);
+DECLARE_EVENT_CLASS(cache_set,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c),
 
-DEFINE_EVENT(bcache_bio, bcache_writeback,
+	TP_STRUCT__entry(
+		__array(char,		uuid,	16 )
+	),
 
-	TP_PROTO(struct bio *bio),
+	TP_fast_assign(
+		memcpy(__entry->uuid, c->sb.set_uuid, 16);
+	),
 
-	TP_ARGS(bio)
+	TP_printk("%pU", __entry->uuid)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_write_skip,
-
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(bkey, bcache_journal_replay_key,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(cache_set, bcache_journal_full,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_btree_read,
+DEFINE_EVENT(cache_set, bcache_journal_entry_full,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
+DEFINE_EVENT(bcache_bio, bcache_journal_write,
 	TP_PROTO(struct bio *bio),
-
 	TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_btree_write,
+/* Btree */
 
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(btree_node, bcache_btree_read,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_write_dirty,
+TRACE_EVENT(bcache_btree_write,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(size_t,		bucket			)
+		__field(unsigned,	block			)
+		__field(unsigned,	keys			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
+		__entry->block	= b->written;
+		__entry->keys	= b->sets[b->nsets].data->keys;
+	),
+
+	TP_printk("bucket %zu", __entry->bucket)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_read_dirty,
+DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
+);
 
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(btree_node, bcache_btree_node_free,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_write_moving,
+TRACE_EVENT(bcache_btree_gc_coalesce,
+	TP_PROTO(unsigned nodes),
+	TP_ARGS(nodes),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(unsigned,	nodes			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->nodes	= nodes;
+	),
+
+	TP_printk("coalesced %u nodes", __entry->nodes)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_read_moving,
+DEFINE_EVENT(cache_set, bcache_gc_start,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(cache_set, bcache_gc_end,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(bkey, bcache_gc_copy,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_journal_write,
+DEFINE_EVENT(bkey, bcache_gc_copy_collision,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	TP_PROTO(struct bio *bio),
+TRACE_EVENT(bcache_btree_insert_key,
+	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
+	TP_ARGS(b, k, op, status),
 
-	TP_ARGS(bio)
-);
+	TP_STRUCT__entry(
+		__field(u64,	btree_node	)
+		__field(u32,	btree_level	)
+		__field(u32,	inode		)
+		__field(u64,	offset		)
+		__field(u32,	size		)
+		__field(u8,	dirty		)
+		__field(u8,	op		)
+		__field(u8,	status		)
+	),
 
-DECLARE_EVENT_CLASS(bcache_cache_bio,
+	TP_fast_assign(
+		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
+		__entry->btree_level = b->level;
+		__entry->inode	= KEY_INODE(k);
+		__entry->offset	= KEY_OFFSET(k);
+		__entry->size	= KEY_SIZE(k);
+		__entry->dirty	= KEY_DIRTY(k);
+		__entry->op = op;
+		__entry->status = status;
+	),
 
-	TP_PROTO(struct bio *bio,
-		 sector_t orig_sector,
-		 struct block_device* orig_bdev),
+	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
+		  __entry->status, __entry->op,
+		  __entry->btree_node, __entry->btree_level,
+		  __entry->inode, __entry->offset,
+		  __entry->size, __entry->dirty)
+);
 
-	TP_ARGS(bio, orig_sector, orig_bdev),
+DECLARE_EVENT_CLASS(btree_split,
+	TP_PROTO(struct btree *b, unsigned keys),
+	TP_ARGS(b, keys),
 
 	TP_STRUCT__entry(
-		__field(dev_t,		dev			)
-		__field(dev_t,		orig_dev		)
-		__field(sector_t,	sector			)
-		__field(sector_t,	orig_sector		)
-		__field(unsigned int,	nr_sector		)
-		__array(char,		rwbs,	6		)
-		__array(char,		comm,	TASK_COMM_LEN	)
+		__field(size_t,		bucket			)
+		__field(unsigned,	keys			)
 	),
 
 	TP_fast_assign(
-		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->orig_dev	= orig_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->orig_sector	= orig_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
+		__entry->keys	= keys;
 	),
 
-	TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d %llu)",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm,
-		  MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev),
-		  (unsigned long long)__entry->orig_sector)
+	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
 );
 
-DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert,
-
-	TP_PROTO(struct bio *bio,
-		 sector_t orig_sector,
-		 struct block_device *orig_bdev),
+DEFINE_EVENT(btree_split, bcache_btree_node_split,
+	TP_PROTO(struct btree *b, unsigned keys),
+	TP_ARGS(b, keys)
+);
 
-	TP_ARGS(bio, orig_sector, orig_bdev)
+DEFINE_EVENT(btree_split, bcache_btree_node_compact,
+	TP_PROTO(struct btree *b, unsigned keys),
+	TP_ARGS(b, keys)
 );
 
-DECLARE_EVENT_CLASS(bcache_gc,
+DEFINE_EVENT(btree_node, bcache_btree_set_root,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
+);
 
-	TP_PROTO(uint8_t *uuid),
+/* Allocator */
 
-	TP_ARGS(uuid),
+TRACE_EVENT(bcache_alloc_invalidate,
+	TP_PROTO(struct cache *ca),
+	TP_ARGS(ca),
 
 	TP_STRUCT__entry(
-		__field(uint8_t *,	uuid)
+		__field(unsigned,	free			)
+		__field(unsigned,	free_inc		)
+		__field(unsigned,	free_inc_size		)
+		__field(unsigned,	unused			)
 	),
 
 	TP_fast_assign(
-		__entry->uuid		= uuid;
+		__entry->free		= fifo_used(&ca->free);
+		__entry->free_inc	= fifo_used(&ca->free_inc);
+		__entry->free_inc_size	= ca->free_inc.size;
+		__entry->unused		= fifo_used(&ca->unused);
 	),
 
-	TP_printk("%pU", __entry->uuid)
+	TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
+		  __entry->free_inc, __entry->free_inc_size, __entry->unused)
 );
 
+TRACE_EVENT(bcache_alloc_fail,
+	TP_PROTO(struct cache *ca),
+	TP_ARGS(ca),
 
-DEFINE_EVENT(bcache_gc, bcache_gc_start,
+	TP_STRUCT__entry(
+		__field(unsigned,	free			)
+		__field(unsigned,	free_inc		)
+		__field(unsigned,	unused			)
+		__field(unsigned,	blocked			)
+	),
 
-	TP_PROTO(uint8_t *uuid),
+	TP_fast_assign(
+		__entry->free		= fifo_used(&ca->free);
+		__entry->free_inc	= fifo_used(&ca->free_inc);
+		__entry->unused		= fifo_used(&ca->unused);
+		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
+	),
 
-	TP_ARGS(uuid)
+	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
+		  __entry->free_inc, __entry->unused, __entry->blocked)
 );
 
-DEFINE_EVENT(bcache_gc, bcache_gc_end,
+/* Background writeback */
 
-	TP_PROTO(uint8_t *uuid),
+DEFINE_EVENT(bkey, bcache_writeback,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	TP_ARGS(uuid)
+DEFINE_EVENT(bkey, bcache_writeback_collision,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
 );
 
 #endif /* _TRACE_BCACHE_H */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index d615f78cc6b6..41a6643e2136 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -670,10 +670,6 @@ perf_trace_##call(void *__data, proto) \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
-		      "profile buffer not large enough"))		\
-		return;							\
-									\
 	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
 		__entry_size, event_call->event.type, &__regs, &rctx);	\
 	if (!entry)							\
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 7692dc69ccf7..331499d597fa 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -11,6 +11,17 @@
 
 #include <linux/types.h>	/* __u8 etc */
 
+/* This is arbitrary.
+ * From USB 2.0 spec Table 11-13, offset 7, a hub can
+ * have up to 255 ports. The most yet reported is 10.
+ *
+ * Current Wireless USB host hardware (Intel i1480 for example) allows
+ * up to 22 devices to connect. Upcoming hardware might raise that
+ * limit. Because the arrays need to add a bit for hub status data, we
+ * use 31, so plus one evens out to four bytes.
+ */
+#define USB_MAXCHILDREN		31
+
 /*
  * Hub request types
  */
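As a quick check of the arithmetic in the moved comment (one status bit per port, plus the extra bit of hub status data), a standalone sketch; the macro value is repeated locally so the example compiles on its own:

```c
#include <stdio.h>

#define USB_MAXCHILDREN	31	/* mirrors the define moved into ch11.h above */

int main(void)
{
	/* 31 port bits plus one hub-status bit == 32 bits == 4 bytes,
	 * which is the size of the per-hub status/change bitmaps. */
	printf("%u bytes\n", (unsigned)((USB_MAXCHILDREN + 1) / 8));
	return 0;
}
```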
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ffd4652de91c..65e12099ef89 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -103,12 +103,46 @@ typedef uint64_t blkif_sector_t;
 #define BLKIF_OP_DISCARD           5
 
 /*
+ * Recognized if "feature-max-indirect-segments" in present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * This pages are filled with an array of blkif_request_segment_aligned
+ * that hold the information about the segments. The number of indirect
+ * pages to use is determined by the maximum number of segments
+ * a indirect request contains. Every indirect page can contain a maximum
+ * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
+ * so to calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments/512).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT          6
+
+/*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
  * NB. This could be 12 if the ring indexes weren't stored in the same page.
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment_aligned {
+	grant_ref_t gref;        /* reference to I/O buffer frame        */
+	/* @first_sect: first sector in frame to transfer (inclusive).   */
+	/* @last_sect: last sector in frame to transfer (inclusive).     */
+	uint8_t     first_sect, last_sect;
+	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
+} __attribute__((__packed__));
+
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
@@ -147,12 +181,31 @@ struct blkif_request_other {
 	uint64_t       id;           /* private guest value, echoed in resp  */
 } __attribute__((__packed__));
 
+struct blkif_request_indirect {
+	uint8_t        indirect_op;
+	uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+	uint64_t       id;
+	blkif_sector_t sector_number;
+	blkif_vdev_t   handle;
+	uint16_t       _pad2;
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+	uint32_t       _pad3;        /* make it 64 byte aligned */
+#else
+	uint64_t       _pad3;        /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	union {
 		struct blkif_request_rw rw;
 		struct blkif_request_discard discard;
 		struct blkif_request_other other;
+		struct blkif_request_indirect indirect;
 	} u;
 } __attribute__((__packed__));
 
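The comment added above fixes the per-page capacity at PAGE_SIZE/sizeof(struct blkif_request_segment_aligned), which is 512 for 4 KiB pages, and asks frontends to use ceil(indirect_segments/512) indirect pages. A minimal sketch of that sizing arithmetic, assuming 4 KiB pages; the helper name is illustrative and not part of the interface:

```c
#include <linux/kernel.h>		/* DIV_ROUND_UP() */
#include <asm/page.h>			/* PAGE_SIZE */
#include <xen/interface/io/blkif.h>

/* With 4 KiB pages: 4096 / sizeof(struct blkif_request_segment_aligned) == 512. */
#define EXAMPLE_SEGS_PER_INDIRECT_PAGE \
	(PAGE_SIZE / sizeof(struct blkif_request_segment_aligned))

/*
 * Illustrative helper, not part of the interface: how many indirect pages
 * (entries of indirect_grefs[]) a request with nr_segments data segments
 * needs, i.e. ceil(nr_segments / 512) as described in the comment above.
 */
static inline unsigned int example_blkif_indirect_pages(unsigned int nr_segments)
{
	return DIV_ROUND_UP(nr_segments, EXAMPLE_SEGS_PER_INDIRECT_PAGE);
}
```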
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 75271b9a8f61..7d28aff605c7 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -188,6 +188,11 @@ struct __name##_back_ring { \
 #define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
     (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
 
+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)				\
+    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+
 #define RING_PUSH_REQUESTS(_r) do {					\
     wmb(); /* back sees requests /before/ updated producer index */	\
     (_r)->sring->req_prod = (_r)->req_prod_pvt;				\
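The new RING_REQUEST_PROD_OVERFLOW() check lets a backend reject a producer index that claims more outstanding requests than the ring has slots, even though the counters are free-running and wrap. A small standalone sketch of that arithmetic (stand-in values only, not the ring.h macros themselves):

```c
#include <stdio.h>

#define EXAMPLE_RING_SIZE 32u	/* assume a 32-entry ring for illustration */

/* Mirrors RING_REQUEST_PROD_OVERFLOW(): more requests in flight than slots? */
static int example_prod_overflow(unsigned int rsp_prod_pvt, unsigned int prod)
{
	/* Unsigned subtraction still gives the claimed in-flight count
	 * even when the free-running counters have wrapped around. */
	return (prod - rsp_prod_pvt) > EXAMPLE_RING_SIZE;
}

int main(void)
{
	unsigned int rsp_prod_pvt = 0xfffffff0u;	/* responses produced so far */

	/* Exactly a ring's worth outstanding: plausible, not flagged. */
	printf("%d\n", example_prod_overflow(rsp_prod_pvt,
					     rsp_prod_pvt + EXAMPLE_RING_SIZE));
	/* One more than the ring can hold: ill-behaved frontend, flagged. */
	printf("%d\n", example_prod_overflow(rsp_prod_pvt,
					     rsp_prod_pvt + EXAMPLE_RING_SIZE + 1));
	return 0;
}
```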