-rw-r--r--  Documentation/filesystems/ntfs.txt | 3
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  arch/sh/boards/Kconfig | 18
-rw-r--r--  arch/sh/boards/Makefile | 2
-rw-r--r--  arch/sh/boards/board-apsh4a3a.c | 175
-rw-r--r--  arch/sh/boards/board-apsh4ad0a.c | 125
-rw-r--r--  arch/sh/configs/apsh4a3a_defconfig | 102
-rw-r--r--  arch/sh/configs/apsh4ad0a_defconfig | 133
-rw-r--r--  arch/sh/tools/mach-types | 3
-rw-r--r--  drivers/block/Kconfig | 1
-rw-r--r--  drivers/char/agp/intel-agp.h | 2
-rw-r--r--  drivers/char/agp/intel-gtt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 87
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 156
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 119
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 269
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 434
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 255
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 33
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/pci/Kconfig | 1
-rw-r--r--  drivers/xen/Kconfig | 11
-rw-r--r--  drivers/xen/xenbus/Makefile | 5
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 351
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.h | 31
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_backend.c | 276
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_frontend.c | 294
-rw-r--r--  fs/ntfs/Makefile | 2
-rw-r--r--  fs/ntfs/file.c | 35
-rw-r--r--  fs/ntfs/super.c | 6
-rw-r--r--  include/xen/xenbus.h | 2
-rw-r--r--  tools/power/x86/turbostat/Makefile | 8
-rw-r--r--  tools/power/x86/turbostat/turbostat.8 | 172
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 1048
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/Makefile | 8
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 | 104
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c | 325
-rwxr-xr-x  tools/testing/ktest/compare-ktest-sample.pl | 30
-rwxr-xr-x  tools/testing/ktest/ktest.pl | 2023
-rw-r--r--  tools/testing/ktest/sample.conf | 622
54 files changed, 6668 insertions, 974 deletions
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index ac2a261c5f7d..6ef8cf3bc9a3 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -457,6 +457,9 @@ ChangeLog
 
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.1.30:
+	- Fix writev() (it kept writing the first segment over and over again
+	  instead of moving onto subsequent segments).
 2.1.29:
 	- Fix a deadlock when mounting read-write.
 2.1.28:
diff --git a/MAINTAINERS b/MAINTAINERS
index 42f991e5a85d..64d7621ab35b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4383,11 +4383,11 @@ F: Documentation/scsi/NinjaSCSI.txt
 F:	drivers/scsi/nsp32*
 
 NTFS FILESYSTEM
-M:	Anton Altaparmakov <aia21@cantab.net>
+M:	Anton Altaparmakov <anton@tuxera.com>
 L:	linux-ntfs-dev@lists.sourceforge.net
-W:	http://www.linux-ntfs.org/
+W:	http://www.tuxera.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
-S:	Maintained
+S:	Supported
 F:	Documentation/filesystems/ntfs.txt
 F:	fs/ntfs/
 
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 2018c7ea4c93..d893411022d5 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -3,6 +3,9 @@ menu "Board support"
 config SOLUTION_ENGINE
 	bool
 
+config SH_ALPHA_BOARD
+	bool
+
 config SH_SOLUTION_ENGINE
 	bool "SolutionEngine"
 	select SOLUTION_ENGINE
@@ -320,6 +323,21 @@ config SH_SH2007
 	  Compact Flash socket, two serial ports and PC-104 bus.
 	  More information at <http://sh2000.sh-linux.org>.
 
+config SH_APSH4A3A
+	bool "AP-SH4A-3A"
+	select SH_ALPHA_BOARD
+	depends on CPU_SUBTYPE_SH7785
+	help
+	  Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A.
+
+config SH_APSH4AD0A
+	bool "AP-SH4AD-0A"
+	select SH_ALPHA_BOARD
+	select SYS_SUPPORTS_PCI
+	depends on CPU_SUBTYPE_SH7786
+	help
+	  Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A.
+
 endmenu
 
 source "arch/sh/boards/mach-r2d/Kconfig"
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index be7d11d04b26..975a0f64ff20 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_SH_ESPT) += board-espt.o
 obj-$(CONFIG_SH_POLARIS)	+= board-polaris.o
 obj-$(CONFIG_SH_TITAN)		+= board-titan.o
 obj-$(CONFIG_SH_SH7757LCR)	+= board-sh7757lcr.o
+obj-$(CONFIG_SH_APSH4A3A)	+= board-apsh4a3a.o
+obj-$(CONFIG_SH_APSH4AD0A)	+= board-apsh4ad0a.o
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c
new file mode 100644
index 000000000000..8e2a27057bc9
--- /dev/null
+++ b/arch/sh/boards/board-apsh4a3a.c
@@ -0,0 +1,175 @@
+/*
+ * ALPHAPROJECT AP-SH4A-3A Support.
+ *
+ * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
+ * Copyright (C) 2008 Yoshihiro Shimoda
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/physmap.h>
+#include <linux/smsc911x.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+#include <asm/clock.h>
+
+static struct mtd_partition nor_flash_partitions[] = {
+	{
+		.name		= "loader",
+		.offset		= 0x00000000,
+		.size		= 512 * 1024,
+	},
+	{
+		.name		= "bootenv",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 512 * 1024,
+	},
+	{
+		.name		= "kernel",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 4 * 1024 * 1024,
+	},
+	{
+		.name		= "data",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+	},
+};
+
+static struct physmap_flash_data nor_flash_data = {
+	.width		= 4,
+	.parts		= nor_flash_partitions,
+	.nr_parts	= ARRAY_SIZE(nor_flash_partitions),
+};
+
+static struct resource nor_flash_resources[] = {
+	[0] = {
+		.start	= 0x00000000,
+		.end	= 0x01000000 - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device nor_flash_device = {
+	.name		= "physmap-flash",
+	.dev		= {
+		.platform_data	= &nor_flash_data,
+	},
+	.num_resources	= ARRAY_SIZE(nor_flash_resources),
+	.resource	= nor_flash_resources,
+};
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.name		= "smsc911x-memory",
+		.start		= 0xA4000000,
+		.end		= 0xA4000000 + SZ_256 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name		= "smsc911x-irq",
+		.start		= evt2irq(0x200),
+		.end		= evt2irq(0x200),
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= SMSC911X_USE_16BIT,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smsc911x_resources),
+	.resource	= smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
+};
+
+static struct platform_device *apsh4a3a_devices[] __initdata = {
+	&nor_flash_device,
+	&smsc911x_device,
+};
+
+static int __init apsh4a3a_devices_setup(void)
+{
+	return platform_add_devices(apsh4a3a_devices,
+				    ARRAY_SIZE(apsh4a3a_devices));
+}
+device_initcall(apsh4a3a_devices_setup);
+
+static int apsh4a3a_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333000);
+	clk_put(clk);
+
+	return ret;
+}
+
+/* Initialize the board */
+static void __init apsh4a3a_setup(char **cmdline_p)
+{
+	printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n");
+}
+
+static void __init apsh4a3a_init_irq(void)
+{
+	plat_irq_setup_pins(IRQ_MODE_IRQ7654);
+}
+
+/* Return the board specific boot mode pin configuration */
+static int apsh4a3a_mode_pins(void)
+{
+	int value = 0;
+
+	/* These are the factory default settings of SW1 and SW2.
+	 * If you change these dip switches then you will need to
+	 * adjust the values below as well.
+	 */
+	value &= ~MODE_PIN0;	/* Clock Mode 16 */
+	value &= ~MODE_PIN1;
+	value &= ~MODE_PIN2;
+	value &= ~MODE_PIN3;
+	value |= MODE_PIN4;
+	value &= ~MODE_PIN5;	/* 16-bit Area0 bus width */
+	value |= MODE_PIN6;	/* Area 0 SRAM interface */
+	value |= MODE_PIN7;
+	value |= MODE_PIN8;	/* Little Endian */
+	value |= MODE_PIN9;	/* Master Mode */
+	value |= MODE_PIN10;	/* Crystal resonator */
+	value |= MODE_PIN11;	/* Display Unit */
+	value |= MODE_PIN12;
+	value &= ~MODE_PIN13;	/* 29-bit address mode */
+	value |= MODE_PIN14;	/* No PLL step-up */
+
+	return value;
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_apsh4a3a __initmv = {
+	.mv_name	= "AP-SH4A-3A",
+	.mv_setup	= apsh4a3a_setup,
+	.mv_clk_init	= apsh4a3a_clk_init,
+	.mv_init_irq	= apsh4a3a_init_irq,
+	.mv_mode_pins	= apsh4a3a_mode_pins,
+};
diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c
new file mode 100644
index 000000000000..e2bd218a054e
--- /dev/null
+++ b/arch/sh/boards/board-apsh4ad0a.c
@@ -0,0 +1,125 @@
+/*
+ * ALPHAPROJECT AP-SH4AD-0A Support.
+ *
+ * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
+ * Copyright (C) 2010 Matt Fleming
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/smsc911x.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.name		= "smsc911x-memory",
+		.start		= 0xA4000000,
+		.end		= 0xA4000000 + SZ_256 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name		= "smsc911x-irq",
+		.start		= evt2irq(0x200),
+		.end		= evt2irq(0x200),
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= SMSC911X_USE_16BIT,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smsc911x_resources),
+	.resource	= smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
+};
+
+static struct platform_device *apsh4ad0a_devices[] __initdata = {
+	&smsc911x_device,
+};
+
+static int __init apsh4ad0a_devices_setup(void)
+{
+	return platform_add_devices(apsh4ad0a_devices,
+				    ARRAY_SIZE(apsh4ad0a_devices));
+}
+device_initcall(apsh4ad0a_devices_setup);
+
+static int apsh4ad0a_mode_pins(void)
+{
+	int value = 0;
+
+	/* These are the factory default settings of SW1 and SW2.
+	 * If you change these dip switches then you will need to
+	 * adjust the values below as well.
+	 */
+	value |= MODE_PIN0;	/* Clock Mode 3 */
+	value |= MODE_PIN1;
+	value &= ~MODE_PIN2;
+	value &= ~MODE_PIN3;
+	value &= ~MODE_PIN4;	/* 16-bit Area0 bus width */
+	value |= MODE_PIN5;
+	value |= MODE_PIN6;
+	value |= MODE_PIN7;	/* Normal mode */
+	value |= MODE_PIN8;	/* Little Endian */
+	value |= MODE_PIN9;	/* Crystal resonator */
+	value &= ~MODE_PIN10;	/* 29-bit address mode */
+	value &= ~MODE_PIN11;	/* PCI-E Root port */
+	value &= ~MODE_PIN12;	/* 4 lane + 1 lane */
+	value |= MODE_PIN13;	/* AUD Enable */
+	value &= ~MODE_PIN14;	/* Normal Operation */
+
+	return value;
+}
+
+static int apsh4ad0a_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333000);
+	clk_put(clk);
+
+	return ret;
+}
+
+/* Initialize the board */
+static void __init apsh4ad0a_setup(char **cmdline_p)
+{
+	pr_info("Alpha Project AP-SH4AD-0A support:\n");
+}
+
+static void __init apsh4ad0a_init_irq(void)
+{
+	plat_irq_setup_pins(IRQ_MODE_IRQ3210);
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_apsh4ad0a __initmv = {
+	.mv_name	= "AP-SH4AD-0A",
+	.mv_setup	= apsh4ad0a_setup,
+	.mv_mode_pins	= apsh4ad0a_mode_pins,
+	.mv_clk_init	= apsh4ad0a_clk_init,
+	.mv_init_irq	= apsh4ad0a_init_irq,
+};
diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig
new file mode 100644
index 000000000000..6cb327977d13
--- /dev/null
+++ b/arch/sh/configs/apsh4a3a_defconfig
@@ -0,0 +1,102 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CPU_SUBTYPE_SH7785=y
+CONFIG_MEMORY_START=0x0C000000
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_SH_APSH4A3A=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KEXEC=y
+CONFIG_PREEMPT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_SH7785FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_CIFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
new file mode 100644
index 000000000000..e71a531f1e31
--- /dev/null
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -0,0 +1,133 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_RCU_TRACE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_BLK_CGROUP=y
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_CPU_SUBTYPE_SH7786=y
+CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_HUGETLB_PAGE_SIZE_1MB=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_SH_APSH4AD0A=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_SH_CPU_FREQ=y
+CONFIG_KEXEC=y
+CONFIG_SECCOMP=y
+CONFIG_PREEMPT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_VERBOSE=y
+CONFIG_PM_RUNTIME=y
+CONFIG_CPU_IDLE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_SH7785FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_CIFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DWARF_UNWINDER=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 0e68465e7b50..6dd56c4d0054 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -9,6 +9,7 @@ SE SH_SOLUTION_ENGINE
 HIGHLANDER		SH_HIGHLANDER
 RTS7751R2D		SH_RTS7751R2D
 RSK			SH_RSK
+ALPHA_BOARD		SH_ALPHA_BOARD
 
 #
 # List of companion chips / MFDs.
@@ -61,3 +62,5 @@ ESPT SH_ESPT
 POLARIS			SH_POLARIS
 KFR2R09			SH_KFR2R09
 ECOVEC			SH_ECOVEC
+APSH4A3A		SH_APSH4A3A
+APSH4AD0A		SH_APSH4AD0A
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4b9359a6f6ca..83c32cb72582 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -464,6 +464,7 @@ config XEN_BLKDEV_FRONTEND
 	tristate "Xen virtual block device support"
 	depends on XEN
 	default y
+	select XEN_XENBUS_FRONTEND
 	help
 	  This driver implements the front-end of the Xen virtual
 	  block device driver.  It communicates with a back-end driver
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 010e3defd6c3..c195bfeade11 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -94,6 +94,8 @@
 #define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
 #define G4x_GMCH_SIZE_VT_2M	(0xc << 8)
 
+#define GFX_FLSH_CNTL		0x2170 /* 915+ */
+
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
 #define I810_DRAM_ROW_0_SDRAM	0x00000001
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 356f73e0d17e..e921b693412b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -688,14 +688,14 @@ static int intel_gtt_init(void)
 
 	intel_private.base.stolen_size = intel_gtt_stolen_size();
 
+	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
 		intel_gtt_cleanup();
 		return ret;
 	}
 
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
-
 	return 0;
 }
 
@@ -814,6 +814,12 @@ static bool intel_enable_gtt(void)
 		}
 	}
 
+	/* On the resume path we may be adjusting the PGTBL value, so
+	 * be paranoid and flush all chipset write buffers...
+	 */
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
 	reg = intel_private.registers+I810_PGETBL_CTL;
 	writel(intel_private.PGETBL_save, reg);
 	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
@@ -823,6 +829,9 @@ static bool intel_enable_gtt(void)
 		return false;
 	}
 
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
 	return true;
 }
 
@@ -991,14 +1000,14 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		return 0;
 
+	intel_gtt_clear_range(pg_start, mem->page_count);
+
 	if (intel_private.base.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
 	}
 
-	intel_gtt_clear_range(pg_start, mem->page_count);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92f75782c332..19a3d58044dd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 	}
 }
 
+static const char *agp_type_str(int type)
+{
+	switch (type) {
+	case 0: return " uncached";
+	case 1: return " snooped";
+	default: return "";
+	}
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -118,6 +127,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
 		   obj->last_fenced_seqno,
+		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -276,6 +286,37 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	return 0;
 }
 
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
@@ -456,8 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (IS_GEN6(dev)) {
+			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+				   dev_priv->ring[i].name,
+				   I915_READ_IMR(&dev_priv->ring[i]));
+		}
 		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+	}
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -656,7 +703,7 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s",
+		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -666,7 +713,8 @@ static void print_error_buffers(struct seq_file *m,
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
 			   purgeable_flag(err->purgeable),
-			   ring_str(err->ring));
+			   ring_str(err->ring),
+			   agp_type_str(err->agp_type));
 
 		if (err->name)
 			seq_printf(m, " (name: %d)", err->name);
@@ -744,7 +792,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		if (error->batchbuffer[i]) {
 			struct drm_i915_error_object *obj = error->batchbuffer[i];
 
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -890,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u32 rstdbyctl = I915_READ(RSTDBYCTL);
 	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -913,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+	seq_printf(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_printf(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_printf(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_printf(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_printf(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_printf(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_printf(m, "RC3 (RC6+)\n");
+		break;
+	default:
+		seq_printf(m, "unknown\n");
+		break;
+	}
 
 	return 0;
 }
@@ -1187,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0568dbdc10ef..844f3c972b04 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1962,13 +1962,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
-	if (dev_priv->has_gem == 0 &&
-	    drm_core_check_feature(dev, DRIVER_MODESET)) {
-		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
-		ret = -ENODEV;
-		goto out_workqueue_free;
-	}
-
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
@@ -2055,7 +2048,6 @@ out_gem_unload:
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
-out_workqueue_free:
 	destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 872493331988..0de75a23f8e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
+bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -352,6 +355,9 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
+
+		if (dev_priv->renderctx && dev_priv->pwrctx)
+			ironlake_enable_rc6(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -475,6 +481,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	bool need_display = true;
 	int ret;
 
+	if (!i915_try_reset)
+		return 0;
+
 	if (!mutex_trylock(&dev->struct_mutex))
 		return -EBUSY;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aac1bf332f75..385fc7ec39d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,20 +172,21 @@ struct drm_i915_error_state {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer[2];
+	} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
-		size_t size;
+		u32 size;
 		u32 name;
 		u32 seqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
-		u32 fence_reg;
+		s32 fence_reg:5;
 		s32 pinned:2;
 		u32 tiling:2;
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 ring:4;
+		u32 agp_type:1;
 	} *active_bo, *pinned_bo;
 	u32 active_bo_count, pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
@@ -332,6 +333,7 @@ typedef struct drm_i915_private {
 
 	/* LVDS info */
 	int backlight_level;  /* restore backlight to this value */
+	bool backlight_enabled;
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -794,6 +796,7 @@ struct drm_i915_gem_object {
 	 */
 	struct hlist_node exec_node;
 	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
 
 	/**
 	 * Current offset of the object in GTT space.
@@ -1006,12 +1009,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
-					 u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
-					  u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1091,10 +1088,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring,
-			 uint32_t invalidate_domains,
-			 uint32_t flush_domains);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1265,6 +1262,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c79c0b62ef60..3dfc848ff755 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
 					     bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 						      uint64_t offset,
 						      uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 				       unsigned alignment,
 				       bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -1935,6 +1935,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	bool idle;
+	int i;
 
 	dev_priv = container_of(work, drm_i915_private_t,
 				mm.retire_work.work);
@@ -1948,11 +1950,31 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
 	i915_gem_retire_requests(dev);
 
-	if (!dev_priv->mm.suspended &&
-	    (!list_empty(&dev_priv->ring[RCS].request_list) ||
-	     !list_empty(&dev_priv->ring[VCS].request_list) ||
-	     !list_empty(&dev_priv->ring[BCS].request_list)))
+	/* Send a periodic flush down the ring so we don't hold onto GEM
+	 * objects indefinitely.
+	 */
+	idle = true;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (!list_empty(&ring->gpu_write_list)) {
+			struct drm_i915_gem_request *request;
+			int ret;
+
+			ret = i915_gem_flush_ring(dev, ring, 0,
+						  I915_GEM_GPU_DOMAINS);
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (ret || request == NULL ||
+			    i915_add_request(dev, NULL, request, ring))
+				kfree(request);
+		}
+
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -2142,25 +2164,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	ring->flush(ring, invalidate_domains, flush_domains);
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
 	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list))
-		i915_gem_flush_ring(dev, ring,
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
 			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2370,10 +2404,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	int ret;
 
 	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(obj->base.dev,
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
 					    obj->last_fenced_ring,
 					    0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
 
 		obj->fenced_gpu_access = false;
 	}
@@ -2393,6 +2430,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		obj->last_fenced_ring = NULL;
 	}
 
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+		mb();
+
 	return 0;
 }
 
@@ -2523,9 +2566,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			return ret;
 	} else if (obj->tiling_changed) {
 		if (obj->fenced_gpu_access) {
-			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-				i915_gem_flush_ring(obj->base.dev, obj->ring,
+			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+				ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
 						    0, obj->base.write_domain);
+				if (ret)
+					return ret;
+			}
 
 			obj->fenced_gpu_access = false;
 		}
@@ -2736,10 +2782,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	obj->gtt_space = NULL;
 
 	if (ret == -ENOMEM) {
-		/* first try to clear up some space from the GTT */
-		ret = i915_gem_evict_something(dev, size,
-					       alignment,
-					       map_and_fenceable);
+		/* first try to reclaim some memory by clearing the GTT */
+		ret = i915_gem_evict_everything(dev, false);
 		if (ret) {
 			/* now try to shrink everyone else */
 			if (gfpmask) {
@@ -2747,7 +2791,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 				goto search_free;
 			}
 
-			return ret;
+			return -ENOMEM;
 		}
 
 		goto search_free;
@@ -2762,9 +2806,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
 
-		ret = i915_gem_evict_something(dev, size,
-					       alignment, map_and_fenceable);
-		if (ret)
+		if (i915_gem_evict_everything(dev, false))
 			return ret;
 
 		goto search_free;
@@ -2811,17 +2853,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-	BUG_ON(obj->base.write_domain);
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2833,10 +2874,16 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
 	/* No actual flushing is required for the GTT write domain.  Writes
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
 	 */
+	wmb();
+
 	i915_gem_release_mmap(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -2882,7 +2929,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
@@ -2927,7 +2977,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
@@ -2952,12 +3005,17 @@ int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, obj->ring,
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
 				    0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
 	return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2974,7 +3032,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3069,7 +3130,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3362,8 +3426,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * flush earlier is beneficial.
 	 */
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		i915_gem_flush_ring(dev, obj->ring,
+		ret = i915_gem_flush_ring(dev, obj->ring,
 				    0, obj->base.write_domain);
 	} else if (obj->ring->outstanding_lazy_request ==
 		   obj->last_rendering_seqno) {
 		struct drm_i915_gem_request *request;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 78b8cf90c922..3d39005540aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -127,9 +127,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	}
 
 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry(obj, &unwind_list, exec_list) {
+	while (!list_empty(&unwind_list)) {
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+
 		ret = drm_mm_scan_remove_block(obj->gtt_space);
 		BUG_ON(ret);
+
+		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
 	}
 
@@ -162,6 +168,7 @@ found:
 				       exec_list);
 		if (ret == 0)
 			ret = i915_gem_object_unbind(obj);
+
 		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 61129e6759eb..e69834341ef0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -268,7 +268,6 @@ eb_destroy(struct eb_objects *eb)
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_objects *eb,
-				   struct drm_i915_gem_exec_object2 *entry,
 				   struct drm_i915_gem_relocation_entry *reloc)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -411,10 +410,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
411 410
412static int 411static int
413i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 412i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
414 struct eb_objects *eb, 413 struct eb_objects *eb)
415 struct drm_i915_gem_exec_object2 *entry)
416{ 414{
417 struct drm_i915_gem_relocation_entry __user *user_relocs; 415 struct drm_i915_gem_relocation_entry __user *user_relocs;
416 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
418 int i, ret; 417 int i, ret;
419 418
420 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; 419 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
@@ -426,7 +425,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
426 sizeof(reloc))) 425 sizeof(reloc)))
427 return -EFAULT; 426 return -EFAULT;
428 427
429 ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc); 428 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
430 if (ret) 429 if (ret)
431 return ret; 430 return ret;
432 431
@@ -442,13 +441,13 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
442static int 441static int
443i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, 442i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
444 struct eb_objects *eb, 443 struct eb_objects *eb,
445 struct drm_i915_gem_exec_object2 *entry,
446 struct drm_i915_gem_relocation_entry *relocs) 444 struct drm_i915_gem_relocation_entry *relocs)
447{ 445{
446 const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
448 int i, ret; 447 int i, ret;
449 448
450 for (i = 0; i < entry->relocation_count; i++) { 449 for (i = 0; i < entry->relocation_count; i++) {
451 ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]); 450 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
452 if (ret) 451 if (ret)
453 return ret; 452 return ret;
454 } 453 }
@@ -459,8 +458,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
459static int 458static int
460i915_gem_execbuffer_relocate(struct drm_device *dev, 459i915_gem_execbuffer_relocate(struct drm_device *dev,
461 struct eb_objects *eb, 460 struct eb_objects *eb,
462 struct list_head *objects, 461 struct list_head *objects)
463 struct drm_i915_gem_exec_object2 *exec)
464{ 462{
465 struct drm_i915_gem_object *obj; 463 struct drm_i915_gem_object *obj;
466 int ret; 464 int ret;
@@ -468,7 +466,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
468 list_for_each_entry(obj, objects, exec_list) { 466 list_for_each_entry(obj, objects, exec_list) {
469 obj->base.pending_read_domains = 0; 467 obj->base.pending_read_domains = 0;
470 obj->base.pending_write_domain = 0; 468 obj->base.pending_write_domain = 0;
471 ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++); 469 ret = i915_gem_execbuffer_relocate_object(obj, eb);
472 if (ret) 470 if (ret)
473 return ret; 471 return ret;
474 } 472 }
@@ -479,13 +477,36 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
479static int 477static int
480i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 478i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
481 struct drm_file *file, 479 struct drm_file *file,
482 struct list_head *objects, 480 struct list_head *objects)
483 struct drm_i915_gem_exec_object2 *exec)
484{ 481{
485 struct drm_i915_gem_object *obj; 482 struct drm_i915_gem_object *obj;
486 struct drm_i915_gem_exec_object2 *entry;
487 int ret, retry; 483 int ret, retry;
488 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 484 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
485 struct list_head ordered_objects;
486
487 INIT_LIST_HEAD(&ordered_objects);
488 while (!list_empty(objects)) {
489 struct drm_i915_gem_exec_object2 *entry;
490 bool need_fence, need_mappable;
491
492 obj = list_first_entry(objects,
493 struct drm_i915_gem_object,
494 exec_list);
495 entry = obj->exec_entry;
496
497 need_fence =
498 has_fenced_gpu_access &&
499 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
500 obj->tiling_mode != I915_TILING_NONE;
501 need_mappable =
502 entry->relocation_count ? true : need_fence;
503
504 if (need_mappable)
505 list_move(&obj->exec_list, &ordered_objects);
506 else
507 list_move_tail(&obj->exec_list, &ordered_objects);
508 }
509 list_splice(&ordered_objects, objects);
489 510
490 /* Attempt to pin all of the buffers into the GTT. 511 /* Attempt to pin all of the buffers into the GTT.
491 * This is done in 3 phases: 512 * This is done in 3 phases:
@@ -504,14 +525,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
504 ret = 0; 525 ret = 0;
505 526
506 /* Unbind any ill-fitting objects or pin. */ 527 /* Unbind any ill-fitting objects or pin. */
507 entry = exec;
508 list_for_each_entry(obj, objects, exec_list) { 528 list_for_each_entry(obj, objects, exec_list) {
529 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
509 bool need_fence, need_mappable; 530 bool need_fence, need_mappable;
510 531 if (!obj->gtt_space)
511 if (!obj->gtt_space) {
512 entry++;
513 continue; 532 continue;
514 }
515 533
516 need_fence = 534 need_fence =
517 has_fenced_gpu_access && 535 has_fenced_gpu_access &&
@@ -534,8 +552,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
534 } 552 }
535 553
536 /* Bind fresh objects */ 554 /* Bind fresh objects */
537 entry = exec;
538 list_for_each_entry(obj, objects, exec_list) { 555 list_for_each_entry(obj, objects, exec_list) {
556 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
539 bool need_fence; 557 bool need_fence;
540 558
541 need_fence = 559 need_fence =
@@ -570,7 +588,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
570 } 588 }
571 589
572 entry->offset = obj->gtt_offset; 590 entry->offset = obj->gtt_offset;
573 entry++;
574 } 591 }
575 592
576 /* Decrement pin count for bound objects */ 593 /* Decrement pin count for bound objects */
@@ -622,7 +639,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
622 int i, total, ret; 639 int i, total, ret;
623 640
624 /* We may process another execbuffer during the unlock... */ 641 /* We may process another execbuffer during the unlock... */
625 while (list_empty(objects)) { 642 while (!list_empty(objects)) {
626 obj = list_first_entry(objects, 643 obj = list_first_entry(objects,
627 struct drm_i915_gem_object, 644 struct drm_i915_gem_object,
628 exec_list); 645 exec_list);
@@ -665,7 +682,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
665 } 682 }
666 683
667 /* reacquire the objects */ 684 /* reacquire the objects */
668 INIT_LIST_HEAD(objects);
669 eb_reset(eb); 685 eb_reset(eb);
670 for (i = 0; i < count; i++) { 686 for (i = 0; i < count; i++) {
671 struct drm_i915_gem_object *obj; 687 struct drm_i915_gem_object *obj;
@@ -681,10 +697,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
681 697
682 list_add_tail(&obj->exec_list, objects); 698 list_add_tail(&obj->exec_list, objects);
683 obj->exec_handle = exec[i].handle; 699 obj->exec_handle = exec[i].handle;
700 obj->exec_entry = &exec[i];
684 eb_add_object(eb, obj); 701 eb_add_object(eb, obj);
685 } 702 }
686 703
687 ret = i915_gem_execbuffer_reserve(ring, file, objects, exec); 704 ret = i915_gem_execbuffer_reserve(ring, file, objects);
688 if (ret) 705 if (ret)
689 goto err; 706 goto err;
690 707
@@ -693,7 +710,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
693 obj->base.pending_read_domains = 0; 710 obj->base.pending_read_domains = 0;
694 obj->base.pending_write_domain = 0; 711 obj->base.pending_write_domain = 0;
695 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 712 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
696 exec,
697 reloc + total); 713 reloc + total);
698 if (ret) 714 if (ret)
699 goto err; 715 goto err;
@@ -713,25 +729,34 @@ err:
713 return ret; 729 return ret;
714} 730}
715 731
716static void 732static int
717i915_gem_execbuffer_flush(struct drm_device *dev, 733i915_gem_execbuffer_flush(struct drm_device *dev,
718 uint32_t invalidate_domains, 734 uint32_t invalidate_domains,
719 uint32_t flush_domains, 735 uint32_t flush_domains,
720 uint32_t flush_rings) 736 uint32_t flush_rings)
721{ 737{
722 drm_i915_private_t *dev_priv = dev->dev_private; 738 drm_i915_private_t *dev_priv = dev->dev_private;
723 int i; 739 int i, ret;
724 740
725 if (flush_domains & I915_GEM_DOMAIN_CPU) 741 if (flush_domains & I915_GEM_DOMAIN_CPU)
726 intel_gtt_chipset_flush(); 742 intel_gtt_chipset_flush();
727 743
744 if (flush_domains & I915_GEM_DOMAIN_GTT)
745 wmb();
746
728 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { 747 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
729 for (i = 0; i < I915_NUM_RINGS; i++) 748 for (i = 0; i < I915_NUM_RINGS; i++)
730 if (flush_rings & (1 << i)) 749 if (flush_rings & (1 << i)) {
731 i915_gem_flush_ring(dev, &dev_priv->ring[i], 750 ret = i915_gem_flush_ring(dev,
732 invalidate_domains, 751 &dev_priv->ring[i],
733 flush_domains); 752 invalidate_domains,
753 flush_domains);
754 if (ret)
755 return ret;
756 }
734 } 757 }
758
759 return 0;
735} 760}
736 761
737static int 762static int
@@ -795,10 +820,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
795 cd.invalidate_domains, 820 cd.invalidate_domains,
796 cd.flush_domains); 821 cd.flush_domains);
797#endif 822#endif
798 i915_gem_execbuffer_flush(ring->dev, 823 ret = i915_gem_execbuffer_flush(ring->dev,
799 cd.invalidate_domains, 824 cd.invalidate_domains,
800 cd.flush_domains, 825 cd.flush_domains,
801 cd.flush_rings); 826 cd.flush_rings);
827 if (ret)
828 return ret;
802 } 829 }
803 830
804 list_for_each_entry(obj, objects, exec_list) { 831 list_for_each_entry(obj, objects, exec_list) {
@@ -921,7 +948,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
921 struct intel_ring_buffer *ring) 948 struct intel_ring_buffer *ring)
922{ 949{
923 struct drm_i915_gem_request *request; 950 struct drm_i915_gem_request *request;
924 u32 flush_domains; 951 u32 invalidate;
925 952
926 /* 953 /*
927 * Ensure that the commands in the batch buffer are 954 * Ensure that the commands in the batch buffer are
@@ -929,11 +956,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
929 * 956 *
930 * The sampler always gets flushed on i965 (sigh). 957 * The sampler always gets flushed on i965 (sigh).
931 */ 958 */
932 flush_domains = 0; 959 invalidate = I915_GEM_DOMAIN_COMMAND;
933 if (INTEL_INFO(dev)->gen >= 4) 960 if (INTEL_INFO(dev)->gen >= 4)
934 flush_domains |= I915_GEM_DOMAIN_SAMPLER; 961 invalidate |= I915_GEM_DOMAIN_SAMPLER;
935 962 if (ring->flush(ring, invalidate, 0)) {
936 ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); 963 i915_gem_next_request_seqno(dev, ring);
964 return;
965 }
937 966
938 /* Add a breadcrumb for the completion of the batch buffer */ 967 /* Add a breadcrumb for the completion of the batch buffer */
939 request = kzalloc(sizeof(*request), GFP_KERNEL); 968 request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -1098,16 +1127,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1098 1127
1099 list_add_tail(&obj->exec_list, &objects); 1128 list_add_tail(&obj->exec_list, &objects);
1100 obj->exec_handle = exec[i].handle; 1129 obj->exec_handle = exec[i].handle;
1130 obj->exec_entry = &exec[i];
1101 eb_add_object(eb, obj); 1131 eb_add_object(eb, obj);
1102 } 1132 }
1103 1133
1134 /* take note of the batch buffer before we might reorder the lists */
1135 batch_obj = list_entry(objects.prev,
1136 struct drm_i915_gem_object,
1137 exec_list);
1138
1104 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1139 /* Move the objects en-masse into the GTT, evicting if necessary. */
1105 ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec); 1140 ret = i915_gem_execbuffer_reserve(ring, file, &objects);
1106 if (ret) 1141 if (ret)
1107 goto err; 1142 goto err;
1108 1143
1109 /* The objects are in their final locations, apply the relocations. */ 1144 /* The objects are in their final locations, apply the relocations. */
1110 ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec); 1145 ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
1111 if (ret) { 1146 if (ret) {
1112 if (ret == -EFAULT) { 1147 if (ret == -EFAULT) {
1113 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, 1148 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
@@ -1121,9 +1156,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1121 } 1156 }
1122 1157
1123 /* Set the pending read domains for the batch buffer to COMMAND */ 1158 /* Set the pending read domains for the batch buffer to COMMAND */
1124 batch_obj = list_entry(objects.prev,
1125 struct drm_i915_gem_object,
1126 exec_list);
1127 if (batch_obj->base.pending_write_domain) { 1159 if (batch_obj->base.pending_write_domain) {
1128 DRM_ERROR("Attempting to use self-modifying batch buffer\n"); 1160 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
1129 ret = -EINVAL; 1161 ret = -EINVAL;
@@ -1340,4 +1372,3 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1340 drm_free_large(exec2_list); 1372 drm_free_large(exec2_list);
1341 return ret; 1373 return ret;
1342} 1374}
1343
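With the per-object exec_entry pointer in place, i915_gem_execbuffer_reserve() above first reorders the object list so that buffers needing a mappable (fence-capable) GTT slot are handled before the rest. A sketch of that stable two-pass ordering over an array; the need_mappable field and the handle values are invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define N 6

struct object {
        int handle;
        bool need_mappable;
};

/* Order the objects so that those needing the mappable aperture come first,
 * preserving relative order inside each group (like the list_move /
 * list_move_tail pass in the patch). */
static void order_objects(const struct object *in, struct object *out, int n)
{
        int k = 0;

        for (int i = 0; i < n; i++)         /* first pass: mappable objects */
                if (in[i].need_mappable)
                        out[k++] = in[i];
        for (int i = 0; i < n; i++)         /* second pass: everything else */
                if (!in[i].need_mappable)
                        out[k++] = in[i];
}

int main(void)
{
        struct object in[N] = {
                {1, false}, {2, true}, {3, false},
                {4, true},  {5, true}, {6, false},
        };
        struct object out[N];

        order_objects(in, out, N);
        for (int i = 0; i < N; i++)
                printf("%d%c", out[i].handle, i == N - 1 ? '\n' : ' ');
        return 0;                           /* prints: 2 4 5 1 3 6 */
}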
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 86673e77d7cb..70433ae50ac8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -85,15 +85,11 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
85 85
86void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 86void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
87{ 87{
88 struct drm_device *dev = obj->base.dev; 88 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
89 struct drm_i915_private *dev_priv = dev->dev_private; 89 obj->base.size >> PAGE_SHIFT);
90 90
91 if (dev_priv->mm.gtt->needs_dmar) { 91 if (obj->sg_list) {
92 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); 92 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
93 obj->sg_list = NULL; 93 obj->sg_list = NULL;
94 obj->num_sg = 0;
95 } 94 }
96
97 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
98 obj->base.size >> PAGE_SHIFT);
99} 95}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0dadc025b77b..e418e8bb61e6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,26 +64,6 @@
64#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ 64#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
65 DRM_I915_VBLANK_PIPE_B) 65 DRM_I915_VBLANK_PIPE_B)
66 66
67void
68ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
69{
70 if ((dev_priv->gt_irq_mask & mask) != 0) {
71 dev_priv->gt_irq_mask &= ~mask;
72 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
73 POSTING_READ(GTIMR);
74 }
75}
76
77void
78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
79{
80 if ((dev_priv->gt_irq_mask & mask) != mask) {
81 dev_priv->gt_irq_mask |= mask;
82 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
83 POSTING_READ(GTIMR);
84 }
85}
86
87/* For display hotplug interrupt */ 67/* For display hotplug interrupt */
88static void 68static void
89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 69ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -105,26 +85,6 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
105 } 85 }
106} 86}
107 87
108void
109i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
110{
111 if ((dev_priv->irq_mask & mask) != 0) {
112 dev_priv->irq_mask &= ~mask;
113 I915_WRITE(IMR, dev_priv->irq_mask);
114 POSTING_READ(IMR);
115 }
116}
117
118void
119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
120{
121 if ((dev_priv->irq_mask & mask) != mask) {
122 dev_priv->irq_mask |= mask;
123 I915_WRITE(IMR, dev_priv->irq_mask);
124 POSTING_READ(IMR);
125 }
126}
127
128static inline u32 88static inline u32
129i915_pipestat(int pipe) 89i915_pipestat(int pipe)
130{ 90{
@@ -389,9 +349,12 @@ static void notify_ring(struct drm_device *dev,
389{ 349{
390 struct drm_i915_private *dev_priv = dev->dev_private; 350 struct drm_i915_private *dev_priv = dev->dev_private;
391 u32 seqno = ring->get_seqno(ring); 351 u32 seqno = ring->get_seqno(ring);
392 ring->irq_seqno = seqno; 352
393 trace_i915_gem_request_complete(dev, seqno); 353 trace_i915_gem_request_complete(dev, seqno);
354
355 ring->irq_seqno = seqno;
394 wake_up_all(&ring->irq_queue); 356 wake_up_all(&ring->irq_queue);
357
395 dev_priv->hangcheck_count = 0; 358 dev_priv->hangcheck_count = 0;
396 mod_timer(&dev_priv->hangcheck_timer, 359 mod_timer(&dev_priv->hangcheck_timer,
397 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 360 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -435,6 +398,50 @@ static void gen6_pm_irq_handler(struct drm_device *dev)
435 I915_WRITE(GEN6_PMIIR, pm_iir); 398 I915_WRITE(GEN6_PMIIR, pm_iir);
436} 399}
437 400
401static void pch_irq_handler(struct drm_device *dev)
402{
403 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
404 u32 pch_iir;
405
406 pch_iir = I915_READ(SDEIIR);
407
408 if (pch_iir & SDE_AUDIO_POWER_MASK)
409 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
410 (pch_iir & SDE_AUDIO_POWER_MASK) >>
411 SDE_AUDIO_POWER_SHIFT);
412
413 if (pch_iir & SDE_GMBUS)
414 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
415
416 if (pch_iir & SDE_AUDIO_HDCP_MASK)
417 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
418
419 if (pch_iir & SDE_AUDIO_TRANS_MASK)
420 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
421
422 if (pch_iir & SDE_POISON)
423 DRM_ERROR("PCH poison interrupt\n");
424
425 if (pch_iir & SDE_FDI_MASK) {
426 u32 fdia, fdib;
427
428 fdia = I915_READ(FDI_RXA_IIR);
429 fdib = I915_READ(FDI_RXB_IIR);
430 DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
431 }
432
433 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
434 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
435
436 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
437 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
438
439 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
440 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
441 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
442 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
443}
444
438static irqreturn_t ironlake_irq_handler(struct drm_device *dev) 445static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
439{ 446{
440 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 447 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -502,8 +509,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
502 drm_handle_vblank(dev, 1); 509 drm_handle_vblank(dev, 1);
503 510
504 /* check event from PCH */ 511 /* check event from PCH */
505 if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) 512 if (de_iir & DE_PCH_EVENT) {
506 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 513 if (pch_iir & hotplug_mask)
514 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
515 pch_irq_handler(dev);
516 }
507 517
508 if (de_iir & DE_PCU_EVENT) { 518 if (de_iir & DE_PCU_EVENT) {
509 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 519 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -556,10 +566,9 @@ static void i915_error_work_func(struct work_struct *work)
556 566
557#ifdef CONFIG_DEBUG_FS 567#ifdef CONFIG_DEBUG_FS
558static struct drm_i915_error_object * 568static struct drm_i915_error_object *
559i915_error_object_create(struct drm_device *dev, 569i915_error_object_create(struct drm_i915_private *dev_priv,
560 struct drm_i915_gem_object *src) 570 struct drm_i915_gem_object *src)
561{ 571{
562 drm_i915_private_t *dev_priv = dev->dev_private;
563 struct drm_i915_error_object *dst; 572 struct drm_i915_error_object *dst;
564 int page, page_count; 573 int page, page_count;
565 u32 reloc_offset; 574 u32 reloc_offset;
@@ -632,52 +641,6 @@ i915_error_state_free(struct drm_device *dev,
632 kfree(error); 641 kfree(error);
633} 642}
634 643
635static u32
636i915_get_bbaddr(struct drm_device *dev, u32 *ring)
637{
638 u32 cmd;
639
640 if (IS_I830(dev) || IS_845G(dev))
641 cmd = MI_BATCH_BUFFER;
642 else if (INTEL_INFO(dev)->gen >= 4)
643 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
644 MI_BATCH_NON_SECURE_I965);
645 else
646 cmd = (MI_BATCH_BUFFER_START | (2 << 6));
647
648 return ring[0] == cmd ? ring[1] : 0;
649}
650
651static u32
652i915_ringbuffer_last_batch(struct drm_device *dev,
653 struct intel_ring_buffer *ring)
654{
655 struct drm_i915_private *dev_priv = dev->dev_private;
656 u32 head, bbaddr;
657 u32 *val;
658
659 /* Locate the current position in the ringbuffer and walk back
660 * to find the most recently dispatched batch buffer.
661 */
662 head = I915_READ_HEAD(ring) & HEAD_ADDR;
663
664 val = (u32 *)(ring->virtual_start + head);
665 while (--val >= (u32 *)ring->virtual_start) {
666 bbaddr = i915_get_bbaddr(dev, val);
667 if (bbaddr)
668 return bbaddr;
669 }
670
671 val = (u32 *)(ring->virtual_start + ring->size);
672 while (--val >= (u32 *)ring->virtual_start) {
673 bbaddr = i915_get_bbaddr(dev, val);
674 if (bbaddr)
675 return bbaddr;
676 }
677
678 return 0;
679}
680
681static u32 capture_bo_list(struct drm_i915_error_buffer *err, 644static u32 capture_bo_list(struct drm_i915_error_buffer *err,
682 int count, 645 int count,
683 struct list_head *head) 646 struct list_head *head)
@@ -702,6 +665,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
702 err->dirty = obj->dirty; 665 err->dirty = obj->dirty;
703 err->purgeable = obj->madv != I915_MADV_WILLNEED; 666 err->purgeable = obj->madv != I915_MADV_WILLNEED;
704 err->ring = obj->ring ? obj->ring->id : 0; 667 err->ring = obj->ring ? obj->ring->id : 0;
668 err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
705 669
706 if (++i == count) 670 if (++i == count)
707 break; 671 break;
@@ -741,6 +705,36 @@ static void i915_gem_record_fences(struct drm_device *dev,
741 } 705 }
742} 706}
743 707
708static struct drm_i915_error_object *
709i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
710 struct intel_ring_buffer *ring)
711{
712 struct drm_i915_gem_object *obj;
713 u32 seqno;
714
715 if (!ring->get_seqno)
716 return NULL;
717
718 seqno = ring->get_seqno(ring);
719 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
720 if (obj->ring != ring)
721 continue;
722
723 if (!i915_seqno_passed(obj->last_rendering_seqno, seqno))
724 continue;
725
726 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
727 continue;
728
729 /* We need to copy these to an anonymous buffer as the simplest
730 * method to avoid being overwritten by userspace.
731 */
732 return i915_error_object_create(dev_priv, obj);
733 }
734
735 return NULL;
736}
737
744/** 738/**
745 * i915_capture_error_state - capture an error record for later analysis 739 * i915_capture_error_state - capture an error record for later analysis
746 * @dev: drm device 740 * @dev: drm device
@@ -755,10 +749,8 @@ static void i915_capture_error_state(struct drm_device *dev)
755 struct drm_i915_private *dev_priv = dev->dev_private; 749 struct drm_i915_private *dev_priv = dev->dev_private;
756 struct drm_i915_gem_object *obj; 750 struct drm_i915_gem_object *obj;
757 struct drm_i915_error_state *error; 751 struct drm_i915_error_state *error;
758 struct drm_i915_gem_object *batchbuffer[2];
759 unsigned long flags; 752 unsigned long flags;
760 u32 bbaddr; 753 int i;
761 int count;
762 754
763 spin_lock_irqsave(&dev_priv->error_lock, flags); 755 spin_lock_irqsave(&dev_priv->error_lock, flags);
764 error = dev_priv->first_error; 756 error = dev_priv->first_error;
@@ -817,83 +809,30 @@ static void i915_capture_error_state(struct drm_device *dev)
817 } 809 }
818 i915_gem_record_fences(dev, error); 810 i915_gem_record_fences(dev, error);
819 811
820 bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]); 812 /* Record the active batchbuffers */
821 813 for (i = 0; i < I915_NUM_RINGS; i++)
822 /* Grab the current batchbuffer, most likely to have crashed. */ 814 error->batchbuffer[i] =
823 batchbuffer[0] = NULL; 815 i915_error_first_batchbuffer(dev_priv,
824 batchbuffer[1] = NULL; 816 &dev_priv->ring[i]);
825 count = 0;
826 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
827 if (batchbuffer[0] == NULL &&
828 bbaddr >= obj->gtt_offset &&
829 bbaddr < obj->gtt_offset + obj->base.size)
830 batchbuffer[0] = obj;
831
832 if (batchbuffer[1] == NULL &&
833 error->acthd >= obj->gtt_offset &&
834 error->acthd < obj->gtt_offset + obj->base.size)
835 batchbuffer[1] = obj;
836
837 count++;
838 }
839 /* Scan the other lists for completeness for those bizarre errors. */
840 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
841 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
842 if (batchbuffer[0] == NULL &&
843 bbaddr >= obj->gtt_offset &&
844 bbaddr < obj->gtt_offset + obj->base.size)
845 batchbuffer[0] = obj;
846
847 if (batchbuffer[1] == NULL &&
848 error->acthd >= obj->gtt_offset &&
849 error->acthd < obj->gtt_offset + obj->base.size)
850 batchbuffer[1] = obj;
851
852 if (batchbuffer[0] && batchbuffer[1])
853 break;
854 }
855 }
856 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
857 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
858 if (batchbuffer[0] == NULL &&
859 bbaddr >= obj->gtt_offset &&
860 bbaddr < obj->gtt_offset + obj->base.size)
861 batchbuffer[0] = obj;
862
863 if (batchbuffer[1] == NULL &&
864 error->acthd >= obj->gtt_offset &&
865 error->acthd < obj->gtt_offset + obj->base.size)
866 batchbuffer[1] = obj;
867
868 if (batchbuffer[0] && batchbuffer[1])
869 break;
870 }
871 }
872
873 /* We need to copy these to an anonymous buffer as the simplest
874 * method to avoid being overwritten by userspace.
875 */
876 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
877 if (batchbuffer[1] != batchbuffer[0])
878 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
879 else
880 error->batchbuffer[1] = NULL;
881 817
882 /* Record the ringbuffer */ 818 /* Record the ringbuffer */
883 error->ringbuffer = i915_error_object_create(dev, 819 error->ringbuffer = i915_error_object_create(dev_priv,
884 dev_priv->ring[RCS].obj); 820 dev_priv->ring[RCS].obj);
885 821
886 /* Record buffers on the active and pinned lists. */ 822 /* Record buffers on the active and pinned lists. */
887 error->active_bo = NULL; 823 error->active_bo = NULL;
888 error->pinned_bo = NULL; 824 error->pinned_bo = NULL;
889 825
890 error->active_bo_count = count; 826 i = 0;
827 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
828 i++;
829 error->active_bo_count = i;
891 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 830 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
892 count++; 831 i++;
893 error->pinned_bo_count = count - error->active_bo_count; 832 error->pinned_bo_count = i - error->active_bo_count;
894 833
895 if (count) { 834 if (i) {
896 error->active_bo = kmalloc(sizeof(*error->active_bo)*count, 835 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
897 GFP_ATOMIC); 836 GFP_ATOMIC);
898 if (error->active_bo) 837 if (error->active_bo)
899 error->pinned_bo = 838 error->pinned_bo =
@@ -1673,11 +1612,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1673 1612
1674 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1613 I915_WRITE(GTIIR, I915_READ(GTIIR));
1675 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1614 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1676 if (IS_GEN6(dev)) {
1677 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
1678 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
1679 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
1680 }
1681 1615
1682 if (IS_GEN6(dev)) 1616 if (IS_GEN6(dev))
1683 render_irqs = 1617 render_irqs =
@@ -1698,6 +1632,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1698 } else { 1632 } else {
1699 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1633 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1700 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1634 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1635 hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
1636 I915_WRITE(FDI_RXA_IMR, 0);
1637 I915_WRITE(FDI_RXB_IMR, 0);
1701 } 1638 }
1702 1639
1703 dev_priv->pch_irq_mask = ~hotplug_mask; 1640 dev_priv->pch_irq_mask = ~hotplug_mask;
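pch_irq_handler(), added in the hunks above, reads SDEIIR once and reports each asserted condition. A generic decode-and-report sketch; the macros below loosely mirror the SDE bit definitions added in i915_reg.h but are redefined locally so the example stands alone:

#include <stdio.h>
#include <stdint.h>

#define STATUS_GMBUS        (1u << 24)
#define STATUS_POISON       (1u << 19)
#define STATUS_FIFO_UNDER_B (1u << 3)
#define STATUS_FIFO_UNDER_A (1u << 0)
#define STATUS_POWER_SHIFT  25
#define STATUS_POWER_MASK   (7u << STATUS_POWER_SHIFT)

/* Test each interesting bit of the latched status word and report it. */
static void decode_status(uint32_t iir)
{
        if (iir & STATUS_POWER_MASK)
                printf("audio power change on port %u\n",
                       (iir & STATUS_POWER_MASK) >> STATUS_POWER_SHIFT);
        if (iir & STATUS_GMBUS)
                printf("GMBUS interrupt\n");
        if (iir & STATUS_POISON)
                printf("poison interrupt\n");
        if (iir & STATUS_FIFO_UNDER_B)
                printf("transcoder B underrun\n");
        if (iir & STATUS_FIFO_UNDER_A)
                printf("transcoder A underrun\n");
}

int main(void)
{
        decode_status(STATUS_GMBUS | STATUS_FIFO_UNDER_A |
                      (3u << STATUS_POWER_SHIFT));
        return 0;
}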
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 8f948a6fbc1c..40a407f41f61 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -145,6 +145,8 @@
145#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 145#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
146#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ 146#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
147#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) 147#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
148#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
149#define MI_SUSPEND_FLUSH_EN (1<<0)
148#define MI_REPORT_HEAD MI_INSTR(0x07, 0) 150#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
149#define MI_OVERLAY_FLIP MI_INSTR(0x11,0) 151#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
150#define MI_OVERLAY_CONTINUE (0x0<<21) 152#define MI_OVERLAY_CONTINUE (0x0<<21)
@@ -159,6 +161,7 @@
159#define MI_MM_SPACE_PHYSICAL (0<<8) 161#define MI_MM_SPACE_PHYSICAL (0<<8)
160#define MI_SAVE_EXT_STATE_EN (1<<3) 162#define MI_SAVE_EXT_STATE_EN (1<<3)
161#define MI_RESTORE_EXT_STATE_EN (1<<2) 163#define MI_RESTORE_EXT_STATE_EN (1<<2)
164#define MI_FORCE_RESTORE (1<<1)
162#define MI_RESTORE_INHIBIT (1<<0) 165#define MI_RESTORE_INHIBIT (1<<0)
163#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 166#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
164#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 167#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
@@ -288,6 +291,7 @@
288#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 291#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
289#define RING_ACTHD(base) ((base)+0x74) 292#define RING_ACTHD(base) ((base)+0x74)
290#define RING_NOPID(base) ((base)+0x94) 293#define RING_NOPID(base) ((base)+0x94)
294#define RING_IMR(base) ((base)+0xa8)
291#define TAIL_ADDR 0x001FFFF8 295#define TAIL_ADDR 0x001FFFF8
292#define HEAD_WRAP_COUNT 0xFFE00000 296#define HEAD_WRAP_COUNT 0xFFE00000
293#define HEAD_WRAP_ONE 0x00200000 297#define HEAD_WRAP_ONE 0x00200000
@@ -1130,9 +1134,50 @@
1130#define RCBMINAVG 0x111a0 1134#define RCBMINAVG 0x111a0
1131#define RCUPEI 0x111b0 1135#define RCUPEI 0x111b0
1132#define RCDNEI 0x111b4 1136#define RCDNEI 0x111b4
1133#define MCHBAR_RENDER_STANDBY 0x111b8 1137#define RSTDBYCTL 0x111b8
1134#define RCX_SW_EXIT (1<<23) 1138#define RS1EN (1<<31)
1135#define RSX_STATUS_MASK 0x00700000 1139#define RS2EN (1<<30)
1140#define RS3EN (1<<29)
1141#define D3RS3EN (1<<28) /* Display D3 implies RS3 */

1142#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
1143#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
1144#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
1145#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
1146#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
1147#define RSX_STATUS_MASK (7<<20)
1148#define RSX_STATUS_ON (0<<20)
1149#define RSX_STATUS_RC1 (1<<20)
1150#define RSX_STATUS_RC1E (2<<20)
1151#define RSX_STATUS_RS1 (3<<20)
1152#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
1153#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
1154#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
1155#define RSX_STATUS_RSVD2 (7<<20)
1156#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
1157#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
1158#define JRSC (1<<17) /* rsx coupled to cpu c-state */
1159#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
1160#define RS1CONTSAV_MASK (3<<14)
1161#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
1162#define RS1CONTSAV_RSVD (1<<14)
1163#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
1164#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
1165#define NORMSLEXLAT_MASK (3<<12)
1166#define SLOW_RS123 (0<<12)
1167#define SLOW_RS23 (1<<12)
1168#define SLOW_RS3 (2<<12)
1169#define NORMAL_RS123 (3<<12)
1170#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
1171#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
1172#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
1173#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
1174#define RS_CSTATE_MASK (3<<4)
1175#define RS_CSTATE_C367_RS1 (0<<4)
1176#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
1177#define RS_CSTATE_RSVD (2<<4)
1178#define RS_CSTATE_C367_RS2 (3<<4)
1179#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
1180#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
1136#define VIDCTL 0x111c0 1181#define VIDCTL 0x111c0
1137#define VIDSTS 0x111c8 1182#define VIDSTS 0x111c8
1138#define VIDSTART 0x111cc /* 8 bits */ 1183#define VIDSTART 0x111cc /* 8 bits */
@@ -2345,8 +2390,13 @@
2345 2390
2346/* Memory latency timer register */ 2391/* Memory latency timer register */
2347#define MLTR_ILK 0x11222 2392#define MLTR_ILK 0x11222
2393#define MLTR_WM1_SHIFT 0
2394#define MLTR_WM2_SHIFT 8
2348/* the unit of memory self-refresh latency time is 0.5us */ 2395/* the unit of memory self-refresh latency time is 0.5us */
2349#define ILK_SRLT_MASK 0x3f 2396#define ILK_SRLT_MASK 0x3f
2397#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
2398#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
2399#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
2350 2400
2351/* define the fifo size on Ironlake */ 2401/* define the fifo size on Ironlake */
2352#define ILK_DISPLAY_FIFO 128 2402#define ILK_DISPLAY_FIFO 128
@@ -2728,12 +2778,41 @@
2728/* PCH */ 2778/* PCH */
2729 2779
2730/* south display engine interrupt */ 2780/* south display engine interrupt */
2781#define SDE_AUDIO_POWER_D (1 << 27)
2782#define SDE_AUDIO_POWER_C (1 << 26)
2783#define SDE_AUDIO_POWER_B (1 << 25)
2784#define SDE_AUDIO_POWER_SHIFT (25)
2785#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
2786#define SDE_GMBUS (1 << 24)
2787#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
2788#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
2789#define SDE_AUDIO_HDCP_MASK (3 << 22)
2790#define SDE_AUDIO_TRANSB (1 << 21)
2791#define SDE_AUDIO_TRANSA (1 << 20)
2792#define SDE_AUDIO_TRANS_MASK (3 << 20)
2793#define SDE_POISON (1 << 19)
2794/* 18 reserved */
2795#define SDE_FDI_RXB (1 << 17)
2796#define SDE_FDI_RXA (1 << 16)
2797#define SDE_FDI_MASK (3 << 16)
2798#define SDE_AUXD (1 << 15)
2799#define SDE_AUXC (1 << 14)
2800#define SDE_AUXB (1 << 13)
2801#define SDE_AUX_MASK (7 << 13)
2802/* 12 reserved */
2731#define SDE_CRT_HOTPLUG (1 << 11) 2803#define SDE_CRT_HOTPLUG (1 << 11)
2732#define SDE_PORTD_HOTPLUG (1 << 10) 2804#define SDE_PORTD_HOTPLUG (1 << 10)
2733#define SDE_PORTC_HOTPLUG (1 << 9) 2805#define SDE_PORTC_HOTPLUG (1 << 9)
2734#define SDE_PORTB_HOTPLUG (1 << 8) 2806#define SDE_PORTB_HOTPLUG (1 << 8)
2735#define SDE_SDVOB_HOTPLUG (1 << 6) 2807#define SDE_SDVOB_HOTPLUG (1 << 6)
2736#define SDE_HOTPLUG_MASK (0xf << 8) 2808#define SDE_HOTPLUG_MASK (0xf << 8)
2809#define SDE_TRANSB_CRC_DONE (1 << 5)
2810#define SDE_TRANSB_CRC_ERR (1 << 4)
2811#define SDE_TRANSB_FIFO_UNDER (1 << 3)
2812#define SDE_TRANSA_CRC_DONE (1 << 2)
2813#define SDE_TRANSA_CRC_ERR (1 << 1)
2814#define SDE_TRANSA_FIFO_UNDER (1 << 0)
2815#define SDE_TRANS_MASK (0x3f)
2737/* CPT */ 2816/* CPT */
2738#define SDE_CRT_HOTPLUG_CPT (1 << 19) 2817#define SDE_CRT_HOTPLUG_CPT (1 << 19)
2739#define SDE_PORTD_HOTPLUG_CPT (1 << 23) 2818#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
@@ -3174,10 +3253,11 @@
3174#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) 3253#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
3175#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) 3254#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
3176/* SNB B-stepping */ 3255/* SNB B-stepping */
3177#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) 3256#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
3178#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) 3257#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
3179#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) 3258#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
3180#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) 3259#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
3260#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
3181#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) 3261#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
3182 3262
3183#define FORCEWAKE 0xA18C 3263#define FORCEWAKE 0xA18C
@@ -3239,6 +3319,7 @@
3239 3319
3240#define GEN6_PCODE_MAILBOX 0x138124 3320#define GEN6_PCODE_MAILBOX 0x138124
3241#define GEN6_PCODE_READY (1<<31) 3321#define GEN6_PCODE_READY (1<<31)
3322#define GEN6_READ_OC_PARAMS 0xc
3242#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 3323#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
3243#define GEN6_PCODE_DATA 0x138128 3324#define GEN6_PCODE_DATA 0x138128
3244 3325
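The new ILK_LATENCY()/ILK_READ_WMn_LATENCY() macros above pull 6-bit latency fields (in units of 0.5 us) out of MLTR_ILK by shift-and-mask. A self-contained sketch of the same extraction, using an invented register value:

#include <stdio.h>
#include <stdint.h>

#define SRLT_MASK   0x3f        /* 6-bit self-refresh latency field */
#define WM1_SHIFT   0
#define WM2_SHIFT   8

/* (reg >> shift) & mask, exactly what ILK_LATENCY(shift) expands to. */
static unsigned int field(uint32_t reg, unsigned int shift)
{
        return (reg >> shift) & SRLT_MASK;
}

int main(void)
{
        uint32_t mltr = 0x00000c04;     /* hypothetical: WM1 = 4, WM2 = 12 */

        /* each unit is 0.5 us, i.e. 500 ns */
        printf("WM1 latency: %u ns\n", field(mltr, WM1_SHIFT) * 500);
        printf("WM2 latency: %u ns\n", field(mltr, WM2_SHIFT) * 500);
        return 0;
}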
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 410772466fa7..0521ecf26017 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -740,7 +740,7 @@ void i915_restore_display(struct drm_device *dev)
740 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 740 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
741 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 741 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
742 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 742 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
743 I915_WRITE(MCHBAR_RENDER_STANDBY, 743 I915_WRITE(RSTDBYCTL,
744 dev_priv->saveMCHBAR_RENDER_STANDBY); 744 dev_priv->saveMCHBAR_RENDER_STANDBY);
745 } else { 745 } else {
746 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 746 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
@@ -811,7 +811,7 @@ int i915_save_state(struct drm_device *dev)
811 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); 811 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
812 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); 812 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
813 dev_priv->saveMCHBAR_RENDER_STANDBY = 813 dev_priv->saveMCHBAR_RENDER_STANDBY =
814 I915_READ(MCHBAR_RENDER_STANDBY); 814 I915_READ(RSTDBYCTL);
815 } else { 815 } else {
816 dev_priv->saveIER = I915_READ(IER); 816 dev_priv->saveIER = I915_READ(IER);
817 dev_priv->saveIMR = I915_READ(IMR); 817 dev_priv->saveIMR = I915_READ(IMR);
@@ -822,10 +822,6 @@ int i915_save_state(struct drm_device *dev)
822 if (IS_GEN6(dev)) 822 if (IS_GEN6(dev))
823 gen6_disable_rps(dev); 823 gen6_disable_rps(dev);
824 824
825 /* XXX disabling the clock gating breaks suspend on gm45
826 intel_disable_clock_gating(dev);
827 */
828
829 /* Cache mode state */ 825 /* Cache mode state */
830 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 826 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
831 827
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8df574316063..17035b87ee46 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -30,6 +30,7 @@
30#include "drm.h" 30#include "drm.h"
31#include "drm_crtc.h" 31#include "drm_crtc.h"
32#include "drm_crtc_helper.h" 32#include "drm_crtc_helper.h"
33#include "drm_edid.h"
33#include "intel_drv.h" 34#include "intel_drv.h"
34#include "i915_drm.h" 35#include "i915_drm.h"
35#include "i915_drv.h" 36#include "i915_drv.h"
@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
287 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; 288 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
288} 289}
289 290
290static bool intel_crt_detect_ddc(struct intel_crt *crt) 291static bool intel_crt_detect_ddc(struct drm_connector *connector)
291{ 292{
293 struct intel_crt *crt = intel_attached_crt(connector);
292 struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; 294 struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
293 295
294 /* CRT should always be at 0, but check anyway */ 296 /* CRT should always be at 0, but check anyway */
@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt)
301 } 303 }
302 304
303 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { 305 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
304 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 306 struct edid *edid;
305 return true; 307 bool is_digital = false;
308
309 edid = drm_get_edid(connector,
310 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
311 /*
312 * This may be a DVI-I connector with a shared DDC
313 * link between analog and digital outputs, so we
314 * have to check the EDID input spec of the attached device.
315 */
316 if (edid != NULL) {
317 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
318 connector->display_info.raw_edid = NULL;
319 kfree(edid);
320 }
321
322 if (!is_digital) {
323 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
324 return true;
325 }
306 } 326 }
307 327
308 return false; 328 return false;
@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
458 } 478 }
459 } 479 }
460 480
461 if (intel_crt_detect_ddc(crt)) 481 if (intel_crt_detect_ddc(connector))
462 return connector_status_connected; 482 return connector_status_connected;
463 483
464 if (!force) 484 if (!force)
@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
472 crtc = intel_get_load_detect_pipe(&crt->base, connector, 492 crtc = intel_get_load_detect_pipe(&crt->base, connector,
473 NULL, &dpms_mode); 493 NULL, &dpms_mode);
474 if (crtc) { 494 if (crtc) {
475 if (intel_crt_detect_ddc(crt)) 495 if (intel_crt_detect_ddc(connector))
476 status = connector_status_connected; 496 status = connector_status_connected;
477 else 497 else
478 status = intel_crt_load_detect(crtc, crt); 498 status = intel_crt_load_detect(crtc, crt);
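intel_crt_detect_ddc() now fetches the EDID and reports a CRT only when the sink advertises an analog input, so a DVI-I connector sharing one DDC link is not misdetected as VGA. A hedged userspace sketch of that check against a raw 128-byte EDID base block; offset 20 / bit 7 is my reading of the EDID 1.x layout, so verify against the spec before relying on it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte 20 of an EDID 1.x base block is the video input definition;
 * bit 7 distinguishes digital (1) from analog (0) inputs. */
#define EDID_INPUT_OFFSET   20
#define EDID_INPUT_DIGITAL  0x80

static const uint8_t edid_header[8] = {
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

static bool edid_is_digital(const uint8_t *edid, size_t len)
{
        if (len < 128 || memcmp(edid, edid_header, sizeof(edid_header)))
                return false;   /* not a valid base block: treat as analog/unknown */
        return edid[EDID_INPUT_OFFSET] & EDID_INPUT_DIGITAL;
}

int main(void)
{
        uint8_t edid[128] = {0};

        memcpy(edid, edid_header, sizeof(edid_header));
        edid[EDID_INPUT_OFFSET] = EDID_INPUT_DIGITAL;   /* pretend a digital sink answered */

        printf("digital sink: %s\n", edid_is_digital(edid, sizeof(edid)) ? "yes" : "no");
        return 0;
}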
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0abe79fb6385..25d96889d7d2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3418,15 +3418,16 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3418static bool ironlake_compute_wm0(struct drm_device *dev, 3418static bool ironlake_compute_wm0(struct drm_device *dev,
3419 int pipe, 3419 int pipe,
3420 const struct intel_watermark_params *display, 3420 const struct intel_watermark_params *display,
3421 int display_latency, 3421 int display_latency_ns,
3422 const struct intel_watermark_params *cursor, 3422 const struct intel_watermark_params *cursor,
3423 int cursor_latency, 3423 int cursor_latency_ns,
3424 int *plane_wm, 3424 int *plane_wm,
3425 int *cursor_wm) 3425 int *cursor_wm)
3426{ 3426{
3427 struct drm_crtc *crtc; 3427 struct drm_crtc *crtc;
3428 int htotal, hdisplay, clock, pixel_size = 0; 3428 int htotal, hdisplay, clock, pixel_size;
3429 int line_time_us, line_count, entries; 3429 int line_time_us, line_count;
3430 int entries, tlb_miss;
3430 3431
3431 crtc = intel_get_crtc_for_pipe(dev, pipe); 3432 crtc = intel_get_crtc_for_pipe(dev, pipe);
3432 if (crtc->fb == NULL || !crtc->enabled) 3433 if (crtc->fb == NULL || !crtc->enabled)
@@ -3438,7 +3439,10 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
3438 pixel_size = crtc->fb->bits_per_pixel / 8; 3439 pixel_size = crtc->fb->bits_per_pixel / 8;
3439 3440
3440 /* Use the small buffer method to calculate plane watermark */ 3441 /* Use the small buffer method to calculate plane watermark */
3441 entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000; 3442 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
3443 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
3444 if (tlb_miss > 0)
3445 entries += tlb_miss;
3442 entries = DIV_ROUND_UP(entries, display->cacheline_size); 3446 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3443 *plane_wm = entries + display->guard_size; 3447 *plane_wm = entries + display->guard_size;
3444 if (*plane_wm > (int)display->max_wm) 3448 if (*plane_wm > (int)display->max_wm)
@@ -3446,8 +3450,11 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
3446 3450
3447 /* Use the large buffer method to calculate cursor watermark */ 3451 /* Use the large buffer method to calculate cursor watermark */
3448 line_time_us = ((htotal * 1000) / clock); 3452 line_time_us = ((htotal * 1000) / clock);
3449 line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000; 3453 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
3450 entries = line_count * 64 * pixel_size; 3454 entries = line_count * 64 * pixel_size;
3455 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
3456 if (tlb_miss > 0)
3457 entries += tlb_miss;
3451 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 3458 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3452 *cursor_wm = entries + cursor->guard_size; 3459 *cursor_wm = entries + cursor->guard_size;
3453 if (*cursor_wm > (int)cursor->max_wm) 3460 if (*cursor_wm > (int)cursor->max_wm)
@@ -3456,113 +3463,17 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
3456 return true; 3463 return true;
3457} 3464}
3458 3465
3459static void ironlake_update_wm(struct drm_device *dev,
3460 int planea_clock, int planeb_clock,
3461 int sr_hdisplay, int sr_htotal,
3462 int pixel_size)
3463{
3464 struct drm_i915_private *dev_priv = dev->dev_private;
3465 int plane_wm, cursor_wm, enabled;
3466 int tmp;
3467
3468 enabled = 0;
3469 if (ironlake_compute_wm0(dev, 0,
3470 &ironlake_display_wm_info,
3471 ILK_LP0_PLANE_LATENCY,
3472 &ironlake_cursor_wm_info,
3473 ILK_LP0_CURSOR_LATENCY,
3474 &plane_wm, &cursor_wm)) {
3475 I915_WRITE(WM0_PIPEA_ILK,
3476 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3477 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
3478 " plane %d, " "cursor: %d\n",
3479 plane_wm, cursor_wm);
3480 enabled++;
3481 }
3482
3483 if (ironlake_compute_wm0(dev, 1,
3484 &ironlake_display_wm_info,
3485 ILK_LP0_PLANE_LATENCY,
3486 &ironlake_cursor_wm_info,
3487 ILK_LP0_CURSOR_LATENCY,
3488 &plane_wm, &cursor_wm)) {
3489 I915_WRITE(WM0_PIPEB_ILK,
3490 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3491 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
3492 " plane %d, cursor: %d\n",
3493 plane_wm, cursor_wm);
3494 enabled++;
3495 }
3496
3497 /*
3498 * Calculate and update the self-refresh watermark only when one
3499 * display plane is used.
3500 */
3501 tmp = 0;
3502 if (enabled == 1) {
3503 unsigned long line_time_us;
3504 int small, large, plane_fbc;
3505 int sr_clock, entries;
3506 int line_count, line_size;
3507 /* Read the self-refresh latency. The unit is 0.5us */
3508 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
3509
3510 sr_clock = planea_clock ? planea_clock : planeb_clock;
3511 line_time_us = (sr_htotal * 1000) / sr_clock;
3512
3513 /* Use ns/us then divide to preserve precision */
3514 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
3515 / 1000;
3516 line_size = sr_hdisplay * pixel_size;
3517
3518 /* Use the minimum of the small and large buffer method for primary */
3519 small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
3520 large = line_count * line_size;
3521
3522 entries = DIV_ROUND_UP(min(small, large),
3523 ironlake_display_srwm_info.cacheline_size);
3524
3525 plane_fbc = entries * 64;
3526 plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
3527
3528 plane_wm = entries + ironlake_display_srwm_info.guard_size;
3529 if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
3530 plane_wm = ironlake_display_srwm_info.max_wm;
3531
3532 /* calculate the self-refresh watermark for display cursor */
3533 entries = line_count * pixel_size * 64;
3534 entries = DIV_ROUND_UP(entries,
3535 ironlake_cursor_srwm_info.cacheline_size);
3536
3537 cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
3538 if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
3539 cursor_wm = ironlake_cursor_srwm_info.max_wm;
3540
3541 /* configure watermark and enable self-refresh */
3542 tmp = (WM1_LP_SR_EN |
3543 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3544 (plane_fbc << WM1_LP_FBC_SHIFT) |
3545 (plane_wm << WM1_LP_SR_SHIFT) |
3546 cursor_wm);
3547 DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
3548 " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
3549 }
3550 I915_WRITE(WM1_LP_ILK, tmp);
3551 /* XXX setup WM2 and WM3 */
3552}
3553
3554/* 3466/*
3555 * Check the wm result. 3467 * Check the wm result.
3556 * 3468 *
3557 * If any calculated watermark values is larger than the maximum value that 3469 * If any calculated watermark values is larger than the maximum value that
3558 * can be programmed into the associated watermark register, that watermark 3470 * can be programmed into the associated watermark register, that watermark
3559 * must be disabled. 3471 * must be disabled.
3560 *
3561 * Also return true if all of those watermark values is 0, which is set by
3562 * sandybridge_compute_srwm, to indicate the latency is ZERO.
3563 */ 3472 */
3564static bool sandybridge_check_srwm(struct drm_device *dev, int level, 3473static bool ironlake_check_srwm(struct drm_device *dev, int level,
3565 int fbc_wm, int display_wm, int cursor_wm) 3474 int fbc_wm, int display_wm, int cursor_wm,
3475 const struct intel_watermark_params *display,
3476 const struct intel_watermark_params *cursor)
3566{ 3477{
3567 struct drm_i915_private *dev_priv = dev->dev_private; 3478 struct drm_i915_private *dev_priv = dev->dev_private;
3568 3479
@@ -3571,7 +3482,7 @@ static bool sandybridge_check_srwm(struct drm_device *dev, int level,
3571 3482
3572 if (fbc_wm > SNB_FBC_MAX_SRWM) { 3483 if (fbc_wm > SNB_FBC_MAX_SRWM) {
3573 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", 3484 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
3574 fbc_wm, SNB_FBC_MAX_SRWM, level); 3485 fbc_wm, SNB_FBC_MAX_SRWM, level);
3575 3486
3576 /* fbc has its own way to disable FBC WM */ 3487 /* fbc has its own way to disable FBC WM */
3577 I915_WRITE(DISP_ARB_CTL, 3488 I915_WRITE(DISP_ARB_CTL,
@@ -3579,15 +3490,15 @@ static bool sandybridge_check_srwm(struct drm_device *dev, int level,
3579 return false; 3490 return false;
3580 } 3491 }
3581 3492
3582 if (display_wm > SNB_DISPLAY_MAX_SRWM) { 3493 if (display_wm > display->max_wm) {
3583 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", 3494 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
3584 display_wm, SNB_DISPLAY_MAX_SRWM, level); 3495 display_wm, SNB_DISPLAY_MAX_SRWM, level);
3585 return false; 3496 return false;
3586 } 3497 }
3587 3498
3588 if (cursor_wm > SNB_CURSOR_MAX_SRWM) { 3499 if (cursor_wm > cursor->max_wm) {
3589 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", 3500 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
3590 cursor_wm, SNB_CURSOR_MAX_SRWM, level); 3501 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
3591 return false; 3502 return false;
3592 } 3503 }
3593 3504
@@ -3602,16 +3513,18 @@ static bool sandybridge_check_srwm(struct drm_device *dev, int level,
3602/* 3513/*
3603 * Compute watermark values of WM[1-3], 3514 * Compute watermark values of WM[1-3],
3604 */ 3515 */
3605static bool sandybridge_compute_srwm(struct drm_device *dev, int level, 3516static bool ironlake_compute_srwm(struct drm_device *dev, int level,
3606 int hdisplay, int htotal, int pixel_size, 3517 int hdisplay, int htotal,
3607 int clock, int latency_ns, int *fbc_wm, 3518 int pixel_size, int clock, int latency_ns,
3608 int *display_wm, int *cursor_wm) 3519 const struct intel_watermark_params *display,
3520 const struct intel_watermark_params *cursor,
3521 int *fbc_wm, int *display_wm, int *cursor_wm)
3609{ 3522{
3610 3523
3611 unsigned long line_time_us; 3524 unsigned long line_time_us;
3525 int line_count, line_size;
3612 int small, large; 3526 int small, large;
3613 int entries; 3527 int entries;
3614 int line_count, line_size;
3615 3528
3616 if (!latency_ns) { 3529 if (!latency_ns) {
3617 *fbc_wm = *display_wm = *cursor_wm = 0; 3530 *fbc_wm = *display_wm = *cursor_wm = 0;
@@ -3626,24 +3539,110 @@ static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
3626 small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 3539 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3627 large = line_count * line_size; 3540 large = line_count * line_size;
3628 3541
3629 entries = DIV_ROUND_UP(min(small, large), 3542 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
3630 sandybridge_display_srwm_info.cacheline_size); 3543 *display_wm = entries + display->guard_size;
3631 *display_wm = entries + sandybridge_display_srwm_info.guard_size;
3632 3544
3633 /* 3545 /*
3634 * Spec said: 3546 * Spec says:
3635 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 3547 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
3636 */ 3548 */
3637 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; 3549 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
3638 3550
3639 /* calculate the self-refresh watermark for display cursor */ 3551 /* calculate the self-refresh watermark for display cursor */
3640 entries = line_count * pixel_size * 64; 3552 entries = line_count * pixel_size * 64;
3641 entries = DIV_ROUND_UP(entries, 3553 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3642 sandybridge_cursor_srwm_info.cacheline_size); 3554 *cursor_wm = entries + cursor->guard_size;
3643 *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
3644 3555
3645 return sandybridge_check_srwm(dev, level, 3556 return ironlake_check_srwm(dev, level,
3646 *fbc_wm, *display_wm, *cursor_wm); 3557 *fbc_wm, *display_wm, *cursor_wm,
3558 display, cursor);
3559}
3560
3561static void ironlake_update_wm(struct drm_device *dev,
3562 int planea_clock, int planeb_clock,
3563 int hdisplay, int htotal,
3564 int pixel_size)
3565{
3566 struct drm_i915_private *dev_priv = dev->dev_private;
3567 int fbc_wm, plane_wm, cursor_wm, enabled;
3568 int clock;
3569
3570 enabled = 0;
3571 if (ironlake_compute_wm0(dev, 0,
3572 &ironlake_display_wm_info,
3573 ILK_LP0_PLANE_LATENCY,
3574 &ironlake_cursor_wm_info,
3575 ILK_LP0_CURSOR_LATENCY,
3576 &plane_wm, &cursor_wm)) {
3577 I915_WRITE(WM0_PIPEA_ILK,
3578 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3579 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
3580 " plane %d, " "cursor: %d\n",
3581 plane_wm, cursor_wm);
3582 enabled++;
3583 }
3584
3585 if (ironlake_compute_wm0(dev, 1,
3586 &ironlake_display_wm_info,
3587 ILK_LP0_PLANE_LATENCY,
3588 &ironlake_cursor_wm_info,
3589 ILK_LP0_CURSOR_LATENCY,
3590 &plane_wm, &cursor_wm)) {
3591 I915_WRITE(WM0_PIPEB_ILK,
3592 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3593 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
3594 " plane %d, cursor: %d\n",
3595 plane_wm, cursor_wm);
3596 enabled++;
3597 }
3598
3599 /*
3600 * Calculate and update the self-refresh watermark only when one
3601 * display plane is used.
3602 */
3603 I915_WRITE(WM3_LP_ILK, 0);
3604 I915_WRITE(WM2_LP_ILK, 0);
3605 I915_WRITE(WM1_LP_ILK, 0);
3606
3607 if (enabled != 1)
3608 return;
3609
3610 clock = planea_clock ? planea_clock : planeb_clock;
3611
3612 /* WM1 */
3613 if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
3614 clock, ILK_READ_WM1_LATENCY() * 500,
3615 &ironlake_display_srwm_info,
3616 &ironlake_cursor_srwm_info,
3617 &fbc_wm, &plane_wm, &cursor_wm))
3618 return;
3619
3620 I915_WRITE(WM1_LP_ILK,
3621 WM1_LP_SR_EN |
3622 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3623 (fbc_wm << WM1_LP_FBC_SHIFT) |
3624 (plane_wm << WM1_LP_SR_SHIFT) |
3625 cursor_wm);
3626
3627 /* WM2 */
3628 if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
3629 clock, ILK_READ_WM2_LATENCY() * 500,
3630 &ironlake_display_srwm_info,
3631 &ironlake_cursor_srwm_info,
3632 &fbc_wm, &plane_wm, &cursor_wm))
3633 return;
3634
3635 I915_WRITE(WM2_LP_ILK,
3636 WM2_LP_EN |
3637 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3638 (fbc_wm << WM1_LP_FBC_SHIFT) |
3639 (plane_wm << WM1_LP_SR_SHIFT) |
3640 cursor_wm);
3641
3642 /*
3643 * WM3 is unsupported on ILK, probably because we don't have latency
3644 * data for that power state
3645 */
3647} 3646}
3648 3647
3649static void sandybridge_update_wm(struct drm_device *dev, 3648static void sandybridge_update_wm(struct drm_device *dev,
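
Reviewer note (not part of the patch): this hunk keeps the existing self-refresh maths — entries = DIV_ROUND_UP(min(small, large), cacheline_size) plus a guard value, and the spec formula FBC WM = ((Final Primary WM * 64) / bytes per line) + 2 — and only routes the per-platform numbers through the wm_info parameters. A quick standalone check of that arithmetic, with invented input values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Invented example values, not taken from any real panel. */
        unsigned line_size = 1920 * 4;          /* bytes per scanline at 32 bpp */
        unsigned small = 2048, large = 4096;    /* candidate buffer sizes in bytes */
        unsigned cacheline_size = 64, guard_size = 2;

        unsigned entries = DIV_ROUND_UP(small < large ? small : large, cacheline_size);
        unsigned display_wm = entries + guard_size;
        /* Spec: FBC WM = ((Final Primary WM * 64) / bytes per line) + 2 */
        unsigned fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;

        printf("display_wm=%u fbc_wm=%u\n", display_wm, fbc_wm);
        return 0;
    }
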
@@ -3652,7 +3651,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
3652 int pixel_size) 3651 int pixel_size)
3653{ 3652{
3654 struct drm_i915_private *dev_priv = dev->dev_private; 3653 struct drm_i915_private *dev_priv = dev->dev_private;
3655 int latency = SNB_READ_WM0_LATENCY(); 3654 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
3656 int fbc_wm, plane_wm, cursor_wm, enabled; 3655 int fbc_wm, plane_wm, cursor_wm, enabled;
3657 int clock; 3656 int clock;
3658 3657
@@ -3701,9 +3700,11 @@ static void sandybridge_update_wm(struct drm_device *dev,
3701 clock = planea_clock ? planea_clock : planeb_clock; 3700 clock = planea_clock ? planea_clock : planeb_clock;
3702 3701
3703 /* WM1 */ 3702 /* WM1 */
3704 if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, 3703 if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
3705 clock, SNB_READ_WM1_LATENCY() * 500, 3704 clock, SNB_READ_WM1_LATENCY() * 500,
3706 &fbc_wm, &plane_wm, &cursor_wm)) 3705 &sandybridge_display_srwm_info,
3706 &sandybridge_cursor_srwm_info,
3707 &fbc_wm, &plane_wm, &cursor_wm))
3707 return; 3708 return;
3708 3709
3709 I915_WRITE(WM1_LP_ILK, 3710 I915_WRITE(WM1_LP_ILK,
@@ -3714,10 +3715,12 @@ static void sandybridge_update_wm(struct drm_device *dev,
3714 cursor_wm); 3715 cursor_wm);
3715 3716
3716 /* WM2 */ 3717 /* WM2 */
3717 if (!sandybridge_compute_srwm(dev, 2, 3718 if (!ironlake_compute_srwm(dev, 2,
3718 hdisplay, htotal, pixel_size, 3719 hdisplay, htotal, pixel_size,
3719 clock, SNB_READ_WM2_LATENCY() * 500, 3720 clock, SNB_READ_WM2_LATENCY() * 500,
3720 &fbc_wm, &plane_wm, &cursor_wm)) 3721 &sandybridge_display_srwm_info,
3722 &sandybridge_cursor_srwm_info,
3723 &fbc_wm, &plane_wm, &cursor_wm))
3721 return; 3724 return;
3722 3725
3723 I915_WRITE(WM2_LP_ILK, 3726 I915_WRITE(WM2_LP_ILK,
@@ -3728,10 +3731,12 @@ static void sandybridge_update_wm(struct drm_device *dev,
3728 cursor_wm); 3731 cursor_wm);
3729 3732
3730 /* WM3 */ 3733 /* WM3 */
3731 if (!sandybridge_compute_srwm(dev, 3, 3734 if (!ironlake_compute_srwm(dev, 3,
3732 hdisplay, htotal, pixel_size, 3735 hdisplay, htotal, pixel_size,
3733 clock, SNB_READ_WM3_LATENCY() * 500, 3736 clock, SNB_READ_WM3_LATENCY() * 500,
3734 &fbc_wm, &plane_wm, &cursor_wm)) 3737 &sandybridge_display_srwm_info,
3738 &sandybridge_cursor_srwm_info,
3739 &fbc_wm, &plane_wm, &cursor_wm))
3735 return; 3740 return;
3736 3741
3737 I915_WRITE(WM3_LP_ILK, 3742 I915_WRITE(WM3_LP_ILK,
@@ -3951,7 +3956,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3951 int lane = 0, link_bw, bpp; 3956 int lane = 0, link_bw, bpp;
3952 /* CPU eDP doesn't require FDI link, so just set DP M/N 3957 /* CPU eDP doesn't require FDI link, so just set DP M/N
3953 according to current link config */ 3958 according to current link config */
3954 if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { 3959 if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
3955 target_clock = mode->clock; 3960 target_clock = mode->clock;
3956 intel_edp_link_config(has_edp_encoder, 3961 intel_edp_link_config(has_edp_encoder,
3957 &lane, &link_bw); 3962 &lane, &link_bw);
@@ -5038,8 +5043,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
5038 drm_i915_private_t *dev_priv = dev->dev_private; 5043 drm_i915_private_t *dev_priv = dev->dev_private;
5039 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5044 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5040 int pipe = intel_crtc->pipe; 5045 int pipe = intel_crtc->pipe;
5041 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 5046 int dpll_reg = DPLL(pipe);
5042 int dpll = I915_READ(dpll_reg); 5047 int dpll;
5043 5048
5044 if (HAS_PCH_SPLIT(dev)) 5049 if (HAS_PCH_SPLIT(dev))
5045 return; 5050 return;
@@ -5047,17 +5052,19 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
5047 if (!dev_priv->lvds_downclock_avail) 5052 if (!dev_priv->lvds_downclock_avail)
5048 return; 5053 return;
5049 5054
5055 dpll = I915_READ(dpll_reg);
5050 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 5056 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
5051 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 5057 DRM_DEBUG_DRIVER("upclocking LVDS\n");
5052 5058
5053 /* Unlock panel regs */ 5059 /* Unlock panel regs */
5054 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | 5060 I915_WRITE(PP_CONTROL,
5055 PANEL_UNLOCK_REGS); 5061 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
5056 5062
5057 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 5063 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5058 I915_WRITE(dpll_reg, dpll); 5064 I915_WRITE(dpll_reg, dpll);
5059 dpll = I915_READ(dpll_reg); 5065 POSTING_READ(dpll_reg);
5060 intel_wait_for_vblank(dev, pipe); 5066 intel_wait_for_vblank(dev, pipe);
5067
5061 dpll = I915_READ(dpll_reg); 5068 dpll = I915_READ(dpll_reg);
5062 if (dpll & DISPLAY_RATE_SELECT_FPA1) 5069 if (dpll & DISPLAY_RATE_SELECT_FPA1)
5063 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 5070 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -5802,6 +5809,8 @@ static void intel_setup_outputs(struct drm_device *dev)
5802 encoder->base.possible_clones = 5809 encoder->base.possible_clones =
5803 intel_encoder_clones(dev, encoder->clone_mask); 5810 intel_encoder_clones(dev, encoder->clone_mask);
5804 } 5811 }
5812
5813 intel_panel_setup_backlight(dev);
5805} 5814}
5806 5815
5807static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 5816static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -6145,6 +6154,10 @@ void intel_init_emon(struct drm_device *dev)
6145 6154
6146void gen6_enable_rps(struct drm_i915_private *dev_priv) 6155void gen6_enable_rps(struct drm_i915_private *dev_priv)
6147{ 6156{
6157 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6158 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
6159 u32 pcu_mbox;
6160 int cur_freq, min_freq, max_freq;
6148 int i; 6161 int i;
6149 6162
6150 /* Here begins a magic sequence of register writes to enable 6163 /* Here begins a magic sequence of register writes to enable
@@ -6216,6 +6229,29 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6216 500)) 6229 500))
6217 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 6230 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6218 6231
6232 min_freq = (rp_state_cap & 0xff0000) >> 16;
6233 max_freq = rp_state_cap & 0xff;
6234 cur_freq = (gt_perf_status & 0xff00) >> 8;
6235
6236 /* Check for overclock support */
6237 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6238 500))
6239 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
6240 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
6241 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
6242 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6243 500))
6244 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6245 if (pcu_mbox & (1<<31)) { /* OC supported */
6246 max_freq = pcu_mbox & 0xff;
6247 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100);
6248 }
6249
6250 /* In units of 100MHz */
6251 dev_priv->max_delay = max_freq;
6252 dev_priv->min_delay = min_freq;
6253 dev_priv->cur_delay = cur_freq;
6254
6219 /* requires MSI enabled */ 6255 /* requires MSI enabled */
6220 I915_WRITE(GEN6_PMIER, 6256 I915_WRITE(GEN6_PMIER,
6221 GEN6_PM_MBOX_EVENT | 6257 GEN6_PM_MBOX_EVENT |
@@ -6386,42 +6422,6 @@ void intel_enable_clock_gating(struct drm_device *dev)
6386 } else if (IS_I830(dev)) { 6422 } else if (IS_I830(dev)) {
6387 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 6423 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6388 } 6424 }
6389
6390 /*
6391 * GPU can automatically power down the render unit if given a page
6392 * to save state.
6393 */
6394 if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
6395 if (dev_priv->renderctx == NULL)
6396 dev_priv->renderctx = intel_alloc_context_page(dev);
6397 if (dev_priv->renderctx) {
6398 struct drm_i915_gem_object *obj = dev_priv->renderctx;
6399 if (BEGIN_LP_RING(4) == 0) {
6400 OUT_RING(MI_SET_CONTEXT);
6401 OUT_RING(obj->gtt_offset |
6402 MI_MM_SPACE_GTT |
6403 MI_SAVE_EXT_STATE_EN |
6404 MI_RESTORE_EXT_STATE_EN |
6405 MI_RESTORE_INHIBIT);
6406 OUT_RING(MI_NOOP);
6407 OUT_RING(MI_FLUSH);
6408 ADVANCE_LP_RING();
6409 }
6410 } else
6411 DRM_DEBUG_KMS("Failed to allocate render context."
6412 "Disable RC6\n");
6413 }
6414
6415 if (IS_GEN4(dev) && IS_MOBILE(dev)) {
6416 if (dev_priv->pwrctx == NULL)
6417 dev_priv->pwrctx = intel_alloc_context_page(dev);
6418 if (dev_priv->pwrctx) {
6419 struct drm_i915_gem_object *obj = dev_priv->pwrctx;
6420 I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
6421 I915_WRITE(MCHBAR_RENDER_STANDBY,
6422 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
6423 }
6424 }
6425} 6425}
6426 6426
6427void intel_disable_clock_gating(struct drm_device *dev) 6427void intel_disable_clock_gating(struct drm_device *dev)
@@ -6451,6 +6451,57 @@ void intel_disable_clock_gating(struct drm_device *dev)
6451 } 6451 }
6452} 6452}
6453 6453
6454static void ironlake_disable_rc6(struct drm_device *dev)
6455{
6456 struct drm_i915_private *dev_priv = dev->dev_private;
6457
6458 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
6459 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
6460 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
6461 10);
6462 POSTING_READ(CCID);
6463 I915_WRITE(PWRCTXA, 0);
6464 POSTING_READ(PWRCTXA);
6465 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
6466 POSTING_READ(RSTDBYCTL);
6467 i915_gem_object_unpin(dev_priv->renderctx);
6468 drm_gem_object_unreference(&dev_priv->renderctx->base);
6469 dev_priv->renderctx = NULL;
6470 i915_gem_object_unpin(dev_priv->pwrctx);
6471 drm_gem_object_unreference(&dev_priv->pwrctx->base);
6472 dev_priv->pwrctx = NULL;
6473}
6474
6475void ironlake_enable_rc6(struct drm_device *dev)
6476{
6477 struct drm_i915_private *dev_priv = dev->dev_private;
6478 int ret;
6479
6480 /*
6481 * GPU can automatically power down the render unit if given a page
6482 * to save state.
6483 */
6484 ret = BEGIN_LP_RING(6);
6485 if (ret) {
6486 ironlake_disable_rc6(dev);
6487 return;
6488 }
6489 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
6490 OUT_RING(MI_SET_CONTEXT);
6491 OUT_RING(dev_priv->renderctx->gtt_offset |
6492 MI_MM_SPACE_GTT |
6493 MI_SAVE_EXT_STATE_EN |
6494 MI_RESTORE_EXT_STATE_EN |
6495 MI_RESTORE_INHIBIT);
6496 OUT_RING(MI_SUSPEND_FLUSH);
6497 OUT_RING(MI_NOOP);
6498 OUT_RING(MI_FLUSH);
6499 ADVANCE_LP_RING();
6500
6501 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
6502 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
6503}
6504
6454/* Set up chip specific display functions */ 6505/* Set up chip specific display functions */
6455static void intel_init_display(struct drm_device *dev) 6506static void intel_init_display(struct drm_device *dev)
6456{ 6507{
@@ -6665,12 +6716,7 @@ void intel_modeset_init(struct drm_device *dev)
6665 dev->mode_config.max_width = 8192; 6716 dev->mode_config.max_width = 8192;
6666 dev->mode_config.max_height = 8192; 6717 dev->mode_config.max_height = 8192;
6667 } 6718 }
6668 6719 dev->mode_config.fb_base = dev->agp->base;
6669 /* set memory base */
6670 if (IS_GEN2(dev))
6671 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
6672 else
6673 dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
6674 6720
6675 if (IS_MOBILE(dev) || !IS_GEN2(dev)) 6721 if (IS_MOBILE(dev) || !IS_GEN2(dev))
6676 dev_priv->num_pipe = 2; 6722 dev_priv->num_pipe = 2;
@@ -6698,6 +6744,21 @@ void intel_modeset_init(struct drm_device *dev)
6698 if (IS_GEN6(dev)) 6744 if (IS_GEN6(dev))
6699 gen6_enable_rps(dev_priv); 6745 gen6_enable_rps(dev_priv);
6700 6746
6747 if (IS_IRONLAKE_M(dev)) {
6748 dev_priv->renderctx = intel_alloc_context_page(dev);
6749 if (!dev_priv->renderctx)
6750 goto skip_rc6;
6751 dev_priv->pwrctx = intel_alloc_context_page(dev);
6752 if (!dev_priv->pwrctx) {
6753 i915_gem_object_unpin(dev_priv->renderctx);
6754 drm_gem_object_unreference(&dev_priv->renderctx->base);
6755 dev_priv->renderctx = NULL;
6756 goto skip_rc6;
6757 }
6758 ironlake_enable_rc6(dev);
6759 }
6760
6761skip_rc6:
6701 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 6762 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
6702 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 6763 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
6703 (unsigned long)dev); 6764 (unsigned long)dev);
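
Reviewer note (not part of the patch): the RC6 setup added to intel_modeset_init() is the usual allocate-two-then-roll-back shape — if the power context cannot be allocated, the already-pinned render context is released and RC6 is simply skipped. Reduced to a generic user-space sketch, with malloc standing in for the context-page allocation:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        void *renderctx = malloc(4096);     /* first context page */
        void *pwrctx = NULL;

        if (!renderctx)
            goto skip_rc6;                  /* nothing to undo */

        pwrctx = malloc(4096);              /* second context page */
        if (!pwrctx) {
            free(renderctx);                /* roll back the first allocation */
            renderctx = NULL;
            goto skip_rc6;
        }

        puts("both context pages allocated: enable RC6");
        free(pwrctx);
        free(renderctx);
        return 0;

    skip_rc6:
        puts("RC6 left disabled");
        return 0;
    }
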
@@ -6734,7 +6795,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
6734 if (IS_GEN6(dev)) 6795 if (IS_GEN6(dev))
6735 gen6_disable_rps(dev); 6796 gen6_disable_rps(dev);
6736 6797
6737 intel_disable_clock_gating(dev); 6798 if (IS_IRONLAKE_M(dev))
6799 ironlake_disable_rc6(dev);
6738 6800
6739 mutex_unlock(&dev->struct_mutex); 6801 mutex_unlock(&dev->struct_mutex);
6740 6802
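
Reviewer note (not part of the patch): gen6_enable_rps() now reads the minimum and maximum ratios out of GEN6_RP_STATE_CAP and the current ratio out of GEN6_GT_PERF_STATUS, all in units of 100 MHz. A user-space sketch of just that decode, using made-up register values; only the field layout mirrors the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Made-up sample register values. */
        uint32_t rp_state_cap   = 0x00060016;   /* bits 23:16 = minimum ratio, bits 7:0 = maximum ratio */
        uint32_t gt_perf_status = 0x00000c00;   /* bits 15:8  = current ratio */

        unsigned min_freq = (rp_state_cap & 0xff0000) >> 16;
        unsigned max_freq = rp_state_cap & 0xff;
        unsigned cur_freq = (gt_perf_status & 0xff00) >> 8;

        /* All three ratios are in units of 100 MHz. */
        printf("min %u MHz, cur %u MHz, max %u MHz\n",
               min_freq * 100, cur_freq * 100, max_freq * 100);
        return 0;
    }
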
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1dc60408d5b8..1f4242b682c8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1153,18 +1153,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
1153static uint32_t 1153static uint32_t
1154intel_gen6_edp_signal_levels(uint8_t train_set) 1154intel_gen6_edp_signal_levels(uint8_t train_set)
1155{ 1155{
1156 switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { 1156 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1157 DP_TRAIN_PRE_EMPHASIS_MASK);
1158 switch (signal_levels) {
1157 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1159 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1158 return EDP_LINK_TRAIN_400MV_0DB_SNB_B; 1160 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1161 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1162 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1163 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
1159 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1164 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1160 return EDP_LINK_TRAIN_400MV_6DB_SNB_B; 1165 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1166 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
1161 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1167 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1162 return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; 1168 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1169 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
1163 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1170 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1164 return EDP_LINK_TRAIN_800MV_0DB_SNB_B; 1171 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1172 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
1165 default: 1173 default:
1166 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); 1174 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1167 return EDP_LINK_TRAIN_400MV_0DB_SNB_B; 1175 "0x%x\n", signal_levels);
1176 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1168 } 1177 }
1169} 1178}
1170 1179
@@ -1334,17 +1343,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1334 struct drm_device *dev = intel_dp->base.base.dev; 1343 struct drm_device *dev = intel_dp->base.base.dev;
1335 struct drm_i915_private *dev_priv = dev->dev_private; 1344 struct drm_i915_private *dev_priv = dev->dev_private;
1336 bool channel_eq = false; 1345 bool channel_eq = false;
1337 int tries; 1346 int tries, cr_tries;
1338 u32 reg; 1347 u32 reg;
1339 uint32_t DP = intel_dp->DP; 1348 uint32_t DP = intel_dp->DP;
1340 1349
1341 /* channel equalization */ 1350 /* channel equalization */
1342 tries = 0; 1351 tries = 0;
1352 cr_tries = 0;
1343 channel_eq = false; 1353 channel_eq = false;
1344 for (;;) { 1354 for (;;) {
1345 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1355 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1346 uint32_t signal_levels; 1356 uint32_t signal_levels;
1347 1357
1358 if (cr_tries > 5) {
1359 DRM_ERROR("failed to train DP, aborting\n");
1360 intel_dp_link_down(intel_dp);
1361 break;
1362 }
1363
1348 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1364 if (IS_GEN6(dev) && is_edp(intel_dp)) {
1349 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1365 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1350 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1366 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
@@ -1367,14 +1383,26 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1367 if (!intel_dp_get_link_status(intel_dp)) 1383 if (!intel_dp_get_link_status(intel_dp))
1368 break; 1384 break;
1369 1385
1386 /* Make sure clock is still ok */
1387 if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1388 intel_dp_start_link_train(intel_dp);
1389 cr_tries++;
1390 continue;
1391 }
1392
1370 if (intel_channel_eq_ok(intel_dp)) { 1393 if (intel_channel_eq_ok(intel_dp)) {
1371 channel_eq = true; 1394 channel_eq = true;
1372 break; 1395 break;
1373 } 1396 }
1374 1397
1375 /* Try 5 times */ 1398 /* Try 5 times, then try clock recovery if that fails */
1376 if (tries > 5) 1399 if (tries > 5) {
1377 break; 1400 intel_dp_link_down(intel_dp);
1401 intel_dp_start_link_train(intel_dp);
1402 tries = 0;
1403 cr_tries++;
1404 continue;
1405 }
1378 1406
1379 /* Compute new intel_dp->train_set as requested by target */ 1407 /* Compute new intel_dp->train_set as requested by target */
1380 intel_get_adjust_train(intel_dp); 1408 intel_get_adjust_train(intel_dp);
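
Reviewer note (not part of the patch): the reworked channel-equalization loop now carries two retry budgets — up to 5 equalization attempts per clock-recovery pass, and at most 5 clock-recovery restarts before the link is dropped. A standalone sketch of that control flow with the actual training steps stubbed out; all helper names below are placeholders, not driver API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder training steps; in the driver these poll DPCD link status. */
    static bool clock_recovery_ok(void) { return true; }
    static bool channel_eq_ok(int attempt) { return attempt >= 3; }
    static void restart_clock_recovery(void) { puts("restart clock recovery"); }

    int main(void)
    {
        int tries = 0, cr_tries = 0;
        bool channel_eq = false;

        for (;;) {
            if (cr_tries > 5) {             /* give up entirely */
                puts("link training failed");
                break;
            }
            if (!clock_recovery_ok()) {     /* clock fell out of lock */
                restart_clock_recovery();
                cr_tries++;
                continue;
            }
            if (channel_eq_ok(tries)) {
                channel_eq = true;
                puts("channel EQ done");
                break;
            }
            if (++tries > 5) {              /* EQ stuck: redo clock recovery */
                restart_clock_recovery();
                tries = 0;
                cr_tries++;
            }
        }
        return channel_eq ? 0 : 1;
    }
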
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d782ad9fd6db..74db2557d644 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -257,6 +257,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
257extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 257extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
258extern u32 intel_panel_get_backlight(struct drm_device *dev); 258extern u32 intel_panel_get_backlight(struct drm_device *dev);
259extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 259extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
260extern void intel_panel_setup_backlight(struct drm_device *dev);
261extern void intel_panel_enable_backlight(struct drm_device *dev);
262extern void intel_panel_disable_backlight(struct drm_device *dev);
260 263
261extern void intel_crtc_load_lut(struct drm_crtc *crtc); 264extern void intel_crtc_load_lut(struct drm_crtc *crtc);
262extern void intel_encoder_prepare (struct drm_encoder *encoder); 265extern void intel_encoder_prepare (struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 701e830d0012..ee145a257287 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -62,6 +62,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
62 struct drm_fb_helper_surface_size *sizes) 62 struct drm_fb_helper_surface_size *sizes)
63{ 63{
64 struct drm_device *dev = ifbdev->helper.dev; 64 struct drm_device *dev = ifbdev->helper.dev;
65 struct drm_i915_private *dev_priv = dev->dev_private;
65 struct fb_info *info; 66 struct fb_info *info;
66 struct drm_framebuffer *fb; 67 struct drm_framebuffer *fb;
67 struct drm_mode_fb_cmd mode_cmd; 68 struct drm_mode_fb_cmd mode_cmd;
@@ -77,7 +78,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
77 mode_cmd.height = sizes->surface_height; 78 mode_cmd.height = sizes->surface_height;
78 79
79 mode_cmd.bpp = sizes->surface_bpp; 80 mode_cmd.bpp = sizes->surface_bpp;
80 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 81 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
81 mode_cmd.depth = sizes->surface_depth; 82 mode_cmd.depth = sizes->surface_depth;
82 83
83 size = mode_cmd.pitch * mode_cmd.height; 84 size = mode_cmd.pitch * mode_cmd.height;
@@ -120,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
120 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 121 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
121 info->fbops = &intelfb_ops; 122 info->fbops = &intelfb_ops;
122 123
124 ret = fb_alloc_cmap(&info->cmap, 256, 0);
125 if (ret) {
126 ret = -ENOMEM;
127 goto out_unpin;
128 }
123 /* setup aperture base/size for vesafb takeover */ 129 /* setup aperture base/size for vesafb takeover */
124 info->apertures = alloc_apertures(1); 130 info->apertures = alloc_apertures(1);
125 if (!info->apertures) { 131 if (!info->apertures) {
@@ -127,10 +133,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
127 goto out_unpin; 133 goto out_unpin;
128 } 134 }
129 info->apertures->ranges[0].base = dev->mode_config.fb_base; 135 info->apertures->ranges[0].base = dev->mode_config.fb_base;
130 if (!IS_GEN2(dev)) 136 info->apertures->ranges[0].size =
131 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); 137 dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
132 else
133 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
134 138
135 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; 139 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
136 info->fix.smem_len = size; 140 info->fix.smem_len = size;
@@ -140,12 +144,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
140 ret = -ENOSPC; 144 ret = -ENOSPC;
141 goto out_unpin; 145 goto out_unpin;
142 } 146 }
143
144 ret = fb_alloc_cmap(&info->cmap, 256, 0);
145 if (ret) {
146 ret = -ENOMEM;
147 goto out_unpin;
148 }
149 info->screen_size = size; 147 info->screen_size = size;
150 148
151// memset(info->screen_base, 0, size); 149// memset(info->screen_base, 0, size);
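
Reviewer note (not part of the patch): the pitch fix switches to the standard idiom for rounding a bit depth up to whole bytes, (bpp + 7) / 8, before aligning the stride to 64 bytes. A tiny standalone check of that arithmetic; ALIGN here is a local macro for illustration:

    #include <stdio.h>

    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned widths[] = { 1024, 1366, 1920 };
        unsigned bpps[]   = { 8, 16, 24, 32 };

        for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
            for (unsigned j = 0; j < sizeof(bpps) / sizeof(bpps[0]); j++) {
                unsigned bytes_pp = (bpps[j] + 7) / 8;  /* round bits up to bytes */
                unsigned pitch = ALIGN(widths[i] * bytes_pp, 64);
                printf("width %4u bpp %2u -> pitch %u\n",
                       widths[i], bpps[j], pitch);
            }
        return 0;
    }
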
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa2307080be2..8f4f6bd33ee9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
107 POSTING_READ(lvds_reg); 107 POSTING_READ(lvds_reg);
108 108
109 intel_panel_set_backlight(dev, dev_priv->backlight_level); 109 intel_panel_enable_backlight(dev);
110} 110}
111 111
112static void intel_lvds_disable(struct intel_lvds *intel_lvds) 112static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -123,8 +123,7 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
123 lvds_reg = LVDS; 123 lvds_reg = LVDS;
124 } 124 }
125 125
126 dev_priv->backlight_level = intel_panel_get_backlight(dev); 126 intel_panel_disable_backlight(dev);
127 intel_panel_set_backlight(dev, 0);
128 127
129 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 128 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
130 129
@@ -375,6 +374,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
375 } 374 }
376 375
377out: 376out:
377 if ((pfit_control & PFIT_ENABLE) == 0) {
378 pfit_control = 0;
379 pfit_pgm_ratios = 0;
380 }
378 if (pfit_control != intel_lvds->pfit_control || 381 if (pfit_control != intel_lvds->pfit_control ||
379 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { 382 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
380 intel_lvds->pfit_control = pfit_control; 383 intel_lvds->pfit_control = pfit_control;
@@ -398,8 +401,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
398 struct drm_i915_private *dev_priv = dev->dev_private; 401 struct drm_i915_private *dev_priv = dev->dev_private;
399 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 402 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
400 403
401 dev_priv->backlight_level = intel_panel_get_backlight(dev);
402
403 /* We try to do the minimum that is necessary in order to unlock 404 /* We try to do the minimum that is necessary in order to unlock
404 * the registers for mode setting. 405 * the registers for mode setting.
405 * 406 *
@@ -430,9 +431,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
430 struct drm_i915_private *dev_priv = dev->dev_private; 431 struct drm_i915_private *dev_priv = dev->dev_private;
431 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 432 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
432 433
433 if (dev_priv->backlight_level == 0)
434 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
435
436 /* Undo any unlocking done in prepare to prevent accidental 434 /* Undo any unlocking done in prepare to prevent accidental
437 * adjustment of the registers. 435 * adjustment of the registers.
438 */ 436 */
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 7350ec2515c6..e00d200df3db 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -250,3 +250,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
250 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 250 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
251 I915_WRITE(BLC_PWM_CTL, tmp | level); 251 I915_WRITE(BLC_PWM_CTL, tmp | level);
252} 252}
253
254void intel_panel_disable_backlight(struct drm_device *dev)
255{
256 struct drm_i915_private *dev_priv = dev->dev_private;
257
258 if (dev_priv->backlight_enabled) {
259 dev_priv->backlight_level = intel_panel_get_backlight(dev);
260 dev_priv->backlight_enabled = false;
261 }
262
263 intel_panel_set_backlight(dev, 0);
264}
265
266void intel_panel_enable_backlight(struct drm_device *dev)
267{
268 struct drm_i915_private *dev_priv = dev->dev_private;
269
270 if (dev_priv->backlight_level == 0)
271 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
272
273 intel_panel_set_backlight(dev, dev_priv->backlight_level);
274 dev_priv->backlight_enabled = true;
275}
276
277void intel_panel_setup_backlight(struct drm_device *dev)
278{
279 struct drm_i915_private *dev_priv = dev->dev_private;
280
281 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
282 dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
283}
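
Reviewer note (not part of the patch): the new panel helpers centralise a small save/restore state machine — remember the current duty cycle the first time the backlight is switched off, and restore it (falling back to the maximum if nothing was saved) when it is switched back on. A minimal sketch with the PWM register accesses stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the PWM duty-cycle register accessors (illustration only). */
    static unsigned hw_level = 70, hw_max = 100;
    static unsigned get_backlight(void)     { return hw_level; }
    static unsigned get_max_backlight(void) { return hw_max; }
    static void set_backlight(unsigned level) { hw_level = level; printf("backlight -> %u\n", level); }

    static unsigned saved_level;
    static bool backlight_enabled;

    static void disable_backlight(void)
    {
        if (backlight_enabled) {
            saved_level = get_backlight();  /* remember the user's setting */
            backlight_enabled = false;
        }
        set_backlight(0);
    }

    static void enable_backlight(void)
    {
        if (saved_level == 0)               /* nothing saved: use maximum */
            saved_level = get_max_backlight();
        set_backlight(saved_level);
        backlight_enabled = true;
    }

    int main(void)
    {
        backlight_enabled = true;
        saved_level = get_backlight();
        disable_backlight();    /* saves 70, writes 0 */
        enable_backlight();     /* restores 70 */
        return 0;
    }
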
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 56bc95c056dd..03e337072517 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,7 +48,7 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
48 return seqno; 48 return seqno;
49} 49}
50 50
51static void 51static int
52render_ring_flush(struct intel_ring_buffer *ring, 52render_ring_flush(struct intel_ring_buffer *ring,
53 u32 invalidate_domains, 53 u32 invalidate_domains,
54 u32 flush_domains) 54 u32 flush_domains)
@@ -56,6 +56,7 @@ render_ring_flush(struct intel_ring_buffer *ring,
56 struct drm_device *dev = ring->dev; 56 struct drm_device *dev = ring->dev;
57 drm_i915_private_t *dev_priv = dev->dev_private; 57 drm_i915_private_t *dev_priv = dev->dev_private;
58 u32 cmd; 58 u32 cmd;
59 int ret;
59 60
60#if WATCH_EXEC 61#if WATCH_EXEC
61 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 62 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -116,12 +117,16 @@ render_ring_flush(struct intel_ring_buffer *ring,
116#if WATCH_EXEC 117#if WATCH_EXEC
117 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 118 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
118#endif 119#endif
119 if (intel_ring_begin(ring, 2) == 0) { 120 ret = intel_ring_begin(ring, 2);
120 intel_ring_emit(ring, cmd); 121 if (ret)
121 intel_ring_emit(ring, MI_NOOP); 122 return ret;
122 intel_ring_advance(ring); 123
123 } 124 intel_ring_emit(ring, cmd);
125 intel_ring_emit(ring, MI_NOOP);
126 intel_ring_advance(ring);
124 } 127 }
128
129 return 0;
125} 130}
126 131
127static void ring_write_tail(struct intel_ring_buffer *ring, 132static void ring_write_tail(struct intel_ring_buffer *ring,
@@ -480,26 +485,56 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
480 return pc->cpu_page[0]; 485 return pc->cpu_page[0];
481} 486}
482 487
488static void
489ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
490{
491 dev_priv->gt_irq_mask &= ~mask;
492 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
493 POSTING_READ(GTIMR);
494}
495
496static void
497ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
498{
499 dev_priv->gt_irq_mask |= mask;
500 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
501 POSTING_READ(GTIMR);
502}
503
504static void
505i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
506{
507 dev_priv->irq_mask &= ~mask;
508 I915_WRITE(IMR, dev_priv->irq_mask);
509 POSTING_READ(IMR);
510}
511
512static void
513i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
514{
515 dev_priv->irq_mask |= mask;
516 I915_WRITE(IMR, dev_priv->irq_mask);
517 POSTING_READ(IMR);
518}
519
483static bool 520static bool
484render_ring_get_irq(struct intel_ring_buffer *ring) 521render_ring_get_irq(struct intel_ring_buffer *ring)
485{ 522{
486 struct drm_device *dev = ring->dev; 523 struct drm_device *dev = ring->dev;
524 drm_i915_private_t *dev_priv = dev->dev_private;
487 525
488 if (!dev->irq_enabled) 526 if (!dev->irq_enabled)
489 return false; 527 return false;
490 528
491 if (atomic_inc_return(&ring->irq_refcount) == 1) { 529 spin_lock(&ring->irq_lock);
492 drm_i915_private_t *dev_priv = dev->dev_private; 530 if (ring->irq_refcount++ == 0) {
493 unsigned long irqflags;
494
495 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
496 if (HAS_PCH_SPLIT(dev)) 531 if (HAS_PCH_SPLIT(dev))
497 ironlake_enable_graphics_irq(dev_priv, 532 ironlake_enable_irq(dev_priv,
498 GT_PIPE_NOTIFY | GT_USER_INTERRUPT); 533 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
499 else 534 else
500 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 535 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
501 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
502 } 536 }
537 spin_unlock(&ring->irq_lock);
503 538
504 return true; 539 return true;
505} 540}
@@ -508,20 +543,18 @@ static void
508render_ring_put_irq(struct intel_ring_buffer *ring) 543render_ring_put_irq(struct intel_ring_buffer *ring)
509{ 544{
510 struct drm_device *dev = ring->dev; 545 struct drm_device *dev = ring->dev;
546 drm_i915_private_t *dev_priv = dev->dev_private;
511 547
512 if (atomic_dec_and_test(&ring->irq_refcount)) { 548 spin_lock(&ring->irq_lock);
513 drm_i915_private_t *dev_priv = dev->dev_private; 549 if (--ring->irq_refcount == 0) {
514 unsigned long irqflags;
515
516 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
517 if (HAS_PCH_SPLIT(dev)) 550 if (HAS_PCH_SPLIT(dev))
518 ironlake_disable_graphics_irq(dev_priv, 551 ironlake_disable_irq(dev_priv,
519 GT_USER_INTERRUPT | 552 GT_USER_INTERRUPT |
520 GT_PIPE_NOTIFY); 553 GT_PIPE_NOTIFY);
521 else 554 else
522 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 555 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
523 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
524 } 556 }
557 spin_unlock(&ring->irq_lock);
525} 558}
526 559
527void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 560void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -534,19 +567,24 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
534 POSTING_READ(mmio); 567 POSTING_READ(mmio);
535} 568}
536 569
537static void 570static int
538bsd_ring_flush(struct intel_ring_buffer *ring, 571bsd_ring_flush(struct intel_ring_buffer *ring,
539 u32 invalidate_domains, 572 u32 invalidate_domains,
540 u32 flush_domains) 573 u32 flush_domains)
541{ 574{
575 int ret;
576
542 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 577 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
543 return; 578 return 0;
544 579
545 if (intel_ring_begin(ring, 2) == 0) { 580 ret = intel_ring_begin(ring, 2);
546 intel_ring_emit(ring, MI_FLUSH); 581 if (ret)
547 intel_ring_emit(ring, MI_NOOP); 582 return ret;
548 intel_ring_advance(ring); 583
549 } 584 intel_ring_emit(ring, MI_FLUSH);
585 intel_ring_emit(ring, MI_NOOP);
586 intel_ring_advance(ring);
587 return 0;
550} 588}
551 589
552static int 590static int
@@ -577,18 +615,15 @@ static bool
577ring_get_irq(struct intel_ring_buffer *ring, u32 flag) 615ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
578{ 616{
579 struct drm_device *dev = ring->dev; 617 struct drm_device *dev = ring->dev;
618 drm_i915_private_t *dev_priv = dev->dev_private;
580 619
581 if (!dev->irq_enabled) 620 if (!dev->irq_enabled)
582 return false; 621 return false;
583 622
584 if (atomic_inc_return(&ring->irq_refcount) == 1) { 623 spin_lock(&ring->irq_lock);
585 drm_i915_private_t *dev_priv = dev->dev_private; 624 if (ring->irq_refcount++ == 0)
586 unsigned long irqflags; 625 ironlake_enable_irq(dev_priv, flag);
587 626 spin_unlock(&ring->irq_lock);
588 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
589 ironlake_enable_graphics_irq(dev_priv, flag);
590 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
591 }
592 627
593 return true; 628 return true;
594} 629}
@@ -597,15 +632,47 @@ static void
597ring_put_irq(struct intel_ring_buffer *ring, u32 flag) 632ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
598{ 633{
599 struct drm_device *dev = ring->dev; 634 struct drm_device *dev = ring->dev;
635 drm_i915_private_t *dev_priv = dev->dev_private;
600 636
601 if (atomic_dec_and_test(&ring->irq_refcount)) { 637 spin_lock(&ring->irq_lock);
602 drm_i915_private_t *dev_priv = dev->dev_private; 638 if (--ring->irq_refcount == 0)
603 unsigned long irqflags; 639 ironlake_disable_irq(dev_priv, flag);
640 spin_unlock(&ring->irq_lock);
641}
604 642
605 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 643static bool
606 ironlake_disable_graphics_irq(dev_priv, flag); 644gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
607 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 645{
646 struct drm_device *dev = ring->dev;
647 drm_i915_private_t *dev_priv = dev->dev_private;
648
649 if (!dev->irq_enabled)
650 return false;
651
652 spin_lock(&ring->irq_lock);
653 if (ring->irq_refcount++ == 0) {
654 ring->irq_mask &= ~rflag;
655 I915_WRITE_IMR(ring, ring->irq_mask);
656 ironlake_enable_irq(dev_priv, gflag);
608 } 657 }
658 spin_unlock(&ring->irq_lock);
659
660 return true;
661}
662
663static void
664gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
665{
666 struct drm_device *dev = ring->dev;
667 drm_i915_private_t *dev_priv = dev->dev_private;
668
669 spin_lock(&ring->irq_lock);
670 if (--ring->irq_refcount == 0) {
671 ring->irq_mask |= rflag;
672 I915_WRITE_IMR(ring, ring->irq_mask);
673 ironlake_disable_irq(dev_priv, gflag);
674 }
675 spin_unlock(&ring->irq_lock);
609} 676}
610 677
611static bool 678static bool
@@ -748,6 +815,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
748 INIT_LIST_HEAD(&ring->request_list); 815 INIT_LIST_HEAD(&ring->request_list);
749 INIT_LIST_HEAD(&ring->gpu_write_list); 816 INIT_LIST_HEAD(&ring->gpu_write_list);
750 817
818 spin_lock_init(&ring->irq_lock);
819 ring->irq_mask = ~0;
820
751 if (I915_NEED_GFX_HWS(dev)) { 821 if (I915_NEED_GFX_HWS(dev)) {
752 ret = init_status_page(ring); 822 ret = init_status_page(ring);
753 if (ret) 823 if (ret)
@@ -785,6 +855,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
785 if (ret) 855 if (ret)
786 goto err_unmap; 856 goto err_unmap;
787 857
858 /* Workaround an erratum on the i830 which causes a hang if
859 * the TAIL pointer points to within the last 2 cachelines
860 * of the buffer.
861 */
862 ring->effective_size = ring->size;
863 if (IS_I830(ring->dev))
864 ring->effective_size -= 128;
865
788 return 0; 866 return 0;
789 867
790err_unmap: 868err_unmap:
@@ -827,8 +905,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
827static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) 905static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
828{ 906{
829 unsigned int *virt; 907 unsigned int *virt;
830 int rem; 908 int rem = ring->size - ring->tail;
831 rem = ring->size - ring->tail;
832 909
833 if (ring->space < rem) { 910 if (ring->space < rem) {
834 int ret = intel_wait_ring_buffer(ring, rem); 911 int ret = intel_wait_ring_buffer(ring, rem);
@@ -895,7 +972,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
895 int n = 4*num_dwords; 972 int n = 4*num_dwords;
896 int ret; 973 int ret;
897 974
898 if (unlikely(ring->tail + n > ring->size)) { 975 if (unlikely(ring->tail + n > ring->effective_size)) {
899 ret = intel_wrap_ring_buffer(ring); 976 ret = intel_wrap_ring_buffer(ring);
900 if (unlikely(ret)) 977 if (unlikely(ret))
901 return ret; 978 return ret;
@@ -973,20 +1050,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
973 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 1050 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
974} 1051}
975 1052
976static void gen6_ring_flush(struct intel_ring_buffer *ring, 1053static int gen6_ring_flush(struct intel_ring_buffer *ring,
977 u32 invalidate_domains, 1054 u32 invalidate_domains,
978 u32 flush_domains) 1055 u32 flush_domains)
979{ 1056{
1057 int ret;
1058
980 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1059 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
981 return; 1060 return 0;
982 1061
983 if (intel_ring_begin(ring, 4) == 0) { 1062 ret = intel_ring_begin(ring, 4);
984 intel_ring_emit(ring, MI_FLUSH_DW); 1063 if (ret)
985 intel_ring_emit(ring, 0); 1064 return ret;
986 intel_ring_emit(ring, 0); 1065
987 intel_ring_emit(ring, 0); 1066 intel_ring_emit(ring, MI_FLUSH_DW);
988 intel_ring_advance(ring); 1067 intel_ring_emit(ring, 0);
989 } 1068 intel_ring_emit(ring, 0);
1069 intel_ring_emit(ring, 0);
1070 intel_ring_advance(ring);
1071 return 0;
990} 1072}
991 1073
992static int 1074static int
@@ -1008,15 +1090,35 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1008} 1090}
1009 1091
1010static bool 1092static bool
1093gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1094{
1095 return gen6_ring_get_irq(ring,
1096 GT_USER_INTERRUPT,
1097 GEN6_RENDER_USER_INTERRUPT);
1098}
1099
1100static void
1101gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1102{
1103 return gen6_ring_put_irq(ring,
1104 GT_USER_INTERRUPT,
1105 GEN6_RENDER_USER_INTERRUPT);
1106}
1107
1108static bool
1011gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) 1109gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1012{ 1110{
1013 return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); 1111 return gen6_ring_get_irq(ring,
1112 GT_GEN6_BSD_USER_INTERRUPT,
1113 GEN6_BSD_USER_INTERRUPT);
1014} 1114}
1015 1115
1016static void 1116static void
1017gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) 1117gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1018{ 1118{
1019 ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); 1119 return gen6_ring_put_irq(ring,
1120 GT_GEN6_BSD_USER_INTERRUPT,
1121 GEN6_BSD_USER_INTERRUPT);
1020} 1122}
1021 1123
1022/* ring buffer for Video Codec for Gen6+ */ 1124/* ring buffer for Video Codec for Gen6+ */
@@ -1040,13 +1142,17 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
1040static bool 1142static bool
1041blt_ring_get_irq(struct intel_ring_buffer *ring) 1143blt_ring_get_irq(struct intel_ring_buffer *ring)
1042{ 1144{
1043 return ring_get_irq(ring, GT_BLT_USER_INTERRUPT); 1145 return gen6_ring_get_irq(ring,
1146 GT_BLT_USER_INTERRUPT,
1147 GEN6_BLITTER_USER_INTERRUPT);
1044} 1148}
1045 1149
1046static void 1150static void
1047blt_ring_put_irq(struct intel_ring_buffer *ring) 1151blt_ring_put_irq(struct intel_ring_buffer *ring)
1048{ 1152{
1049 ring_put_irq(ring, GT_BLT_USER_INTERRUPT); 1153 gen6_ring_put_irq(ring,
1154 GT_BLT_USER_INTERRUPT,
1155 GEN6_BLITTER_USER_INTERRUPT);
1050} 1156}
1051 1157
1052 1158
@@ -1115,20 +1221,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
1115 return intel_ring_begin(ring, 4); 1221 return intel_ring_begin(ring, 4);
1116} 1222}
1117 1223
1118static void blt_ring_flush(struct intel_ring_buffer *ring, 1224static int blt_ring_flush(struct intel_ring_buffer *ring,
1119 u32 invalidate_domains, 1225 u32 invalidate_domains,
1120 u32 flush_domains) 1226 u32 flush_domains)
1121{ 1227{
1228 int ret;
1229
1122 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1230 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
1123 return; 1231 return 0;
1124 1232
1125 if (blt_ring_begin(ring, 4) == 0) { 1233 ret = blt_ring_begin(ring, 4);
1126 intel_ring_emit(ring, MI_FLUSH_DW); 1234 if (ret)
1127 intel_ring_emit(ring, 0); 1235 return ret;
1128 intel_ring_emit(ring, 0); 1236
1129 intel_ring_emit(ring, 0); 1237 intel_ring_emit(ring, MI_FLUSH_DW);
1130 intel_ring_advance(ring); 1238 intel_ring_emit(ring, 0);
1131 } 1239 intel_ring_emit(ring, 0);
1240 intel_ring_emit(ring, 0);
1241 intel_ring_advance(ring);
1242 return 0;
1132} 1243}
1133 1244
1134static void blt_ring_cleanup(struct intel_ring_buffer *ring) 1245static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -1165,6 +1276,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1165 *ring = render_ring; 1276 *ring = render_ring;
1166 if (INTEL_INFO(dev)->gen >= 6) { 1277 if (INTEL_INFO(dev)->gen >= 6) {
1167 ring->add_request = gen6_add_request; 1278 ring->add_request = gen6_add_request;
1279 ring->irq_get = gen6_render_ring_get_irq;
1280 ring->irq_put = gen6_render_ring_put_irq;
1168 } else if (IS_GEN5(dev)) { 1281 } else if (IS_GEN5(dev)) {
1169 ring->add_request = pc_render_add_request; 1282 ring->add_request = pc_render_add_request;
1170 ring->get_seqno = pc_render_get_seqno; 1283 ring->get_seqno = pc_render_get_seqno;
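
Reviewer note (not part of the patch): the interrupt reference counting moves from a bare atomic_t to a per-ring spinlock, so the refcount and the mask-register writes are updated under one lock, and only the first get / last put touches the hardware. The same pattern reduced to a user-space sketch, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned irq_refcount;

    static void hw_enable_irq(void)  { puts("unmask user interrupt"); }
    static void hw_disable_irq(void) { puts("mask user interrupt"); }

    /* First getter unmasks the interrupt; last putter masks it again. */
    static void ring_get_irq(void)
    {
        pthread_mutex_lock(&irq_lock);
        if (irq_refcount++ == 0)
            hw_enable_irq();
        pthread_mutex_unlock(&irq_lock);
    }

    static void ring_put_irq(void)
    {
        pthread_mutex_lock(&irq_lock);
        if (--irq_refcount == 0)
            hw_disable_irq();
        pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
        ring_get_irq();     /* unmasks */
        ring_get_irq();     /* nested: no hardware write */
        ring_put_irq();
        ring_put_irq();     /* masks */
        return 0;
    }
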
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8e2e357ad6ee..be9087e4c9be 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -16,21 +16,24 @@ struct intel_hw_status_page {
16 16
17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) 17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
18 18
19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base)) 19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) 20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
21 21
22#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base)) 22#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) 23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
24 24
25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base)) 25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) 26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
27 27
28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) 28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) 29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
30 30
31#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base)) 31#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
32#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base)) 32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
33#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base)) 33
34#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
35#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
36#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
34 37
35struct intel_ring_buffer { 38struct intel_ring_buffer {
36 const char *name; 39 const char *name;
@@ -49,12 +52,15 @@ struct intel_ring_buffer {
49 u32 tail; 52 u32 tail;
50 int space; 53 int space;
51 int size; 54 int size;
55 int effective_size;
52 struct intel_hw_status_page status_page; 56 struct intel_hw_status_page status_page;
53 57
58 spinlock_t irq_lock;
59 u32 irq_refcount;
60 u32 irq_mask;
54 u32 irq_seqno; /* last seq seem at irq time */ 61 u32 irq_seqno; /* last seq seem at irq time */
55 u32 waiting_seqno; 62 u32 waiting_seqno;
56 u32 sync_seqno[I915_NUM_RINGS-1]; 63 u32 sync_seqno[I915_NUM_RINGS-1];
57 atomic_t irq_refcount;
58 bool __must_check (*irq_get)(struct intel_ring_buffer *ring); 64 bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
59 void (*irq_put)(struct intel_ring_buffer *ring); 65 void (*irq_put)(struct intel_ring_buffer *ring);
60 66
@@ -62,9 +68,9 @@ struct intel_ring_buffer {
62 68
63 void (*write_tail)(struct intel_ring_buffer *ring, 69 void (*write_tail)(struct intel_ring_buffer *ring,
64 u32 value); 70 u32 value);
65 void (*flush)(struct intel_ring_buffer *ring, 71 int __must_check (*flush)(struct intel_ring_buffer *ring,
66 u32 invalidate_domains, 72 u32 invalidate_domains,
67 u32 flush_domains); 73 u32 flush_domains);
68 int (*add_request)(struct intel_ring_buffer *ring, 74 int (*add_request)(struct intel_ring_buffer *ring,
69 u32 *seqno); 75 u32 *seqno);
70 u32 (*get_seqno)(struct intel_ring_buffer *ring); 76 u32 (*get_seqno)(struct intel_ring_buffer *ring);
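
Reviewer note (not part of the patch): the new effective_size field is what lets the i830 erratum workaround reserve the last two cachelines (128 bytes) of the ring, with intel_ring_begin() wrapping against the effective size rather than the real one. A small worked example of that wrap decision, with made-up numbers:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned size = 4096;                   /* real ring size */
        unsigned effective_size = size - 128;   /* i830: keep TAIL out of the last 2 cachelines */
        unsigned tail = 3904;                   /* current write offset, made up */
        unsigned n = 4 * 20;                    /* bytes about to be emitted */

        bool must_wrap = tail + n > effective_size;
        printf("tail=%u n=%u effective=%u -> %s\n",
               tail, n, effective_size,
               must_wrap ? "wrap to start first" : "emit in place");
        return 0;
    }
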
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9d0af36a13ec..45cd37652a37 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1024,9 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1024 if (!intel_sdvo_set_target_input(intel_sdvo)) 1024 if (!intel_sdvo_set_target_input(intel_sdvo))
1025 return; 1025 return;
1026 1026
1027 if (intel_sdvo->has_hdmi_monitor && 1027 if (intel_sdvo->has_hdmi_monitor) {
1028 !intel_sdvo_set_avi_infoframe(intel_sdvo)) 1028 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
1029 return; 1029 intel_sdvo_set_colorimetry(intel_sdvo,
1030 SDVO_COLORIMETRY_RGB256);
1031 intel_sdvo_set_avi_infoframe(intel_sdvo);
1032 } else
1033 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
1030 1034
1031 if (intel_sdvo->is_tv && 1035 if (intel_sdvo->is_tv &&
1032 !intel_sdvo_set_tv_format(intel_sdvo)) 1036 !intel_sdvo_set_tv_format(intel_sdvo))
@@ -1398,6 +1402,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1398 1402
1399 intel_sdvo->attached_output = response; 1403 intel_sdvo->attached_output = response;
1400 1404
1405 intel_sdvo->has_hdmi_monitor = false;
1406 intel_sdvo->has_hdmi_audio = false;
1407
1401 if ((intel_sdvo_connector->output_flag & response) == 0) 1408 if ((intel_sdvo_connector->output_flag & response) == 0)
1402 ret = connector_status_disconnected; 1409 ret = connector_status_disconnected;
1403 else if (response & SDVO_TMDS_MASK) 1410 else if (response & SDVO_TMDS_MASK)
@@ -1922,20 +1929,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
1922static bool 1929static bool
1923intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) 1930intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
1924{ 1931{
1925 int is_hdmi; 1932 return intel_sdvo_check_supp_encode(intel_sdvo);
1926
1927 if (!intel_sdvo_check_supp_encode(intel_sdvo))
1928 return false;
1929
1930 if (!intel_sdvo_set_target_output(intel_sdvo,
1931 device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
1932 return false;
1933
1934 is_hdmi = 0;
1935 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
1936 return false;
1937
1938 return !!is_hdmi;
1939} 1933}
1940 1934
1941static u8 1935static u8
@@ -2037,12 +2031,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2037 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2031 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2038 2032
2039 if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { 2033 if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
2040 /* enable hdmi encoding mode if supported */
2041 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
2042 intel_sdvo_set_colorimetry(intel_sdvo,
2043 SDVO_COLORIMETRY_RGB256);
2044 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2034 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2045
2046 intel_sdvo->is_hdmi = true; 2035 intel_sdvo->is_hdmi = true;
2047 } 2036 }
2048 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2037 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
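
Reviewer note (not part of the patch): the SDVO change defers the HDMI-versus-DVI encode selection from connector init to mode set, keyed off what the most recent detect() saw, and clears the cached flags on every detect. Schematically, with placeholder names rather than the driver API:

    #include <stdbool.h>
    #include <stdio.h>

    enum encode { ENCODE_DVI, ENCODE_HDMI };

    /* Updated on every detect(), so stale monitor state never leaks. */
    static bool has_hdmi_monitor;

    static void detect(bool monitor_is_hdmi)
    {
        has_hdmi_monitor = monitor_is_hdmi;
    }

    static enum encode mode_set(void)
    {
        /* Pick the encode at mode-set time, based on the current monitor. */
        return has_hdmi_monitor ? ENCODE_HDMI : ENCODE_DVI;
    }

    int main(void)
    {
        detect(true);
        printf("mode_set -> %s\n", mode_set() == ENCODE_HDMI ? "HDMI" : "DVI");
        detect(false);
        printf("mode_set -> %s\n", mode_set() == ENCODE_HDMI ? "HDMI" : "DVI");
        return 0;
    }
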
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ff652c77a0a5..4c8bfc97fb4c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2963,6 +2963,7 @@ config TILE_NET
2963config XEN_NETDEV_FRONTEND 2963config XEN_NETDEV_FRONTEND
2964 tristate "Xen network device frontend driver" 2964 tristate "Xen network device frontend driver"
2965 depends on XEN 2965 depends on XEN
2966 select XEN_XENBUS_FRONTEND
2966 default y 2967 default y
2967 help 2968 help
2968 The network device frontend driver allows the kernel to 2969 The network device frontend driver allows the kernel to
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5b1630e4e9e3..a9523fdc6911 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -45,6 +45,7 @@ config XEN_PCIDEV_FRONTEND
45 depends on PCI && X86 && XEN 45 depends on PCI && X86 && XEN
46 select HOTPLUG 46 select HOTPLUG
47 select PCI_XEN 47 select PCI_XEN
48 select XEN_XENBUS_FRONTEND
48 default y 49 default y
49 help 50 help
50 The PCI device frontend driver allows the kernel to import arbitrary 51 The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 6e6180ccd726..5a48ce996dea 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -29,6 +29,14 @@ config XEN_DEV_EVTCHN
29 firing. 29 firing.
30 If in doubt, say yes. 30 If in doubt, say yes.
31 31
32config XEN_BACKEND
33 bool "Backend driver support"
34 depends on XEN_DOM0
35 default y
36 help
37 Support for backend device drivers that provide I/O services
38 to other virtual machines.
39
32config XENFS 40config XENFS
33 tristate "Xen filesystem" 41 tristate "Xen filesystem"
34 default y 42 default y
@@ -62,6 +70,9 @@ config XEN_SYS_HYPERVISOR
62 virtual environment, /sys/hypervisor will still be present, 70 virtual environment, /sys/hypervisor will still be present,
63 but will have no xen contents. 71 but will have no xen contents.
64 72
73config XEN_XENBUS_FRONTEND
74 tristate
75
65config XEN_PLATFORM_PCI 76config XEN_PLATFORM_PCI
66 tristate "xen platform pci device driver" 77 tristate "xen platform pci device driver"
67 depends on XEN_PVHVM 78 depends on XEN_PVHVM
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 5571f5b84223..8dca685358b4 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -5,3 +5,8 @@ xenbus-objs += xenbus_client.o
5xenbus-objs += xenbus_comms.o 5xenbus-objs += xenbus_comms.o
6xenbus-objs += xenbus_xs.o 6xenbus-objs += xenbus_xs.o
7xenbus-objs += xenbus_probe.o 7xenbus-objs += xenbus_probe.o
8
9xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
10xenbus-objs += $(xenbus-be-objs-y)
11
12obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index deb9c4ba3a93..baa65e7fbbc7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -56,7 +56,6 @@
56#include <xen/events.h> 56#include <xen/events.h>
57#include <xen/page.h> 57#include <xen/page.h>
58 58
59#include <xen/platform_pci.h>
60#include <xen/hvm.h> 59#include <xen/hvm.h>
61 60
62#include "xenbus_comms.h" 61#include "xenbus_comms.h"
@@ -73,15 +72,6 @@ static unsigned long xen_store_mfn;
73 72
74static BLOCKING_NOTIFIER_HEAD(xenstore_chain); 73static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
75 74
76static void wait_for_devices(struct xenbus_driver *xendrv);
77
78static int xenbus_probe_frontend(const char *type, const char *name);
79
80static void xenbus_dev_shutdown(struct device *_dev);
81
82static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
83static int xenbus_dev_resume(struct device *dev);
84
85/* If something in array of ids matches this device, return it. */ 75/* If something in array of ids matches this device, return it. */
86static const struct xenbus_device_id * 76static const struct xenbus_device_id *
87match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) 77match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -102,34 +92,7 @@ int xenbus_match(struct device *_dev, struct device_driver *_drv)
102 92
103 return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; 93 return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
104} 94}
105 95EXPORT_SYMBOL_GPL(xenbus_match);
106static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
107{
108 struct xenbus_device *dev = to_xenbus_device(_dev);
109
110 if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
111 return -ENOMEM;
112
113 return 0;
114}
115
116/* device/<type>/<id> => <type>-<id> */
117static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
118{
119 nodename = strchr(nodename, '/');
120 if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
121 printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
122 return -EINVAL;
123 }
124
125 strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
126 if (!strchr(bus_id, '/')) {
127 printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
128 return -EINVAL;
129 }
130 *strchr(bus_id, '/') = '-';
131 return 0;
132}
133 96
134 97
135static void free_otherend_details(struct xenbus_device *dev) 98static void free_otherend_details(struct xenbus_device *dev)
@@ -149,7 +112,30 @@ static void free_otherend_watch(struct xenbus_device *dev)
149} 112}
150 113
151 114
152int read_otherend_details(struct xenbus_device *xendev, 115static int talk_to_otherend(struct xenbus_device *dev)
116{
117 struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
118
119 free_otherend_watch(dev);
120 free_otherend_details(dev);
121
122 return drv->read_otherend_details(dev);
123}
124
125
126
127static int watch_otherend(struct xenbus_device *dev)
128{
129 struct xen_bus_type *bus =
130 container_of(dev->dev.bus, struct xen_bus_type, bus);
131
132 return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
133 bus->otherend_changed,
134 "%s/%s", dev->otherend, "state");
135}
136
137
138int xenbus_read_otherend_details(struct xenbus_device *xendev,
153 char *id_node, char *path_node) 139 char *id_node, char *path_node)
154{ 140{
155 int err = xenbus_gather(XBT_NIL, xendev->nodename, 141 int err = xenbus_gather(XBT_NIL, xendev->nodename,
@@ -174,39 +160,11 @@ int read_otherend_details(struct xenbus_device *xendev,
174 160
175 return 0; 161 return 0;
176} 162}
163EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
177 164
178 165void xenbus_otherend_changed(struct xenbus_watch *watch,
179static int read_backend_details(struct xenbus_device *xendev) 166 const char **vec, unsigned int len,
180{ 167 int ignore_on_shutdown)
181 return read_otherend_details(xendev, "backend-id", "backend");
182}
183
184static struct device_attribute xenbus_dev_attrs[] = {
185 __ATTR_NULL
186};
187
188/* Bus type for frontend drivers. */
189static struct xen_bus_type xenbus_frontend = {
190 .root = "device",
191 .levels = 2, /* device/type/<id> */
192 .get_bus_id = frontend_bus_id,
193 .probe = xenbus_probe_frontend,
194 .bus = {
195 .name = "xen",
196 .match = xenbus_match,
197 .uevent = xenbus_uevent,
198 .probe = xenbus_dev_probe,
199 .remove = xenbus_dev_remove,
200 .shutdown = xenbus_dev_shutdown,
201 .dev_attrs = xenbus_dev_attrs,
202
203 .suspend = xenbus_dev_suspend,
204 .resume = xenbus_dev_resume,
205 },
206};
207
208static void otherend_changed(struct xenbus_watch *watch,
209 const char **vec, unsigned int len)
210{ 168{
211 struct xenbus_device *dev = 169 struct xenbus_device *dev =
212 container_of(watch, struct xenbus_device, otherend_watch); 170 container_of(watch, struct xenbus_device, otherend_watch);
@@ -234,11 +192,7 @@ static void otherend_changed(struct xenbus_watch *watch,
234 * work that can fail e.g., when the rootfs is gone. 192 * work that can fail e.g., when the rootfs is gone.
235 */ 193 */
236 if (system_state > SYSTEM_RUNNING) { 194 if (system_state > SYSTEM_RUNNING) {
237 struct xen_bus_type *bus = bus; 195 if (ignore_on_shutdown && (state == XenbusStateClosing))
238 bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
239 /* If we're frontend, drive the state machine to Closed. */
240 /* This should cause the backend to release our resources. */
241 if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
242 xenbus_frontend_closed(dev); 196 xenbus_frontend_closed(dev);
243 return; 197 return;
244 } 198 }
@@ -246,25 +200,7 @@ static void otherend_changed(struct xenbus_watch *watch,
246 if (drv->otherend_changed) 200 if (drv->otherend_changed)
247 drv->otherend_changed(dev, state); 201 drv->otherend_changed(dev, state);
248} 202}
249 203EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
250
251static int talk_to_otherend(struct xenbus_device *dev)
252{
253 struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
254
255 free_otherend_watch(dev);
256 free_otherend_details(dev);
257
258 return drv->read_otherend_details(dev);
259}
260
261
262static int watch_otherend(struct xenbus_device *dev)
263{
264 return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
265 "%s/%s", dev->otherend, "state");
266}
267
268 204
269int xenbus_dev_probe(struct device *_dev) 205int xenbus_dev_probe(struct device *_dev)
270{ 206{
@@ -308,8 +244,9 @@ int xenbus_dev_probe(struct device *_dev)
308fail: 244fail:
309 xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); 245 xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
310 xenbus_switch_state(dev, XenbusStateClosed); 246 xenbus_switch_state(dev, XenbusStateClosed);
311 return -ENODEV; 247 return err;
312} 248}
249EXPORT_SYMBOL_GPL(xenbus_dev_probe);
313 250
314int xenbus_dev_remove(struct device *_dev) 251int xenbus_dev_remove(struct device *_dev)
315{ 252{
@@ -327,8 +264,9 @@ int xenbus_dev_remove(struct device *_dev)
327 xenbus_switch_state(dev, XenbusStateClosed); 264 xenbus_switch_state(dev, XenbusStateClosed);
328 return 0; 265 return 0;
329} 266}
267EXPORT_SYMBOL_GPL(xenbus_dev_remove);
330 268
331static void xenbus_dev_shutdown(struct device *_dev) 269void xenbus_dev_shutdown(struct device *_dev)
332{ 270{
333 struct xenbus_device *dev = to_xenbus_device(_dev); 271 struct xenbus_device *dev = to_xenbus_device(_dev);
334 unsigned long timeout = 5*HZ; 272 unsigned long timeout = 5*HZ;
@@ -349,6 +287,7 @@ static void xenbus_dev_shutdown(struct device *_dev)
349 out: 287 out:
350 put_device(&dev->dev); 288 put_device(&dev->dev);
351} 289}
290EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
352 291
353int xenbus_register_driver_common(struct xenbus_driver *drv, 292int xenbus_register_driver_common(struct xenbus_driver *drv,
354 struct xen_bus_type *bus, 293 struct xen_bus_type *bus,
@@ -362,25 +301,7 @@ int xenbus_register_driver_common(struct xenbus_driver *drv,
362 301
363 return driver_register(&drv->driver); 302 return driver_register(&drv->driver);
364} 303}
365 304EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
366int __xenbus_register_frontend(struct xenbus_driver *drv,
367 struct module *owner, const char *mod_name)
368{
369 int ret;
370
371 drv->read_otherend_details = read_backend_details;
372
373 ret = xenbus_register_driver_common(drv, &xenbus_frontend,
374 owner, mod_name);
375 if (ret)
376 return ret;
377
378 /* If this driver is loaded as a module wait for devices to attach. */
379 wait_for_devices(drv);
380
381 return 0;
382}
383EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
384 305
385void xenbus_unregister_driver(struct xenbus_driver *drv) 306void xenbus_unregister_driver(struct xenbus_driver *drv)
386{ 307{
@@ -551,24 +472,7 @@ fail:
551 kfree(xendev); 472 kfree(xendev);
552 return err; 473 return err;
553} 474}
554 475EXPORT_SYMBOL_GPL(xenbus_probe_node);
555/* device/<typename>/<name> */
556static int xenbus_probe_frontend(const char *type, const char *name)
557{
558 char *nodename;
559 int err;
560
561 nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
562 xenbus_frontend.root, type, name);
563 if (!nodename)
564 return -ENOMEM;
565
566 DPRINTK("%s", nodename);
567
568 err = xenbus_probe_node(&xenbus_frontend, type, nodename);
569 kfree(nodename);
570 return err;
571}
572 476
573static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) 477static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
574{ 478{
@@ -582,10 +486,11 @@ static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
582 return PTR_ERR(dir); 486 return PTR_ERR(dir);
583 487
584 for (i = 0; i < dir_n; i++) { 488 for (i = 0; i < dir_n; i++) {
585 err = bus->probe(type, dir[i]); 489 err = bus->probe(bus, type, dir[i]);
586 if (err) 490 if (err)
587 break; 491 break;
588 } 492 }
493
589 kfree(dir); 494 kfree(dir);
590 return err; 495 return err;
591} 496}
@@ -605,9 +510,11 @@ int xenbus_probe_devices(struct xen_bus_type *bus)
605 if (err) 510 if (err)
606 break; 511 break;
607 } 512 }
513
608 kfree(dir); 514 kfree(dir);
609 return err; 515 return err;
610} 516}
517EXPORT_SYMBOL_GPL(xenbus_probe_devices);
611 518
612static unsigned int char_count(const char *str, char c) 519static unsigned int char_count(const char *str, char c)
613{ 520{
@@ -670,32 +577,18 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
670} 577}
671EXPORT_SYMBOL_GPL(xenbus_dev_changed); 578EXPORT_SYMBOL_GPL(xenbus_dev_changed);
672 579
673static void frontend_changed(struct xenbus_watch *watch, 580int xenbus_dev_suspend(struct device *dev, pm_message_t state)
674 const char **vec, unsigned int len)
675{
676 DPRINTK("");
677
678 xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
679}
680
681/* We watch for devices appearing and vanishing. */
682static struct xenbus_watch fe_watch = {
683 .node = "device",
684 .callback = frontend_changed,
685};
686
687static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
688{ 581{
689 int err = 0; 582 int err = 0;
690 struct xenbus_driver *drv; 583 struct xenbus_driver *drv;
691 struct xenbus_device *xdev; 584 struct xenbus_device *xdev
585 = container_of(dev, struct xenbus_device, dev);
692 586
693 DPRINTK(""); 587 DPRINTK("%s", xdev->nodename);
694 588
695 if (dev->driver == NULL) 589 if (dev->driver == NULL)
696 return 0; 590 return 0;
697 drv = to_xenbus_driver(dev->driver); 591 drv = to_xenbus_driver(dev->driver);
698 xdev = container_of(dev, struct xenbus_device, dev);
699 if (drv->suspend) 592 if (drv->suspend)
700 err = drv->suspend(xdev, state); 593 err = drv->suspend(xdev, state);
701 if (err) 594 if (err)
@@ -703,21 +596,20 @@ static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
703 "xenbus: suspend %s failed: %i\n", dev_name(dev), err); 596 "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
704 return 0; 597 return 0;
705} 598}
599EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
706 600
707static int xenbus_dev_resume(struct device *dev) 601int xenbus_dev_resume(struct device *dev)
708{ 602{
709 int err; 603 int err;
710 struct xenbus_driver *drv; 604 struct xenbus_driver *drv;
711 struct xenbus_device *xdev; 605 struct xenbus_device *xdev
606 = container_of(dev, struct xenbus_device, dev);
712 607
713 DPRINTK(""); 608 DPRINTK("%s", xdev->nodename);
714 609
715 if (dev->driver == NULL) 610 if (dev->driver == NULL)
716 return 0; 611 return 0;
717
718 drv = to_xenbus_driver(dev->driver); 612 drv = to_xenbus_driver(dev->driver);
719 xdev = container_of(dev, struct xenbus_device, dev);
720
721 err = talk_to_otherend(xdev); 613 err = talk_to_otherend(xdev);
722 if (err) { 614 if (err) {
723 printk(KERN_WARNING 615 printk(KERN_WARNING
@@ -748,6 +640,7 @@ static int xenbus_dev_resume(struct device *dev)
748 640
749 return 0; 641 return 0;
750} 642}
643EXPORT_SYMBOL_GPL(xenbus_dev_resume);
751 644
752/* A flag to determine if xenstored is 'ready' (i.e. has started) */ 645/* A flag to determine if xenstored is 'ready' (i.e. has started) */
753int xenstored_ready = 0; 646int xenstored_ready = 0;
@@ -776,11 +669,6 @@ void xenbus_probe(struct work_struct *unused)
776{ 669{
777 xenstored_ready = 1; 670 xenstored_ready = 1;
778 671
779 /* Enumerate devices in xenstore and watch for changes. */
780 xenbus_probe_devices(&xenbus_frontend);
781 register_xenbus_watch(&fe_watch);
782 xenbus_backend_probe_and_watch();
783
784 /* Notify others that xenstore is up */ 672 /* Notify others that xenstore is up */
785 blocking_notifier_call_chain(&xenstore_chain, 0, NULL); 673 blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
786} 674}
@@ -809,16 +697,7 @@ static int __init xenbus_init(void)
809 697
810 err = -ENODEV; 698 err = -ENODEV;
811 if (!xen_domain()) 699 if (!xen_domain())
812 goto out_error; 700 return err;
813
814 /* Register ourselves with the kernel bus subsystem */
815 err = bus_register(&xenbus_frontend.bus);
816 if (err)
817 goto out_error;
818
819 err = xenbus_backend_bus_register();
820 if (err)
821 goto out_unreg_front;
822 701
823 /* 702 /*
824 * Domain0 doesn't have a store_evtchn or store_mfn yet. 703 * Domain0 doesn't have a store_evtchn or store_mfn yet.
@@ -874,7 +753,7 @@ static int __init xenbus_init(void)
874 if (err) { 753 if (err) {
875 printk(KERN_WARNING 754 printk(KERN_WARNING
876 "XENBUS: Error initializing xenstore comms: %i\n", err); 755 "XENBUS: Error initializing xenstore comms: %i\n", err);
877 goto out_unreg_back; 756 goto out_error;
878 } 757 }
879 758
880#ifdef CONFIG_XEN_COMPAT_XENFS 759#ifdef CONFIG_XEN_COMPAT_XENFS
@@ -887,133 +766,13 @@ static int __init xenbus_init(void)
887 766
888 return 0; 767 return 0;
889 768
890 out_unreg_back:
891 xenbus_backend_bus_unregister();
892
893 out_unreg_front:
894 bus_unregister(&xenbus_frontend.bus);
895
896 out_error: 769 out_error:
897 if (page != 0) 770 if (page != 0)
898 free_page(page); 771 free_page(page);
772
899 return err; 773 return err;
900} 774}
901 775
902postcore_initcall(xenbus_init); 776postcore_initcall(xenbus_init);
903 777
904MODULE_LICENSE("GPL"); 778MODULE_LICENSE("GPL");
905
906static int is_device_connecting(struct device *dev, void *data)
907{
908 struct xenbus_device *xendev = to_xenbus_device(dev);
909 struct device_driver *drv = data;
910 struct xenbus_driver *xendrv;
911
912 /*
913 * A device with no driver will never connect. We care only about
914 * devices which should currently be in the process of connecting.
915 */
916 if (!dev->driver)
917 return 0;
918
919 /* Is this search limited to a particular driver? */
920 if (drv && (dev->driver != drv))
921 return 0;
922
923 xendrv = to_xenbus_driver(dev->driver);
924 return (xendev->state < XenbusStateConnected ||
925 (xendev->state == XenbusStateConnected &&
926 xendrv->is_ready && !xendrv->is_ready(xendev)));
927}
928
929static int exists_connecting_device(struct device_driver *drv)
930{
931 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
932 is_device_connecting);
933}
934
935static int print_device_status(struct device *dev, void *data)
936{
937 struct xenbus_device *xendev = to_xenbus_device(dev);
938 struct device_driver *drv = data;
939
940 /* Is this operation limited to a particular driver? */
941 if (drv && (dev->driver != drv))
942 return 0;
943
944 if (!dev->driver) {
945 /* Information only: is this too noisy? */
946 printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
947 xendev->nodename);
948 } else if (xendev->state < XenbusStateConnected) {
949 enum xenbus_state rstate = XenbusStateUnknown;
950 if (xendev->otherend)
951 rstate = xenbus_read_driver_state(xendev->otherend);
952 printk(KERN_WARNING "XENBUS: Timeout connecting "
953 "to device: %s (local state %d, remote state %d)\n",
954 xendev->nodename, xendev->state, rstate);
955 }
956
957 return 0;
958}
959
960/* We only wait for device setup after most initcalls have run. */
961static int ready_to_wait_for_devices;
962
963/*
964 * On a 5-minute timeout, wait for all devices currently configured. We need
965 * to do this to guarantee that the filesystems and / or network devices
966 * needed for boot are available, before we can allow the boot to proceed.
967 *
968 * This needs to be on a late_initcall, to happen after the frontend device
969 * drivers have been initialised, but before the root fs is mounted.
970 *
971 * A possible improvement here would be to have the tools add a per-device
972 * flag to the store entry, indicating whether it is needed at boot time.
973 * This would allow people who knew what they were doing to accelerate their
974 * boot slightly, but of course needs tools or manual intervention to set up
975 * those flags correctly.
976 */
977static void wait_for_devices(struct xenbus_driver *xendrv)
978{
979 unsigned long start = jiffies;
980 struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
981 unsigned int seconds_waited = 0;
982
983 if (!ready_to_wait_for_devices || !xen_domain())
984 return;
985
986 while (exists_connecting_device(drv)) {
987 if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
988 if (!seconds_waited)
989 printk(KERN_WARNING "XENBUS: Waiting for "
990 "devices to initialise: ");
991 seconds_waited += 5;
992 printk("%us...", 300 - seconds_waited);
993 if (seconds_waited == 300)
994 break;
995 }
996
997 schedule_timeout_interruptible(HZ/10);
998 }
999
1000 if (seconds_waited)
1001 printk("\n");
1002
1003 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
1004 print_device_status);
1005}
1006
1007#ifndef MODULE
1008static int __init boot_wait_for_devices(void)
1009{
1010 if (xen_hvm_domain() && !xen_platform_pci_unplug)
1011 return -ENODEV;
1012
1013 ready_to_wait_for_devices = 1;
1014 wait_for_devices(NULL);
1015 return 0;
1016}
1017
1018late_initcall(boot_wait_for_devices);
1019#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 6c5e3185a6a2..24665812316a 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -36,26 +36,15 @@
36 36
37#define XEN_BUS_ID_SIZE 20 37#define XEN_BUS_ID_SIZE 20
38 38
39#ifdef CONFIG_XEN_BACKEND
40extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
41extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
42extern void xenbus_backend_probe_and_watch(void);
43extern int xenbus_backend_bus_register(void);
44extern void xenbus_backend_bus_unregister(void);
45#else
46static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
47static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
48static inline void xenbus_backend_probe_and_watch(void) {}
49static inline int xenbus_backend_bus_register(void) { return 0; }
50static inline void xenbus_backend_bus_unregister(void) {}
51#endif
52
53struct xen_bus_type 39struct xen_bus_type
54{ 40{
55 char *root; 41 char *root;
56 unsigned int levels; 42 unsigned int levels;
57 int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); 43 int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
58 int (*probe)(const char *type, const char *dir); 44 int (*probe)(struct xen_bus_type *bus, const char *type,
45 const char *dir);
46 void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
47 unsigned int len);
59 struct bus_type bus; 48 struct bus_type bus;
60}; 49};
61 50
@@ -73,4 +62,16 @@ extern int xenbus_probe_devices(struct xen_bus_type *bus);
73 62
74extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); 63extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
75 64
65extern void xenbus_dev_shutdown(struct device *_dev);
66
67extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
68extern int xenbus_dev_resume(struct device *dev);
69
70extern void xenbus_otherend_changed(struct xenbus_watch *watch,
71 const char **vec, unsigned int len,
72 int ignore_on_shutdown);
73
74extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
75 char *id_node, char *path_node);
76
76#endif 77#endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
new file mode 100644
index 000000000000..6cf467bf63ec
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -0,0 +1,276 @@
1/******************************************************************************
2 * Talks to Xen Store to figure out what devices we have (backend half).
3 *
4 * Copyright (C) 2005 Rusty Russell, IBM Corporation
5 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
6 * Copyright (C) 2005, 2006 XenSource Ltd
7 * Copyright (C) 2007 Solarflare Communications, Inc.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#define DPRINTK(fmt, args...) \
35 pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
36 __func__, __LINE__, ##args)
37
38#include <linux/kernel.h>
39#include <linux/err.h>
40#include <linux/string.h>
41#include <linux/ctype.h>
42#include <linux/fcntl.h>
43#include <linux/mm.h>
44#include <linux/notifier.h>
45
46#include <asm/page.h>
47#include <asm/pgtable.h>
48#include <asm/xen/hypervisor.h>
49#include <asm/hypervisor.h>
50#include <xen/xenbus.h>
51#include <xen/features.h>
52
53#include "xenbus_comms.h"
54#include "xenbus_probe.h"
55
56/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
57static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
58{
59 int domid, err;
60 const char *devid, *type, *frontend;
61 unsigned int typelen;
62
63 type = strchr(nodename, '/');
64 if (!type)
65 return -EINVAL;
66 type++;
67 typelen = strcspn(type, "/");
68 if (!typelen || type[typelen] != '/')
69 return -EINVAL;
70
71 devid = strrchr(nodename, '/') + 1;
72
73 err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
74 "frontend", NULL, &frontend,
75 NULL);
76 if (err)
77 return err;
78 if (strlen(frontend) == 0)
79 err = -ERANGE;
80 if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
81 err = -ENOENT;
82 kfree(frontend);
83
84 if (err)
85 return err;
86
87 if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s",
88 typelen, type, domid, devid) >= XEN_BUS_ID_SIZE)
89 return -ENOSPC;
90 return 0;
91}
92
93static int xenbus_uevent_backend(struct device *dev,
94 struct kobj_uevent_env *env)
95{
96 struct xenbus_device *xdev;
97 struct xenbus_driver *drv;
98 struct xen_bus_type *bus;
99
100 DPRINTK("");
101
102 if (dev == NULL)
103 return -ENODEV;
104
105 xdev = to_xenbus_device(dev);
106 bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
107 if (xdev == NULL)
108 return -ENODEV;
109
110 /* stuff we want to pass to /sbin/hotplug */
111 if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
112 return -ENOMEM;
113
114 if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename))
115 return -ENOMEM;
116
117 if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root))
118 return -ENOMEM;
119
120 if (dev->driver) {
121 drv = to_xenbus_driver(dev->driver);
122 if (drv && drv->uevent)
123 return drv->uevent(xdev, env);
124 }
125
126 return 0;
127}
128
129/* backend/<typename>/<frontend-uuid>/<name> */
130static int xenbus_probe_backend_unit(struct xen_bus_type *bus,
131 const char *dir,
132 const char *type,
133 const char *name)
134{
135 char *nodename;
136 int err;
137
138 nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
139 if (!nodename)
140 return -ENOMEM;
141
142 DPRINTK("%s\n", nodename);
143
144 err = xenbus_probe_node(bus, type, nodename);
145 kfree(nodename);
146 return err;
147}
148
149/* backend/<typename>/<frontend-domid> */
150static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
151 const char *domid)
152{
153 char *nodename;
154 int err = 0;
155 char **dir;
156 unsigned int i, dir_n = 0;
157
158 DPRINTK("");
159
160 nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid);
161 if (!nodename)
162 return -ENOMEM;
163
164 dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
165 if (IS_ERR(dir)) {
166 kfree(nodename);
167 return PTR_ERR(dir);
168 }
169
170 for (i = 0; i < dir_n; i++) {
171 err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]);
172 if (err)
173 break;
174 }
175 kfree(dir);
176 kfree(nodename);
177 return err;
178}
179
180static void frontend_changed(struct xenbus_watch *watch,
181 const char **vec, unsigned int len)
182{
183 xenbus_otherend_changed(watch, vec, len, 0);
184}
185
186static struct device_attribute xenbus_backend_dev_attrs[] = {
187 __ATTR_NULL
188};
189
190static struct xen_bus_type xenbus_backend = {
191 .root = "backend",
192 .levels = 3, /* backend/type/<frontend>/<id> */
193 .get_bus_id = backend_bus_id,
194 .probe = xenbus_probe_backend,
195 .otherend_changed = frontend_changed,
196 .bus = {
197 .name = "xen-backend",
198 .match = xenbus_match,
199 .uevent = xenbus_uevent_backend,
200 .probe = xenbus_dev_probe,
201 .remove = xenbus_dev_remove,
202 .shutdown = xenbus_dev_shutdown,
203 .dev_attrs = xenbus_backend_dev_attrs,
204 },
205};
206
207static void backend_changed(struct xenbus_watch *watch,
208 const char **vec, unsigned int len)
209{
210 DPRINTK("");
211
212 xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
213}
214
215static struct xenbus_watch be_watch = {
216 .node = "backend",
217 .callback = backend_changed,
218};
219
220static int read_frontend_details(struct xenbus_device *xendev)
221{
222 return xenbus_read_otherend_details(xendev, "frontend-id", "frontend");
223}
224
225int xenbus_dev_is_online(struct xenbus_device *dev)
226{
227 int rc, val;
228
229 rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
230 if (rc != 1)
231 val = 0; /* no online node present */
232
233 return val;
234}
235EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
236
237int __xenbus_register_backend(struct xenbus_driver *drv,
238 struct module *owner, const char *mod_name)
239{
240 drv->read_otherend_details = read_frontend_details;
241
242 return xenbus_register_driver_common(drv, &xenbus_backend,
243 owner, mod_name);
244}
245EXPORT_SYMBOL_GPL(__xenbus_register_backend);
246
247static int backend_probe_and_watch(struct notifier_block *notifier,
248 unsigned long event,
249 void *data)
250{
251 /* Enumerate devices in xenstore and watch for changes. */
252 xenbus_probe_devices(&xenbus_backend);
253 register_xenbus_watch(&be_watch);
254
255 return NOTIFY_DONE;
256}
257
258static int __init xenbus_probe_backend_init(void)
259{
260 static struct notifier_block xenstore_notifier = {
261 .notifier_call = backend_probe_and_watch
262 };
263 int err;
264
265 DPRINTK("");
266
267 /* Register ourselves with the kernel bus subsystem */
268 err = bus_register(&xenbus_backend.bus);
269 if (err)
270 return err;
271
272 register_xenstore_notifier(&xenstore_notifier);
273
274 return 0;
275}
276subsys_initcall(xenbus_probe_backend_init);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
new file mode 100644
index 000000000000..5bcc2d6cf129
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -0,0 +1,294 @@
1#define DPRINTK(fmt, args...) \
2 pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
3 __func__, __LINE__, ##args)
4
5#include <linux/kernel.h>
6#include <linux/err.h>
7#include <linux/string.h>
8#include <linux/ctype.h>
9#include <linux/fcntl.h>
10#include <linux/mm.h>
11#include <linux/proc_fs.h>
12#include <linux/notifier.h>
13#include <linux/kthread.h>
14#include <linux/mutex.h>
15#include <linux/io.h>
16
17#include <asm/page.h>
18#include <asm/pgtable.h>
19#include <asm/xen/hypervisor.h>
20#include <xen/xenbus.h>
21#include <xen/events.h>
22#include <xen/page.h>
23
24#include <xen/platform_pci.h>
25
26#include "xenbus_comms.h"
27#include "xenbus_probe.h"
28
29
30/* device/<type>/<id> => <type>-<id> */
31static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
32{
33 nodename = strchr(nodename, '/');
34 if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
35 printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
36 return -EINVAL;
37 }
38
39 strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
40 if (!strchr(bus_id, '/')) {
41 printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
42 return -EINVAL;
43 }
44 *strchr(bus_id, '/') = '-';
45 return 0;
46}
47
48/* device/<typename>/<name> */
49static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
50 const char *name)
51{
52 char *nodename;
53 int err;
54
55 nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
56 if (!nodename)
57 return -ENOMEM;
58
59 DPRINTK("%s", nodename);
60
61 err = xenbus_probe_node(bus, type, nodename);
62 kfree(nodename);
63 return err;
64}
65
66static int xenbus_uevent_frontend(struct device *_dev,
67 struct kobj_uevent_env *env)
68{
69 struct xenbus_device *dev = to_xenbus_device(_dev);
70
71 if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
72 return -ENOMEM;
73
74 return 0;
75}
76
77
78static void backend_changed(struct xenbus_watch *watch,
79 const char **vec, unsigned int len)
80{
81 xenbus_otherend_changed(watch, vec, len, 1);
82}
83
84static struct device_attribute xenbus_frontend_dev_attrs[] = {
85 __ATTR_NULL
86};
87
88static struct xen_bus_type xenbus_frontend = {
89 .root = "device",
90 .levels = 2, /* device/type/<id> */
91 .get_bus_id = frontend_bus_id,
92 .probe = xenbus_probe_frontend,
93 .otherend_changed = backend_changed,
94 .bus = {
95 .name = "xen",
96 .match = xenbus_match,
97 .uevent = xenbus_uevent_frontend,
98 .probe = xenbus_dev_probe,
99 .remove = xenbus_dev_remove,
100 .shutdown = xenbus_dev_shutdown,
101 .dev_attrs = xenbus_frontend_dev_attrs,
102
103 .suspend = xenbus_dev_suspend,
104 .resume = xenbus_dev_resume,
105 },
106};
107
108static void frontend_changed(struct xenbus_watch *watch,
109 const char **vec, unsigned int len)
110{
111 DPRINTK("");
112
113 xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
114}
115
116
117/* We watch for devices appearing and vanishing. */
118static struct xenbus_watch fe_watch = {
119 .node = "device",
120 .callback = frontend_changed,
121};
122
123static int read_backend_details(struct xenbus_device *xendev)
124{
125 return xenbus_read_otherend_details(xendev, "backend-id", "backend");
126}
127
128static int is_device_connecting(struct device *dev, void *data)
129{
130 struct xenbus_device *xendev = to_xenbus_device(dev);
131 struct device_driver *drv = data;
132 struct xenbus_driver *xendrv;
133
134 /*
135 * A device with no driver will never connect. We care only about
136 * devices which should currently be in the process of connecting.
137 */
138 if (!dev->driver)
139 return 0;
140
141 /* Is this search limited to a particular driver? */
142 if (drv && (dev->driver != drv))
143 return 0;
144
145 xendrv = to_xenbus_driver(dev->driver);
146 return (xendev->state < XenbusStateConnected ||
147 (xendev->state == XenbusStateConnected &&
148 xendrv->is_ready && !xendrv->is_ready(xendev)));
149}
150
151static int exists_connecting_device(struct device_driver *drv)
152{
153 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
154 is_device_connecting);
155}
156
157static int print_device_status(struct device *dev, void *data)
158{
159 struct xenbus_device *xendev = to_xenbus_device(dev);
160 struct device_driver *drv = data;
161
162 /* Is this operation limited to a particular driver? */
163 if (drv && (dev->driver != drv))
164 return 0;
165
166 if (!dev->driver) {
167 /* Information only: is this too noisy? */
168 printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
169 xendev->nodename);
170 } else if (xendev->state < XenbusStateConnected) {
171 enum xenbus_state rstate = XenbusStateUnknown;
172 if (xendev->otherend)
173 rstate = xenbus_read_driver_state(xendev->otherend);
174 printk(KERN_WARNING "XENBUS: Timeout connecting "
175 "to device: %s (local state %d, remote state %d)\n",
176 xendev->nodename, xendev->state, rstate);
177 }
178
179 return 0;
180}
181
182/* We only wait for device setup after most initcalls have run. */
183static int ready_to_wait_for_devices;
184
185/*
186 * On a 5-minute timeout, wait for all devices currently configured. We need
187 * to do this to guarantee that the filesystems and / or network devices
188 * needed for boot are available, before we can allow the boot to proceed.
189 *
190 * This needs to be on a late_initcall, to happen after the frontend device
191 * drivers have been initialised, but before the root fs is mounted.
192 *
193 * A possible improvement here would be to have the tools add a per-device
194 * flag to the store entry, indicating whether it is needed at boot time.
195 * This would allow people who knew what they were doing to accelerate their
196 * boot slightly, but of course needs tools or manual intervention to set up
197 * those flags correctly.
198 */
199static void wait_for_devices(struct xenbus_driver *xendrv)
200{
201 unsigned long start = jiffies;
202 struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
203 unsigned int seconds_waited = 0;
204
205 if (!ready_to_wait_for_devices || !xen_domain())
206 return;
207
208 while (exists_connecting_device(drv)) {
209 if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
210 if (!seconds_waited)
211 printk(KERN_WARNING "XENBUS: Waiting for "
212 "devices to initialise: ");
213 seconds_waited += 5;
214 printk("%us...", 300 - seconds_waited);
215 if (seconds_waited == 300)
216 break;
217 }
218
219 schedule_timeout_interruptible(HZ/10);
220 }
221
222 if (seconds_waited)
223 printk("\n");
224
225 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
226 print_device_status);
227}
228
229int __xenbus_register_frontend(struct xenbus_driver *drv,
230 struct module *owner, const char *mod_name)
231{
232 int ret;
233
234 drv->read_otherend_details = read_backend_details;
235
236 ret = xenbus_register_driver_common(drv, &xenbus_frontend,
237 owner, mod_name);
238 if (ret)
239 return ret;
240
241 /* If this driver is loaded as a module wait for devices to attach. */
242 wait_for_devices(drv);
243
244 return 0;
245}
246EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
247
248static int frontend_probe_and_watch(struct notifier_block *notifier,
249 unsigned long event,
250 void *data)
251{
252 /* Enumerate devices in xenstore and watch for changes. */
253 xenbus_probe_devices(&xenbus_frontend);
254 register_xenbus_watch(&fe_watch);
255
256 return NOTIFY_DONE;
257}
258
259
260static int __init xenbus_probe_frontend_init(void)
261{
262 static struct notifier_block xenstore_notifier = {
263 .notifier_call = frontend_probe_and_watch
264 };
265 int err;
266
267 DPRINTK("");
268
269 /* Register ourselves with the kernel bus subsystem */
270 err = bus_register(&xenbus_frontend.bus);
271 if (err)
272 return err;
273
274 register_xenstore_notifier(&xenstore_notifier);
275
276 return 0;
277}
278subsys_initcall(xenbus_probe_frontend_init);
279
280#ifndef MODULE
281static int __init boot_wait_for_devices(void)
282{
283 if (xen_hvm_domain() && !xen_platform_pci_unplug)
284 return -ENODEV;
285
286 ready_to_wait_for_devices = 1;
287 wait_for_devices(NULL);
288 return 0;
289}
290
291late_initcall(boot_wait_for_devices);
292#endif
293
294MODULE_LICENSE("GPL");
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 58b6be992544..4ff028fcfd6e 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
6 index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \ 6 index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
7 unistr.o upcase.o 7 unistr.o upcase.o
8 8
9EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\" 9EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\"
10 10
11ifeq ($(CONFIG_NTFS_DEBUG),y) 11ifeq ($(CONFIG_NTFS_DEBUG),y)
12EXTRA_CFLAGS += -DDEBUG 12EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 113ebd9f25a4..f4b1057abdd2 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. 2 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
3 * 3 *
4 * Copyright (c) 2001-2007 Anton Altaparmakov 4 * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
5 * 5 *
6 * This program/include file is free software; you can redistribute it and/or 6 * This program/include file is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as published 7 * modify it under the terms of the GNU General Public License as published
@@ -1380,15 +1380,14 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
1380 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s 1380 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
1381 * single-segment behaviour. 1381 * single-segment behaviour.
1382 * 1382 *
1383 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both 1383 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
1384 * when atomic and when not atomic. This is ok because 1384 * atomic and when not atomic. This is ok because it calls
1385 * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic() 1385 * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In
1386 * and it is ok to call this when non-atomic. 1386 * fact, the only difference between __copy_from_user_inatomic() and
1387 * Infact, the only difference between __copy_from_user_inatomic() and
1388 * __copy_from_user() is that the latter calls might_sleep() and the former 1387 * __copy_from_user() is that the latter calls might_sleep() and the former
1389 * should not zero the tail of the buffer on error. And on many 1388 * should not zero the tail of the buffer on error. And on many architectures
1390 * architectures __copy_from_user_inatomic() is just defined to 1389 * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
1391 * __copy_from_user() so it makes no difference at all on those architectures. 1390 * makes no difference at all on those architectures.
1392 */ 1391 */
1393static inline size_t ntfs_copy_from_user_iovec(struct page **pages, 1392static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
1394 unsigned nr_pages, unsigned ofs, const struct iovec **iov, 1393 unsigned nr_pages, unsigned ofs, const struct iovec **iov,
@@ -1409,28 +1408,28 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
1409 if (unlikely(copied != len)) { 1408 if (unlikely(copied != len)) {
1410 /* Do it the slow way. */ 1409 /* Do it the slow way. */
1411 addr = kmap(*pages); 1410 addr = kmap(*pages);
1412 copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, 1411 copied = __ntfs_copy_from_user_iovec_inatomic(addr +
1413 *iov, *iov_ofs, len); 1412 ofs, *iov, *iov_ofs, len);
1414 /*
1415 * Zero the rest of the target like __copy_from_user().
1416 */
1417 memset(addr + ofs + copied, 0, len - copied);
1418 kunmap(*pages);
1419 if (unlikely(copied != len)) 1413 if (unlikely(copied != len))
1420 goto err_out; 1414 goto err_out;
1415 kunmap(*pages);
1421 } 1416 }
1422 total += len; 1417 total += len;
1418 ntfs_set_next_iovec(iov, iov_ofs, len);
1423 bytes -= len; 1419 bytes -= len;
1424 if (!bytes) 1420 if (!bytes)
1425 break; 1421 break;
1426 ntfs_set_next_iovec(iov, iov_ofs, len);
1427 ofs = 0; 1422 ofs = 0;
1428 } while (++pages < last_page); 1423 } while (++pages < last_page);
1429out: 1424out:
1430 return total; 1425 return total;
1431err_out: 1426err_out:
1432 total += copied; 1427 BUG_ON(copied > len);
1433 /* Zero the rest of the target like __copy_from_user(). */ 1428 /* Zero the rest of the target like __copy_from_user(). */
1429 memset(addr + ofs + copied, 0, len - copied);
1430 kunmap(*pages);
1431 total += copied;
1432 ntfs_set_next_iovec(iov, iov_ofs, copied);
1434 while (++pages < last_page) { 1433 while (++pages < last_page) {
1435 bytes -= len; 1434 bytes -= len;
1436 if (!bytes) 1435 if (!bytes)
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index a30ecacc01f2..29099a07b9fe 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project. 2 * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
3 * 3 *
4 * Copyright (c) 2001-2007 Anton Altaparmakov 4 * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
5 * Copyright (c) 2001,2002 Richard Russon 5 * Copyright (c) 2001,2002 Richard Russon
6 * 6 *
7 * This program/include file is free software; you can redistribute it and/or 7 * This program/include file is free software; you can redistribute it and/or
@@ -3193,8 +3193,8 @@ static void __exit exit_ntfs_fs(void)
3193 ntfs_sysctl(0); 3193 ntfs_sysctl(0);
3194} 3194}
3195 3195
3196MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>"); 3196MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>");
3197MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov"); 3197MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.");
3198MODULE_VERSION(NTFS_VERSION); 3198MODULE_VERSION(NTFS_VERSION);
3199MODULE_LICENSE("GPL"); 3199MODULE_LICENSE("GPL");
3200#ifdef DEBUG 3200#ifdef DEBUG
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 43e2d7d33976..7a1d15ff19b7 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -94,7 +94,7 @@ struct xenbus_driver {
94 int (*remove)(struct xenbus_device *dev); 94 int (*remove)(struct xenbus_device *dev);
95 int (*suspend)(struct xenbus_device *dev, pm_message_t state); 95 int (*suspend)(struct xenbus_device *dev, pm_message_t state);
96 int (*resume)(struct xenbus_device *dev); 96 int (*resume)(struct xenbus_device *dev);
97 int (*uevent)(struct xenbus_device *, char **, int, char *, int); 97 int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
98 struct device_driver driver; 98 struct device_driver driver;
99 int (*read_otherend_details)(struct xenbus_device *dev); 99 int (*read_otherend_details)(struct xenbus_device *dev);
 100 int (*is_ready)(struct xenbus_device *dev); 100 int (*is_ready)(struct xenbus_device *dev);
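The hunk above changes the xenbus uevent prototype from the old five-argument buffer form to the standard kobj_uevent_env based one. As a rough sketch of what a driver hook looks like under the new prototype (the function name and the XENBUS_EXAMPLE variable below are hypothetical, not part of this patch):

	/* Hypothetical driver uevent hook for the new prototype; it only
	 * illustrates add_uevent_var() usage against struct kobj_uevent_env. */
	static int example_uevent(struct xenbus_device *xdev,
				  struct kobj_uevent_env *env)
	{
		/* XENBUS_EXAMPLE is a made-up variable name for illustration. */
		if (add_uevent_var(env, "XENBUS_EXAMPLE=%s", xdev->devicetype))
			return -ENOMEM;	/* environment buffer is full */
		return 0;
	}

The in-tree callers in xenbus_probe_backend.c above follow the same pattern, passing env straight through to drv->uevent().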
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
new file mode 100644
index 000000000000..fd8e1f1297aa
--- /dev/null
+++ b/tools/power/x86/turbostat/Makefile
@@ -0,0 +1,8 @@
1turbostat : turbostat.c
2
3clean :
4 rm -f turbostat
5
6install :
7 install turbostat /usr/bin/turbostat
8 install turbostat.8 /usr/share/man/man8
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
new file mode 100644
index 000000000000..ff75125deed0
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -0,0 +1,172 @@
1.TH TURBOSTAT 8
2.SH NAME
3turbostat \- Report processor frequency and idle statistics
4.SH SYNOPSIS
5.ft B
6.B turbostat
7.RB [ "\-v" ]
8.RB [ "\-M MSR#" ]
9.RB command
10.br
11.B turbostat
12.RB [ "\-v" ]
13.RB [ "\-M MSR#" ]
14.RB [ "\-i interval_sec" ]
15.SH DESCRIPTION
16\fBturbostat \fP reports processor topology, frequency
17and idle power state statistics on modern X86 processors.
18Either \fBcommand\fP is forked and statistics are printed
19upon its completion, or statistics are printed periodically.
20
21\fBturbostat \fP
22requires that the processor
23supports an "invariant" TSC, plus the APERF and MPERF MSRs.
24\fBturbostat \fP will report idle cpu power state residency
25on processors that additionally support C-state residency counters.
26
27.SS Options
28The \fB-v\fP option increases verbosity.
29.PP
30The \fB-M MSR#\fP option dumps the specified MSR,
31in addition to the usual frequency and idle statistics.
32.PP
 33The \fB-i interval_sec\fP option prints statistics every \fIinterval_sec\fP seconds.
34The default is 5 seconds.
35.PP
36The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
37displays the statistics gathered since it was forked.
38.PP
39.SH FIELD DESCRIPTIONS
40.nf
41\fBpkg\fP processor package number.
42\fBcore\fP processor core number.
43\fBCPU\fP Linux CPU (logical processor) number.
44\fB%c0\fP percent of the interval that the CPU retired instructions.
45\fBGHz\fP average clock rate while the CPU was in c0 state.
46\fBTSC\fP average GHz that the TSC ran during the entire interval.
47\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
48\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
49.fi
50.PP
51.SH EXAMPLE
 52Without any parameters, turbostat prints out counters every 5 seconds.
53(override interval with "-i sec" option, or specify a command
54for turbostat to fork).
55
 56The first row of statistics reflects the average for the entire system.
57Subsequent rows show per-CPU statistics.
58
59.nf
60[root@x980]# ./turbostat
61core CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
62 0.04 1.62 3.38 0.11 0.00 99.85 0.00 95.07
63 0 0 0.04 1.62 3.38 0.06 0.00 99.90 0.00 95.07
64 0 6 0.02 1.62 3.38 0.08 0.00 99.90 0.00 95.07
65 1 2 0.10 1.62 3.38 0.29 0.00 99.61 0.00 95.07
66 1 8 0.11 1.62 3.38 0.28 0.00 99.61 0.00 95.07
67 2 4 0.01 1.62 3.38 0.01 0.00 99.98 0.00 95.07
68 2 10 0.01 1.61 3.38 0.02 0.00 99.98 0.00 95.07
69 8 1 0.07 1.62 3.38 0.15 0.00 99.78 0.00 95.07
70 8 7 0.03 1.62 3.38 0.19 0.00 99.78 0.00 95.07
71 9 3 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
72 9 9 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
73 10 5 0.01 1.62 3.38 0.13 0.00 99.86 0.00 95.07
74 10 11 0.08 1.62 3.38 0.05 0.00 99.86 0.00 95.07
75.fi
76.SH VERBOSE EXAMPLE
77The "-v" option adds verbosity to the output:
78
79.nf
80GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2)
8112 * 133 = 1600 MHz max efficiency
8225 * 133 = 3333 MHz TSC frequency
8326 * 133 = 3467 MHz max turbo 4 active cores
8426 * 133 = 3467 MHz max turbo 3 active cores
8527 * 133 = 3600 MHz max turbo 2 active cores
8627 * 133 = 3600 MHz max turbo 1 active cores
87
88.fi
89The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
90available at the minimum package voltage. The \fBTSC frequency\fP is the nominal
91maximum frequency of the processor if turbo-mode were not available. This frequency
92should be sustainable on all CPUs indefinitely, given nominal power and cooling.
93The remaining rows show what maximum turbo frequency is possible
94depending on the number of idle cores. Note that this information is
95not available on all processors.
96.SH FORK EXAMPLE
97If turbostat is invoked with a command, it will fork that command
98and output the statistics gathered when the command exits.
 99e.g. Here a cycle soaker is run on 1 CPU (see %c0) for a few seconds
100until ^C while the other CPUs are mostly idle:
101
102.nf
103[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
104
105^Ccore CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
106 8.49 3.63 3.38 16.23 0.66 74.63 0.00 0.00
107 0 0 1.22 3.62 3.38 32.18 0.00 66.60 0.00 0.00
108 0 6 0.40 3.61 3.38 33.00 0.00 66.60 0.00 0.00
109 1 2 0.11 3.14 3.38 0.19 3.95 95.75 0.00 0.00
110 1 8 0.05 2.88 3.38 0.25 3.95 95.75 0.00 0.00
111 2 4 0.00 3.13 3.38 0.02 0.00 99.98 0.00 0.00
112 2 10 0.00 3.09 3.38 0.02 0.00 99.98 0.00 0.00
113 8 1 0.04 3.50 3.38 14.43 0.00 85.54 0.00 0.00
114 8 7 0.03 2.98 3.38 14.43 0.00 85.54 0.00 0.00
115 9 3 0.00 3.16 3.38 100.00 0.00 0.00 0.00 0.00
116 9 9 99.93 3.63 3.38 0.06 0.00 0.00 0.00 0.00
117 10 5 0.01 2.82 3.38 0.08 0.00 99.91 0.00 0.00
118 10 11 0.02 3.36 3.38 0.06 0.00 99.91 0.00 0.00
1196.950866 sec
120
121.fi
 122Above, the cycle soaker drives cpu9 up to the 3.6 GHz turbo limit
123while the other processors are generally in various states of idle.
124
125Note that cpu3 is an HT sibling sharing core9
126with cpu9, and thus it is unable to get to an idle state
127deeper than c1 while cpu9 is busy.
128
 129Note that turbostat reports an average GHz of 3.63, while
 130the arithmetic average of the GHz column above is 3.24.
 131This is a weighted average, where the weight is %c0, i.e. it is the total number of
 132un-halted cycles elapsed per unit time, divided by the number of CPUs.
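A minimal sketch of that weighting, using made-up per-CPU numbers rather than the run above (turbostat itself derives the figure from the summed APERF/MPERF deltas, not from the printed columns):

	#include <stdio.h>

	int main(void)
	{
		/* illustrative %c0 (busy share) and GHz values for three CPUs */
		double c0[]  = { 99.93, 1.22, 0.40 };
		double ghz[] = { 3.63, 3.62, 3.61 };
		double num = 0.0, den = 0.0;
		int i;

		for (i = 0; i < 3; i++) {
			num += c0[i] * ghz[i];	/* weight each clock by its busy time */
			den += c0[i];
		}
		/* prints ~3.63: dominated by the one busy CPU, unlike the plain mean */
		printf("weighted GHz = %.2f\n", num / den);
		return 0;
	}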
133.SH NOTES
134
135.B "turbostat "
136must be run as root.
137
138.B "turbostat "
139reads hardware counters, but doesn't write them.
140So it will not interfere with the OS or other programs, including
141multiple invocations of itself.
142
143\fBturbostat \fP
144may work poorly on Linux-2.6.20 through 2.6.29,
145as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF
146in those kernels.
147
148The APERF, MPERF MSRs are defined to count non-halted cycles.
149Although it is not guaranteed by the architecture, turbostat assumes
150that they count at TSC rate, which is true on all processors tested to date.
151
152.SH REFERENCES
153"IntelÂŽ Turbo Boost Technology
154in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
155http://download.intel.com/design/processor/applnots/320354.pdf
156
157"IntelÂŽ 64 and IA-32 Architectures Software Developer's Manual
158Volume 3B: System Programming Guide"
159http://www.intel.com/products/processor/manuals/
160
161.SH FILES
162.ta
163.nf
164/dev/cpu/*/msr
165.fi
166
167.SH "SEE ALSO"
168msr(4), vmstat(8)
169.PP
170.SH AUTHORS
171.nf
172Written by Len Brown <len.brown@intel.com>
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
new file mode 100644
index 000000000000..4c6983de6fd9
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -0,0 +1,1048 @@
1/*
2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors.
4 *
5 * Copyright (c) 2010, Intel Corporation.
6 * Len Brown <len.brown@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21
22#include <stdio.h>
23#include <unistd.h>
24#include <sys/types.h>
25#include <sys/wait.h>
26#include <sys/stat.h>
27#include <sys/resource.h>
28#include <fcntl.h>
29#include <signal.h>
30#include <sys/time.h>
31#include <stdlib.h>
32#include <dirent.h>
33#include <string.h>
34#include <ctype.h>
35
36#define MSR_TSC 0x10
37#define MSR_NEHALEM_PLATFORM_INFO 0xCE
38#define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1AD
39#define MSR_APERF 0xE8
40#define MSR_MPERF 0xE7
41#define MSR_PKG_C2_RESIDENCY 0x60D /* SNB only */
42#define MSR_PKG_C3_RESIDENCY 0x3F8
43#define MSR_PKG_C6_RESIDENCY 0x3F9
44#define MSR_PKG_C7_RESIDENCY 0x3FA /* SNB only */
45#define MSR_CORE_C3_RESIDENCY 0x3FC
46#define MSR_CORE_C6_RESIDENCY 0x3FD
47#define MSR_CORE_C7_RESIDENCY 0x3FE /* SNB only */
48
49char *proc_stat = "/proc/stat";
50unsigned int interval_sec = 5; /* set with -i interval_sec */
51unsigned int verbose; /* set with -v */
52unsigned int skip_c0;
53unsigned int skip_c1;
54unsigned int do_nhm_cstates;
55unsigned int do_snb_cstates;
56unsigned int has_aperf;
57unsigned int units = 1000000000; /* Ghz etc */
58unsigned int genuine_intel;
59unsigned int has_invariant_tsc;
60unsigned int do_nehalem_platform_info;
61unsigned int do_nehalem_turbo_ratio_limit;
62unsigned int extra_msr_offset;
63double bclk;
64unsigned int show_pkg;
65unsigned int show_core;
66unsigned int show_cpu;
67
68int aperf_mperf_unstable;
69int backwards_count;
70char *progname;
71int need_reinitialize;
72
73int num_cpus;
74
75typedef struct per_cpu_counters {
76 unsigned long long tsc; /* per thread */
77 unsigned long long aperf; /* per thread */
78 unsigned long long mperf; /* per thread */
79 unsigned long long c1; /* per thread (calculated) */
80 unsigned long long c3; /* per core */
81 unsigned long long c6; /* per core */
82 unsigned long long c7; /* per core */
83 unsigned long long pc2; /* per package */
84 unsigned long long pc3; /* per package */
85 unsigned long long pc6; /* per package */
86 unsigned long long pc7; /* per package */
87 unsigned long long extra_msr; /* per thread */
88 int pkg;
89 int core;
90 int cpu;
91 struct per_cpu_counters *next;
92} PCC;
93
94PCC *pcc_even;
95PCC *pcc_odd;
96PCC *pcc_delta;
97PCC *pcc_average;
98struct timeval tv_even;
99struct timeval tv_odd;
100struct timeval tv_delta;
101
102unsigned long long get_msr(int cpu, off_t offset)
103{
104 ssize_t retval;
105 unsigned long long msr;
106 char pathname[32];
107 int fd;
108
109 sprintf(pathname, "/dev/cpu/%d/msr", cpu);
110 fd = open(pathname, O_RDONLY);
111 if (fd < 0) {
112 perror(pathname);
113 need_reinitialize = 1;
114 return 0;
115 }
116
117 retval = pread(fd, &msr, sizeof msr, offset);
118 if (retval != sizeof msr) {
119 fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
120 cpu, offset, retval);
121 exit(-2);
122 }
123
124 close(fd);
125 return msr;
126}
127
128void print_header()
129{
130 if (show_pkg)
131 fprintf(stderr, "pkg ");
132 if (show_core)
133 fprintf(stderr, "core");
134 if (show_cpu)
135 fprintf(stderr, " CPU");
136 if (do_nhm_cstates)
137 fprintf(stderr, " %%c0 ");
138 if (has_aperf)
139 fprintf(stderr, " GHz");
140 fprintf(stderr, " TSC");
141 if (do_nhm_cstates)
142 fprintf(stderr, " %%c1 ");
143 if (do_nhm_cstates)
144 fprintf(stderr, " %%c3 ");
145 if (do_nhm_cstates)
146 fprintf(stderr, " %%c6 ");
147 if (do_snb_cstates)
148 fprintf(stderr, " %%c7 ");
149 if (do_snb_cstates)
150 fprintf(stderr, " %%pc2 ");
151 if (do_nhm_cstates)
152 fprintf(stderr, " %%pc3 ");
153 if (do_nhm_cstates)
154 fprintf(stderr, " %%pc6 ");
155 if (do_snb_cstates)
156 fprintf(stderr, " %%pc7 ");
157 if (extra_msr_offset)
158 fprintf(stderr, " MSR 0x%x ", extra_msr_offset);
159
160 putc('\n', stderr);
161}
162
163void dump_pcc(PCC *pcc)
164{
165 fprintf(stderr, "package: %d ", pcc->pkg);
166 fprintf(stderr, "core:: %d ", pcc->core);
167 fprintf(stderr, "CPU: %d ", pcc->cpu);
168 fprintf(stderr, "TSC: %016llX\n", pcc->tsc);
169 fprintf(stderr, "c3: %016llX\n", pcc->c3);
170 fprintf(stderr, "c6: %016llX\n", pcc->c6);
171 fprintf(stderr, "c7: %016llX\n", pcc->c7);
172 fprintf(stderr, "aperf: %016llX\n", pcc->aperf);
173 fprintf(stderr, "pc2: %016llX\n", pcc->pc2);
174 fprintf(stderr, "pc3: %016llX\n", pcc->pc3);
175 fprintf(stderr, "pc6: %016llX\n", pcc->pc6);
176 fprintf(stderr, "pc7: %016llX\n", pcc->pc7);
177 fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr);
178}
179
180void dump_list(PCC *pcc)
181{
182 printf("dump_list 0x%p\n", pcc);
183
184 for (; pcc; pcc = pcc->next)
185 dump_pcc(pcc);
186}
187
188void print_pcc(PCC *p)
189{
190 double interval_float;
191
192 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
193
194 /* topology columns, print blanks on 1st (average) line */
195 if (p == pcc_average) {
196 if (show_pkg)
197 fprintf(stderr, " ");
198 if (show_core)
199 fprintf(stderr, " ");
200 if (show_cpu)
201 fprintf(stderr, " ");
202 } else {
203 if (show_pkg)
204 fprintf(stderr, "%4d", p->pkg);
205 if (show_core)
206 fprintf(stderr, "%4d", p->core);
207 if (show_cpu)
208 fprintf(stderr, "%4d", p->cpu);
209 }
210
211 /* %c0 */
212 if (do_nhm_cstates) {
213 if (!skip_c0)
214 fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc);
215 else
216 fprintf(stderr, " ****");
217 }
218
219 /* GHz */
220 if (has_aperf) {
221 if (!aperf_mperf_unstable) {
222 fprintf(stderr, "%5.2f",
223 1.0 * p->tsc / units * p->aperf /
224 p->mperf / interval_float);
225 } else {
226 if (p->aperf > p->tsc || p->mperf > p->tsc) {
227 fprintf(stderr, " ****");
228 } else {
229 fprintf(stderr, "%4.1f*",
230 1.0 * p->tsc /
231 units * p->aperf /
232 p->mperf / interval_float);
233 }
234 }
235 }
236
237 /* TSC */
238 fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);
239
240 if (do_nhm_cstates) {
241 if (!skip_c1)
242 fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
243 else
244 fprintf(stderr, " ****");
245 }
246 if (do_nhm_cstates)
247 fprintf(stderr, "%7.2f", 100.0 * p->c3/p->tsc);
248 if (do_nhm_cstates)
249 fprintf(stderr, "%7.2f", 100.0 * p->c6/p->tsc);
250 if (do_snb_cstates)
251 fprintf(stderr, "%7.2f", 100.0 * p->c7/p->tsc);
252 if (do_snb_cstates)
253 fprintf(stderr, "%7.2f", 100.0 * p->pc2/p->tsc);
254 if (do_nhm_cstates)
255 fprintf(stderr, "%7.2f", 100.0 * p->pc3/p->tsc);
256 if (do_nhm_cstates)
257 fprintf(stderr, "%7.2f", 100.0 * p->pc6/p->tsc);
258 if (do_snb_cstates)
259 fprintf(stderr, "%7.2f", 100.0 * p->pc7/p->tsc);
260 if (extra_msr_offset)
261 fprintf(stderr, " 0x%016llx", p->extra_msr);
262 putc('\n', stderr);
263}
264
265void print_counters(PCC *cnt)
266{
267 PCC *pcc;
268
269 print_header();
270
271 if (num_cpus > 1)
272 print_pcc(pcc_average);
273
274 for (pcc = cnt; pcc != NULL; pcc = pcc->next)
275 print_pcc(pcc);
276
277}
278
279#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
280
281
282int compute_delta(PCC *after, PCC *before, PCC *delta)
283{
284 int errors = 0;
285 int perf_err = 0;
286
287 skip_c0 = skip_c1 = 0;
288
289 for ( ; after && before && delta;
290 after = after->next, before = before->next, delta = delta->next) {
291 if (before->cpu != after->cpu) {
292 printf("cpu configuration changed: %d != %d\n",
293 before->cpu, after->cpu);
294 return -1;
295 }
296
297 if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
298 fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
299 before->cpu, before->tsc, after->tsc);
300 errors++;
301 }
302 /* check for TSC < 1 Mcycles over interval */
303 if (delta->tsc < (1000 * 1000)) {
304 fprintf(stderr, "Insanely slow TSC rate,"
305 " TSC stops in idle?\n");
306 fprintf(stderr, "You can disable all c-states"
307 " by booting with \"idle=poll\"\n");
308 fprintf(stderr, "or just the deep ones with"
309 " \"processor.max_cstate=1\"\n");
310 exit(-3);
311 }
312 if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
313 fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
314 before->cpu, before->c3, after->c3);
315 errors++;
316 }
317 if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
318 fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
319 before->cpu, before->c6, after->c6);
320 errors++;
321 }
322 if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
323 fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
324 before->cpu, before->c7, after->c7);
325 errors++;
326 }
327 if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
328 fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
329 before->cpu, before->pc2, after->pc2);
330 errors++;
331 }
332 if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
333 fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
334 before->cpu, before->pc3, after->pc3);
335 errors++;
336 }
337 if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
338 fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
339 before->cpu, before->pc6, after->pc6);
340 errors++;
341 }
342 if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
343 fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
344 before->cpu, before->pc7, after->pc7);
345 errors++;
346 }
347
348 perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
349 if (perf_err) {
350 fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
351 before->cpu, before->aperf, after->aperf);
352 }
353 perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
354 if (perf_err) {
355 fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
356 before->cpu, before->mperf, after->mperf);
357 }
358 if (perf_err) {
359 if (!aperf_mperf_unstable) {
360 fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
361 fprintf(stderr, "* Frequency results do not cover entire interval *\n");
362 fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
363
364 aperf_mperf_unstable = 1;
365 }
366 /*
367 * mperf delta is likely a huge "positive" number
368 * can not use it for calculating c0 time
369 */
370 skip_c0 = 1;
371 skip_c1 = 1;
372 }
373
374 /*
375 * As mperf and tsc collection are not atomic,
376 * it is possible for mperf's non-halted cycles
377 * to exceed TSC's all cycles: show c1 = 0% in that case.
378 */
379 if (delta->mperf > delta->tsc)
380 delta->c1 = 0;
381 else /* normal case, derive c1 */
382 delta->c1 = delta->tsc - delta->mperf
383 - delta->c3 - delta->c6 - delta->c7;
384
385 if (delta->mperf == 0)
386 delta->mperf = 1; /* divide by 0 protection */
387
388 /*
389 * for "extra msr", just copy the latest w/o subtracting
390 */
391 delta->extra_msr = after->extra_msr;
392 if (errors) {
393 fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
394 dump_pcc(before);
395 fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
396 dump_pcc(after);
397 errors = 0;
398 }
399 }
400 return 0;
401}
402
403void compute_average(PCC *delta, PCC *avg)
404{
405 PCC *sum;
406
407 sum = calloc(1, sizeof(PCC));
408 if (sum == NULL) {
409 perror("calloc sum");
410 exit(1);
411 }
412
413 for (; delta; delta = delta->next) {
414 sum->tsc += delta->tsc;
415 sum->c1 += delta->c1;
416 sum->c3 += delta->c3;
417 sum->c6 += delta->c6;
418 sum->c7 += delta->c7;
419 sum->aperf += delta->aperf;
420 sum->mperf += delta->mperf;
421 sum->pc2 += delta->pc2;
422 sum->pc3 += delta->pc3;
423 sum->pc6 += delta->pc6;
424 sum->pc7 += delta->pc7;
425 }
426 avg->tsc = sum->tsc/num_cpus;
427 avg->c1 = sum->c1/num_cpus;
428 avg->c3 = sum->c3/num_cpus;
429 avg->c6 = sum->c6/num_cpus;
430 avg->c7 = sum->c7/num_cpus;
431 avg->aperf = sum->aperf/num_cpus;
432 avg->mperf = sum->mperf/num_cpus;
433 avg->pc2 = sum->pc2/num_cpus;
434 avg->pc3 = sum->pc3/num_cpus;
435 avg->pc6 = sum->pc6/num_cpus;
436 avg->pc7 = sum->pc7/num_cpus;
437
438 free(sum);
439}
440
441void get_counters(PCC *pcc)
442{
443 for ( ; pcc; pcc = pcc->next) {
444 pcc->tsc = get_msr(pcc->cpu, MSR_TSC);
445 if (do_nhm_cstates)
446 pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY);
447 if (do_nhm_cstates)
448 pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY);
449 if (do_snb_cstates)
450 pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY);
451 if (has_aperf)
452 pcc->aperf = get_msr(pcc->cpu, MSR_APERF);
453 if (has_aperf)
454 pcc->mperf = get_msr(pcc->cpu, MSR_MPERF);
455 if (do_snb_cstates)
456 pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY);
457 if (do_nhm_cstates)
458 pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY);
459 if (do_nhm_cstates)
460 pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY);
461 if (do_snb_cstates)
462 pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY);
463 if (extra_msr_offset)
464 pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset);
465 }
466}
467
468
469void print_nehalem_info()
470{
471 unsigned long long msr;
472 unsigned int ratio;
473
474 if (!do_nehalem_platform_info)
475 return;
476
477 msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO);
478
479 ratio = (msr >> 40) & 0xFF;
480 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
481 ratio, bclk, ratio * bclk);
482
483 ratio = (msr >> 8) & 0xFF;
484 fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
485 ratio, bclk, ratio * bclk);
486
487 if (verbose > 1)
488 fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);
489
490 if (!do_nehalem_turbo_ratio_limit)
491 return;
492
493 msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT);
494
495 ratio = (msr >> 24) & 0xFF;
496 if (ratio)
497 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
498 ratio, bclk, ratio * bclk);
499
500 ratio = (msr >> 16) & 0xFF;
501 if (ratio)
502 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
503 ratio, bclk, ratio * bclk);
504
505 ratio = (msr >> 8) & 0xFF;
506 if (ratio)
507 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
508 ratio, bclk, ratio * bclk);
509
510 ratio = (msr >> 0) & 0xFF;
511 if (ratio)
512 fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
513 ratio, bclk, ratio * bclk);
514
515}
516
517void free_counter_list(PCC *list)
518{
519 PCC *p;
520
521 for (p = list; p; ) {
522 PCC *free_me;
523
524 free_me = p;
525 p = p->next;
526 free(free_me);
527 }
528 return;
529}
530
531void free_all_counters(void)
532{
533 free_counter_list(pcc_even);
534 pcc_even = NULL;
535
536 free_counter_list(pcc_odd);
537 pcc_odd = NULL;
538
539 free_counter_list(pcc_delta);
540 pcc_delta = NULL;
541
542 free_counter_list(pcc_average);
543 pcc_average = NULL;
544}
545
546void insert_cpu_counters(PCC **list, PCC *new)
547{
548 PCC *prev;
549
550 /*
551 * list was empty
552 */
553 if (*list == NULL) {
554 new->next = *list;
555 *list = new;
556 return;
557 }
558
559 show_cpu = 1; /* there is more than one CPU */
560
561 /*
562 * insert on front of list.
563 * It is sorted by ascending package#, core#, cpu#
564 */
565 if (((*list)->pkg > new->pkg) ||
566 (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
567 (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
568 new->next = *list;
569 *list = new;
570 return;
571 }
572
573 prev = *list;
574
575 while (prev->next && (prev->next->pkg < new->pkg)) {
576 prev = prev->next;
577 show_pkg = 1; /* there is more than 1 package */
578 }
579
580 while (prev->next && (prev->next->pkg == new->pkg)
581 && (prev->next->core < new->core)) {
582 prev = prev->next;
583 show_core = 1; /* there is more than 1 core */
584 }
585
586 while (prev->next && (prev->next->pkg == new->pkg)
587 && (prev->next->core == new->core)
588 && (prev->next->cpu < new->cpu)) {
589 prev = prev->next;
590 }
591
592 /*
593 * insert after "prev"
594 */
595 new->next = prev->next;
596 prev->next = new;
597
598 return;
599}
600
601void alloc_new_cpu_counters(int pkg, int core, int cpu)
602{
603 PCC *new;
604
605 if (verbose > 1)
606 printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
607
608 new = (PCC *)calloc(1, sizeof(PCC));
609 if (new == NULL) {
610 perror("calloc");
611 exit(1);
612 }
613 new->pkg = pkg;
614 new->core = core;
615 new->cpu = cpu;
616 insert_cpu_counters(&pcc_odd, new);
617
618 new = (PCC *)calloc(1, sizeof(PCC));
619 if (new == NULL) {
620 perror("calloc");
621 exit(1);
622 }
623 new->pkg = pkg;
624 new->core = core;
625 new->cpu = cpu;
626 insert_cpu_counters(&pcc_even, new);
627
628 new = (PCC *)calloc(1, sizeof(PCC));
629 if (new == NULL) {
630 perror("calloc");
631 exit(1);
632 }
633 new->pkg = pkg;
634 new->core = core;
635 new->cpu = cpu;
636 insert_cpu_counters(&pcc_delta, new);
637
638 new = (PCC *)calloc(1, sizeof(PCC));
639 if (new == NULL) {
640 perror("calloc");
641 exit(1);
642 }
643 new->pkg = pkg;
644 new->core = core;
645 new->cpu = cpu;
646 pcc_average = new;
647}
648
649int get_physical_package_id(int cpu)
650{
651 char path[64];
652 FILE *filep;
653 int pkg;
654
655 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
656 filep = fopen(path, "r");
657 if (filep == NULL) {
658 perror(path);
659 exit(1);
660 }
661 fscanf(filep, "%d", &pkg);
662 fclose(filep);
663 return pkg;
664}
665
666int get_core_id(int cpu)
667{
668 char path[64];
669 FILE *filep;
670 int core;
671
672 sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
673 filep = fopen(path, "r");
674 if (filep == NULL) {
675 perror(path);
676 exit(1);
677 }
678 fscanf(filep, "%d", &core);
679 fclose(filep);
680 return core;
681}
682
683/*
684 * run func(index, cpu) on every cpu in /proc/stat
685 */
686
687int for_all_cpus(void (func)(int, int, int))
688{
689 FILE *fp;
690 int cpu_count;
691 int retval;
692
693 fp = fopen(proc_stat, "r");
694 if (fp == NULL) {
695 perror(proc_stat);
696 exit(1);
697 }
698
699 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
700 if (retval != 0) {
701 perror("/proc/stat format");
702 exit(1);
703 }
704
705 for (cpu_count = 0; ; cpu_count++) {
706 int cpu;
707
708 retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
709 if (retval != 1)
710 break;
711
712 func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
713 }
714 fclose(fp);
715 return cpu_count;
716}
717
718void re_initialize(void)
719{
720 printf("turbostat: topology changed, re-initializing.\n");
721 free_all_counters();
722 num_cpus = for_all_cpus(alloc_new_cpu_counters);
723 need_reinitialize = 0;
724 printf("num_cpus is now %d\n", num_cpus);
725}
726
727void dummy(int pkg, int core, int cpu) { return; }
728/*
729 * check to see if a cpu came on-line
730 */
731void verify_num_cpus()
732{
733 int new_num_cpus;
734
735 new_num_cpus = for_all_cpus(dummy);
736
737 if (new_num_cpus != num_cpus) {
738 if (verbose)
739 printf("num_cpus was %d, is now %d\n",
740 num_cpus, new_num_cpus);
741 need_reinitialize = 1;
742 }
743
744 return;
745}
746
747void turbostat_loop()
748{
749restart:
750 get_counters(pcc_even);
751 gettimeofday(&tv_even, (struct timezone *)NULL);
752
753 while (1) {
754 verify_num_cpus();
755 if (need_reinitialize) {
756 re_initialize();
757 goto restart;
758 }
759 sleep(interval_sec);
760 get_counters(pcc_odd);
761 gettimeofday(&tv_odd, (struct timezone *)NULL);
762
763 compute_delta(pcc_odd, pcc_even, pcc_delta);
764 timersub(&tv_odd, &tv_even, &tv_delta);
765 compute_average(pcc_delta, pcc_average);
766 print_counters(pcc_delta);
767 if (need_reinitialize) {
768 re_initialize();
769 goto restart;
770 }
771 sleep(interval_sec);
772 get_counters(pcc_even);
773 gettimeofday(&tv_even, (struct timezone *)NULL);
774 compute_delta(pcc_even, pcc_odd, pcc_delta);
775 timersub(&tv_even, &tv_odd, &tv_delta);
776 compute_average(pcc_delta, pcc_average);
777 print_counters(pcc_delta);
778 }
779}
780
781void check_dev_msr()
782{
783 struct stat sb;
784
785 if (stat("/dev/cpu/0/msr", &sb)) {
786 fprintf(stderr, "no /dev/cpu/0/msr\n");
787 fprintf(stderr, "Try \"# modprobe msr\"\n");
788 exit(-5);
789 }
790}
791
792void check_super_user()
793{
794 if (getuid() != 0) {
795 fprintf(stderr, "must be root\n");
796 exit(-6);
797 }
798}
799
800int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
801{
802 if (!genuine_intel)
803 return 0;
804
805 if (family != 6)
806 return 0;
807
808 switch (model) {
809	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
810 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
811 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
812 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
813 case 0x2C: /* Westmere EP - Gulftown */
814 case 0x2A: /* SNB */
815 case 0x2D: /* SNB Xeon */
816 return 1;
817 case 0x2E: /* Nehalem-EX Xeon - Beckton */
818 case 0x2F: /* Westmere-EX Xeon - Eagleton */
819 default:
820 return 0;
821 }
822}
823
824int is_snb(unsigned int family, unsigned int model)
825{
826 if (!genuine_intel)
827 return 0;
828
829 switch (model) {
830 case 0x2A:
831 case 0x2D:
832 return 1;
833 }
834 return 0;
835}
836
837double discover_bclk(unsigned int family, unsigned int model)
838{
839 if (is_snb(family, model))
840 return 100.00;
841 else
842 return 133.33;
843}
844
845void check_cpuid()
846{
847 unsigned int eax, ebx, ecx, edx, max_level;
848 unsigned int fms, family, model, stepping;
849
850 eax = ebx = ecx = edx = 0;
851
852 asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
853
854 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
855 genuine_intel = 1;
856
857 if (verbose)
858 fprintf(stderr, "%.4s%.4s%.4s ",
859 (char *)&ebx, (char *)&edx, (char *)&ecx);
860
861 asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
862 family = (fms >> 8) & 0xf;
863 model = (fms >> 4) & 0xf;
864 stepping = fms & 0xf;
865 if (family == 6 || family == 0xf)
866 model += ((fms >> 16) & 0xf) << 4;
867
868 if (verbose)
869 fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
870 max_level, family, model, stepping, family, model, stepping);
871
872 if (!(edx & (1 << 5))) {
873 fprintf(stderr, "CPUID: no MSR\n");
874 exit(1);
875 }
876
877 /*
878 * check max extended function levels of CPUID.
879 * This is needed to check for invariant TSC.
880 * This check is valid for both Intel and AMD.
881 */
882 ebx = ecx = edx = 0;
883 asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
884
885 if (max_level < 0x80000007) {
886 fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
887 exit(1);
888 }
889
890 /*
891 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
892 * this check is valid for both Intel and AMD
893 */
894 asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
895	has_invariant_tsc = edx & (1 << 8);
896
897 if (!has_invariant_tsc) {
898 fprintf(stderr, "No invariant TSC\n");
899 exit(1);
900 }
901
902 /*
903 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
904 * this check is valid for both Intel and AMD
905 */
906
907 asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
908	has_aperf = ecx & (1 << 0);
909 if (!has_aperf) {
910 fprintf(stderr, "No APERF MSR\n");
911 exit(1);
912 }
913
914 do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
915 do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
916 do_snb_cstates = is_snb(family, model);
917 bclk = discover_bclk(family, model);
918
919 do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
920}
921
922
923void usage()
924{
925 fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n",
926 progname);
927 exit(1);
928}
929
930
931/*
932 * in /dev/cpu/ return success for names that are numbers
933 * ie. filter out ".", "..", "microcode".
934 */
935int dir_filter(const struct dirent *dirp)
936{
937 if (isdigit(dirp->d_name[0]))
938 return 1;
939 else
940 return 0;
941}
942
943int open_dev_cpu_msr(int dummy1)
944{
945 return 0;
946}
947
948void turbostat_init()
949{
950 check_cpuid();
951
952 check_dev_msr();
953 check_super_user();
954
955 num_cpus = for_all_cpus(alloc_new_cpu_counters);
956
957 if (verbose)
958 print_nehalem_info();
959}
960
961int fork_it(char **argv)
962{
963 int retval;
964 pid_t child_pid;
965 get_counters(pcc_even);
966 gettimeofday(&tv_even, (struct timezone *)NULL);
967
968 child_pid = fork();
969 if (!child_pid) {
970 /* child */
971 execvp(argv[0], argv);
972 } else {
973 int status;
974
975 /* parent */
976 if (child_pid == -1) {
977 perror("fork");
978 exit(1);
979 }
980
981 signal(SIGINT, SIG_IGN);
982 signal(SIGQUIT, SIG_IGN);
983 if (waitpid(child_pid, &status, 0) == -1) {
984 perror("wait");
985 exit(1);
986 }
987 }
988 get_counters(pcc_odd);
989 gettimeofday(&tv_odd, (struct timezone *)NULL);
990 retval = compute_delta(pcc_odd, pcc_even, pcc_delta);
991
992 timersub(&tv_odd, &tv_even, &tv_delta);
993 compute_average(pcc_delta, pcc_average);
994 if (!retval)
995 print_counters(pcc_delta);
996
997	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
998
999 return 0;
1000}
1001
1002void cmdline(int argc, char **argv)
1003{
1004 int opt;
1005
1006 progname = argv[0];
1007
1008 while ((opt = getopt(argc, argv, "+vi:M:")) != -1) {
1009 switch (opt) {
1010 case 'v':
1011 verbose++;
1012 break;
1013 case 'i':
1014 interval_sec = atoi(optarg);
1015 break;
1016 case 'M':
1017 sscanf(optarg, "%x", &extra_msr_offset);
1018 if (verbose > 1)
1019 fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
1020 break;
1021 default:
1022 usage();
1023 }
1024 }
1025}
1026
1027int main(int argc, char **argv)
1028{
1029 cmdline(argc, argv);
1030
1031 if (verbose > 1)
1032 fprintf(stderr, "turbostat Dec 6, 2010"
1033 " - Len Brown <lenb@kernel.org>\n");
1034 if (verbose > 1)
1035 fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");
1036
1037 turbostat_init();
1038
1039 /*
1040 * if any params left, it must be a command to fork
1041 */
1042 if (argc - optind)
1043 return fork_it(argv + optind);
1044 else
1045 turbostat_loop();
1046
1047 return 0;
1048}
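
A side note on the c1 derivation in compute_delta() above: C1 residency has no dedicated MSR, so turbostat infers it as whatever part of the TSC interval is not accounted for by MPERF (C0) or by the C3/C6/C7 residency counters, clamping it to zero when MPERF overshoots the TSC. A minimal standalone sketch of that arithmetic (hypothetical struct and helper names; the delta values are assumed to have already been computed as compute_delta() does):

	#include <stdio.h>

	/* Illustrative only: per-cpu deltas as compute_delta() above would produce. */
	struct deltas {
		unsigned long long tsc, mperf, c3, c6, c7;
	};

	static void print_core_residency(const struct deltas *d)
	{
		/* C1 = interval not spent in C0 (mperf) or in C3/C6/C7; clamp at 0. */
		unsigned long long c1 = (d->mperf > d->tsc) ? 0 :
			d->tsc - d->mperf - d->c3 - d->c6 - d->c7;

		printf("%%c0 %6.2f  %%c1 %6.2f  %%c3 %6.2f  %%c6 %6.2f  %%c7 %6.2f\n",
			100.0 * d->mperf / d->tsc, 100.0 * c1 / d->tsc,
			100.0 * d->c3 / d->tsc, 100.0 * d->c6 / d->tsc,
			100.0 * d->c7 / d->tsc);
	}

	int main(void)
	{
		/* hypothetical one-second interval on a 2.4 GHz-TSC processor */
		struct deltas d = {
			.tsc = 2400000000ULL, .mperf = 600000000ULL,
			.c3 = 300000000ULL, .c6 = 1200000000ULL, .c7 = 0,
		};

		print_core_residency(&d);
		return 0;
	}

Compiled and run, this hypothetical sample interval prints roughly 25% C0, 12.5% C1, 12.5% C3, 50% C6 and 0% C7, which is the same ratio math print_pcc() applies to each entry in the counter list.
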
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
new file mode 100644
index 000000000000..f458237fdd79
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -0,0 +1,8 @@
1x86_energy_perf_policy : x86_energy_perf_policy.c
2
3clean :
4 rm -f x86_energy_perf_policy
5
6install :
7 install x86_energy_perf_policy /usr/bin/
8 install x86_energy_perf_policy.8 /usr/share/man/man8/
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
new file mode 100644
index 000000000000..8eaaad648cdb
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -0,0 +1,104 @@
1.\" This page Copyright (C) 2010 Len Brown <len.brown@intel.com>
2.\" Distributed under the GPL, Copyleft 1994.
3.TH X86_ENERGY_PERF_POLICY 8
4.SH NAME
5x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS
6.SH SYNOPSIS
7.ft B
8.B x86_energy_perf_policy
9.RB [ "\-c cpu" ]
10.RB [ "\-v" ]
11.RB "\-r"
12.br
13.B x86_energy_perf_policy
14.RB [ "\-c cpu" ]
15.RB [ "\-v" ]
16.RB 'performance'
17.br
18.B x86_energy_perf_policy
19.RB [ "\-c cpu" ]
20.RB [ "\-v" ]
21.RB 'normal'
22.br
23.B x86_energy_perf_policy
24.RB [ "\-c cpu" ]
25.RB [ "\-v" ]
26.RB 'powersave'
27.br
28.B x86_energy_perf_policy
29.RB [ "\-c cpu" ]
30.RB [ "\-v" ]
31.RB n
32.br
33.SH DESCRIPTION
34\fBx86_energy_perf_policy\fP
35allows software to convey
36its policy for the relative importance of performance
37versus energy savings to the processor.
38
39The processor uses this information in model-specific ways
40when it must select trade-offs between performance and
41energy efficiency.
42
43This policy hint does not supersede Processor Performance states
44(P-states) or CPU Idle power states (C-states), but allows
45software to have influence where it would otherwise be unable
46to express a preference.
47
48For example, this setting may tell the hardware how
49aggressively or conservatively to control frequency
50in the "turbo range" above the explicitly OS-controlled
51P-state frequency range. It may also tell the hardware
52how aggressively it should enter the OS-requested C-states.
53
54Support for this feature is indicated by CPUID.06H.ECX.bit3
55per the Intel Architectures Software Developer's Manual.
56
57.SS Options
58\fB-c\fP limits operation to a single CPU.
59The default is to operate on all CPUs.
60Note that MSR_IA32_ENERGY_PERF_BIAS is defined per
61logical processor, but that the initial implementations
62of the MSR were shared among all processors in each package.
63.PP
64\fB-v\fP increases verbosity. By default
65x86_energy_perf_policy is silent.
66.PP
67\fB-r\fP is for "read-only" mode - the unchanged state
68is read and displayed.
69.PP
70.I performance
71Set a policy where performance is paramount.
72The processor will be unwilling to sacrifice any performance
73for the sake of energy saving. This is the hardware default.
74.PP
75.I normal
76Set a policy with a normal balance between performance and energy efficiency.
77The processor will tolerate minor performance compromise
78for potentially significant energy savings.
79This is a reasonable default for most desktops and servers.
80.PP
81.I powersave
82Set a policy where the processor can accept
83a measurable performance hit to maximize energy efficiency.
84.PP
85.I n
86Set MSR_IA32_ENERGY_PERF_BIAS to the specified number.
87The range of valid numbers is 0-15, where 0 is maximum
88performance and 15 is maximum energy efficiency.
89
90.SH NOTES
91.B "x86_energy_perf_policy "
92runs only as root.
93.SH FILES
94.ta
95.nf
96/dev/cpu/*/msr
97.fi
98
99.SH "SEE ALSO"
100msr(4)
101.PP
102.SH AUTHORS
103.nf
104Written by Len Brown <len.brown@intel.com>
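
The read side of what this page describes boils down to a pread() at offset 0x1b0 of the /dev/cpu/*/msr interface listed under FILES. Below is a minimal read-only sketch, assuming the msr module is loaded and reusing the MSR_IA32_ENERGY_PERF_BIAS offset defined in the source that follows; it takes a single hypothetical CPU argument, whereas the real tool walks /proc/stat to cover every CPU:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0	/* same offset as in the tool below */

	int main(int argc, char **argv)
	{
		/* hypothetical single-CPU reader; defaults to cpu0 */
		int cpu = argc > 1 ? atoi(argv[1]) : 0;
		unsigned long long msr;
		char path[32];
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);	/* needs root and "modprobe msr" */
		if (fd < 0) {
			perror(path);
			return 1;
		}
		if (pread(fd, &msr, sizeof(msr), MSR_IA32_ENERGY_PERF_BIAS) != sizeof(msr)) {
			perror("pread");
			close(fd);
			return 1;
		}
		close(fd);
		/* only the low 4 bits (0-15) are the documented policy range */
		printf("cpu%d: energy_perf_bias = %llu\n", cpu, msr & 0xf);
		return 0;
	}

Run as root, it prints the current bias for the chosen CPU; 0 corresponds to 'performance', 6 to 'normal' and 15 to 'powersave', matching the constants used by the tool below.
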
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
new file mode 100644
index 000000000000..d9678a34dd70
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -0,0 +1,325 @@
1/*
2 * x86_energy_perf_policy -- set the energy versus performance
3 * policy preference bias on recent X86 processors.
4 */
5/*
6 * Copyright (c) 2010, Intel Corporation.
7 * Len Brown <len.brown@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <stdio.h>
24#include <unistd.h>
25#include <sys/types.h>
26#include <sys/stat.h>
27#include <sys/resource.h>
28#include <fcntl.h>
29#include <signal.h>
30#include <sys/time.h>
31#include <stdlib.h>
32#include <string.h>
33
34unsigned int verbose; /* set with -v */
35unsigned int read_only; /* set with -r */
36char *progname;
37unsigned long long new_bias;
38int cpu = -1;
39
40/*
41 * Usage:
42 *
43 * -c cpu: limit action to a single CPU (default is all CPUs)
44 * -v: verbose output (can invoke more than once)
45 * -r: read-only, don't change any settings
46 *
47 * performance
48 * Performance is paramount.
49 * Unwilling to sacrifice any performance
50 * for the sake of energy saving. (hardware default)
51 *
52 * normal
53 * Can tolerate minor performance compromise
54 * for potentially significant energy savings.
55 * (reasonable default for most desktops and servers)
56 *
57 * powersave
58 * Can tolerate significant performance hit
59 * to maximize energy savings.
60 *
61 * n
62 * a numerical value to write to the underlying MSR.
63 */
64void usage(void)
65{
66 printf("%s: [-c cpu] [-v] "
67 "(-r | 'performance' | 'normal' | 'powersave' | n)\n",
68 progname);
69 exit(1);
70}
71
72#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
73
74#define BIAS_PERFORMANCE 0
75#define BIAS_BALANCE 6
76#define BIAS_POWERSAVE 15
77
78void cmdline(int argc, char **argv)
79{
80 int opt;
81
82 progname = argv[0];
83
84 while ((opt = getopt(argc, argv, "+rvc:")) != -1) {
85 switch (opt) {
86 case 'c':
87 cpu = atoi(optarg);
88 break;
89 case 'r':
90 read_only = 1;
91 break;
92 case 'v':
93 verbose++;
94 break;
95 default:
96 usage();
97 }
98 }
99 /* if -r, then should be no additional optind */
100 if (read_only && (argc > optind))
101 usage();
102
103 /*
104 * if no -r , then must be one additional optind
105 */
106 if (!read_only) {
107
108 if (argc != optind + 1) {
109 printf("must supply -r or policy param\n");
110 usage();
111 }
112
113 if (!strcmp("performance", argv[optind])) {
114 new_bias = BIAS_PERFORMANCE;
115 } else if (!strcmp("normal", argv[optind])) {
116 new_bias = BIAS_BALANCE;
117 } else if (!strcmp("powersave", argv[optind])) {
118 new_bias = BIAS_POWERSAVE;
119 } else {
120 char *endptr;
121
122 new_bias = strtoull(argv[optind], &endptr, 0);
123 if (endptr == argv[optind] ||
124 new_bias > BIAS_POWERSAVE) {
125 fprintf(stderr, "invalid value: %s\n",
126 argv[optind]);
127 usage();
128 }
129 }
130 }
131}
132
133/*
134 * validate_cpuid()
135 * returns on success, quietly exits on failure (make verbose with -v)
136 */
137void validate_cpuid(void)
138{
139 unsigned int eax, ebx, ecx, edx, max_level;
140	char brand[16] = "";	/* printed below but never populated; initialize to keep the verbose printf safe */
141 unsigned int fms, family, model, stepping;
142
143 eax = ebx = ecx = edx = 0;
144
145 asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx),
146 "=d" (edx) : "a" (0));
147
148 if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
149 if (verbose)
150 fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel",
151 (char *)&ebx, (char *)&edx, (char *)&ecx);
152 exit(1);
153 }
154
155 asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
156 family = (fms >> 8) & 0xf;
157 model = (fms >> 4) & 0xf;
158 stepping = fms & 0xf;
159 if (family == 6 || family == 0xf)
160 model += ((fms >> 16) & 0xf) << 4;
161
162 if (verbose > 1)
163 printf("CPUID %s %d levels family:model:stepping "
164 "0x%x:%x:%x (%d:%d:%d)\n", brand, max_level,
165 family, model, stepping, family, model, stepping);
166
167 if (!(edx & (1 << 5))) {
168 if (verbose)
169 printf("CPUID: no MSR\n");
170 exit(1);
171 }
172
173 /*
174 * Support for MSR_IA32_ENERGY_PERF_BIAS
175 * is indicated by CPUID.06H.ECX.bit3
176 */
177 asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6));
178 if (verbose)
179 printf("CPUID.06H.ECX: 0x%x\n", ecx);
180 if (!(ecx & (1 << 3))) {
181 if (verbose)
182 printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n");
183 exit(1);
184 }
185 return; /* success */
186}
187
188unsigned long long get_msr(int cpu, int offset)
189{
190 unsigned long long msr;
191 char msr_path[32];
192 int retval;
193 int fd;
194
195 sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
196 fd = open(msr_path, O_RDONLY);
197 if (fd < 0) {
198 printf("Try \"# modprobe msr\"\n");
199 perror(msr_path);
200 exit(1);
201 }
202
203 retval = pread(fd, &msr, sizeof msr, offset);
204
205 if (retval != sizeof msr) {
206 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
207 exit(-2);
208 }
209 close(fd);
210 return msr;
211}
212
213unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset)
214{
215 unsigned long long old_msr;
216 char msr_path[32];
217 int retval;
218 int fd;
219
220 sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
221 fd = open(msr_path, O_RDWR);
222 if (fd < 0) {
223 perror(msr_path);
224 exit(1);
225 }
226
227 retval = pread(fd, &old_msr, sizeof old_msr, offset);
228 if (retval != sizeof old_msr) {
229	perror("pread");
230 printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
231 exit(-2);
232 }
233
234 retval = pwrite(fd, &new_msr, sizeof new_msr, offset);
235 if (retval != sizeof new_msr) {
236 perror("pwrite");
237 printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval);
238 exit(-2);
239 }
240
241 close(fd);
242
243 return old_msr;
244}
245
246void print_msr(int cpu)
247{
248 printf("cpu%d: 0x%016llx\n",
249 cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS));
250}
251
252void update_msr(int cpu)
253{
254 unsigned long long previous_msr;
255
256 previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS);
257
258 if (verbose)
259 printf("cpu%d msr0x%x 0x%016llx -> 0x%016llx\n",
260 cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias);
261
262 return;
263}
264
265char *proc_stat = "/proc/stat";
266/*
267 * run func() on every cpu in /dev/cpu
268 */
269void for_every_cpu(void (func)(int))
270{
271 FILE *fp;
272 int retval;
273
274 fp = fopen(proc_stat, "r");
275 if (fp == NULL) {
276 perror(proc_stat);
277 exit(1);
278 }
279
280 retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
281 if (retval != 0) {
282 perror("/proc/stat format");
283 exit(1);
284 }
285
286 while (1) {
287 int cpu;
288
289 retval = fscanf(fp,
290 "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
291 &cpu);
292 if (retval != 1)
293 return;
294
295 func(cpu);
296 }
297 fclose(fp);
298}
299
300int main(int argc, char **argv)
301{
302 cmdline(argc, argv);
303
304 if (verbose > 1)
305 printf("x86_energy_perf_policy Nov 24, 2010"
306 " - Len Brown <lenb@kernel.org>\n");
307 if (verbose > 1 && !read_only)
308	printf("new_bias %llu\n", new_bias);
309
310 validate_cpuid();
311
312 if (cpu != -1) {
313 if (read_only)
314 print_msr(cpu);
315 else
316 update_msr(cpu);
317 } else {
318 if (read_only)
319 for_every_cpu(print_msr);
320 else
321 for_every_cpu(update_msr);
322 }
323
324 return 0;
325}
diff --git a/tools/testing/ktest/compare-ktest-sample.pl b/tools/testing/ktest/compare-ktest-sample.pl
new file mode 100755
index 000000000000..9a571e71683c
--- /dev/null
+++ b/tools/testing/ktest/compare-ktest-sample.pl
@@ -0,0 +1,30 @@
1#!/usr/bin/perl
2
3open (IN,"ktest.pl");
4while (<IN>) {
5 if (/\$opt\{"?([A-Z].*?)(\[.*\])?"?\}/ ||
6 /set_test_option\("(.*?)"/) {
7 $opt{$1} = 1;
8 }
9}
10close IN;
11
12open (IN, "sample.conf");
13while (<IN>) {
14 if (/^\s*#?\s*(\S+)\s*=/) {
15 $samp{$1} = 1;
16 }
17}
18close IN;
19
20foreach $opt (keys %opt) {
21 if (!defined($samp{$opt})) {
22 print "opt = $opt\n";
23 }
24}
25
26foreach $samp (keys %samp) {
27 if (!defined($opt{$samp})) {
28 print "samp = $samp\n";
29 }
30}
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
new file mode 100755
index 000000000000..e1c62eeb88f5
--- /dev/null
+++ b/tools/testing/ktest/ktest.pl
@@ -0,0 +1,2023 @@
1#!/usr/bin/perl -w
2#
3# Copyright 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
4# Licensed under the terms of the GNU GPL License version 2
5#
6
7use strict;
8use IPC::Open2;
9use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
10use File::Path qw(mkpath);
11use File::Copy qw(cp);
12use FileHandle;
13
14my $VERSION = "0.2";
15
16$| = 1;
17
18my %opt;
19my %repeat_tests;
20my %repeats;
21my %default;
22
23#default opts
24$default{"NUM_TESTS"} = 1;
25$default{"REBOOT_TYPE"} = "grub";
26$default{"TEST_TYPE"} = "test";
27$default{"BUILD_TYPE"} = "randconfig";
28$default{"MAKE_CMD"} = "make";
29$default{"TIMEOUT"} = 120;
30$default{"TMP_DIR"} = "/tmp/ktest";
31$default{"SLEEP_TIME"} = 60; # sleep time between tests
32$default{"BUILD_NOCLEAN"} = 0;
33$default{"REBOOT_ON_ERROR"} = 0;
34$default{"POWEROFF_ON_ERROR"} = 0;
35$default{"REBOOT_ON_SUCCESS"} = 1;
36$default{"POWEROFF_ON_SUCCESS"} = 0;
37$default{"BUILD_OPTIONS"} = "";
38$default{"BISECT_SLEEP_TIME"} = 60; # sleep time between bisects
39$default{"CLEAR_LOG"} = 0;
40$default{"SUCCESS_LINE"} = "login:";
41$default{"BOOTED_TIMEOUT"} = 1;
42$default{"DIE_ON_FAILURE"} = 1;
43$default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
44$default{"SCP_TO_TARGET"} = "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE";
45$default{"REBOOT"} = "ssh \$SSH_USER\@\$MACHINE reboot";
46$default{"STOP_AFTER_SUCCESS"} = 10;
47$default{"STOP_AFTER_FAILURE"} = 60;
48$default{"LOCALVERSION"} = "-test";
49
50my $ktest_config;
51my $version;
52my $machine;
53my $ssh_user;
54my $tmpdir;
55my $builddir;
56my $outputdir;
57my $output_config;
58my $test_type;
59my $build_type;
60my $build_options;
61my $reboot_type;
62my $reboot_script;
63my $power_cycle;
64my $reboot;
65my $reboot_on_error;
66my $poweroff_on_error;
67my $die_on_failure;
68my $powercycle_after_reboot;
69my $poweroff_after_halt;
70my $ssh_exec;
71my $scp_to_target;
72my $power_off;
73my $grub_menu;
74my $grub_number;
75my $target;
76my $make;
77my $post_install;
78my $noclean;
79my $minconfig;
80my $addconfig;
81my $in_bisect = 0;
82my $bisect_bad = "";
83my $reverse_bisect;
84my $in_patchcheck = 0;
85my $run_test;
86my $redirect;
87my $buildlog;
88my $dmesg;
89my $monitor_fp;
90my $monitor_pid;
91my $monitor_cnt = 0;
92my $sleep_time;
93my $bisect_sleep_time;
94my $store_failures;
95my $timeout;
96my $booted_timeout;
97my $console;
98my $success_line;
99my $stop_after_success;
100my $stop_after_failure;
101my $build_target;
102my $target_image;
103my $localversion;
104my $iteration = 0;
105my $successes = 0;
106
107my %entered_configs;
108my %config_help;
109
110$config_help{"MACHINE"} = << "EOF"
111 The machine hostname that you will test.
112EOF
113 ;
114$config_help{"SSH_USER"} = << "EOF"
115 The box is expected to have ssh on normal bootup, provide the user
116 (most likely root, since you need privileged operations)
117EOF
118 ;
119$config_help{"BUILD_DIR"} = << "EOF"
120 The directory that contains the Linux source code (full path).
121EOF
122 ;
123$config_help{"OUTPUT_DIR"} = << "EOF"
124	The directory where the objects will be built (full path).
125 (can not be same as BUILD_DIR)
126EOF
127 ;
128$config_help{"BUILD_TARGET"} = << "EOF"
129 The location of the compiled file to copy to the target.
130 (relative to OUTPUT_DIR)
131EOF
132 ;
133$config_help{"TARGET_IMAGE"} = << "EOF"
134 The place to put your image on the test machine.
135EOF
136 ;
137$config_help{"POWER_CYCLE"} = << "EOF"
138 A script or command to reboot the box.
139
140 Here is a digital loggers power switch example
141 POWER_CYCLE = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin\@power/outlet?5=CCL'
142
143 Here is an example to reboot a virtual box on the current host
144 with the name "Guest".
145 POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
146EOF
147 ;
148$config_help{"CONSOLE"} = << "EOF"
149 The script or command that reads the console
150
151 If you use ttywatch server, something like the following would work.
152CONSOLE = nc -d localhost 3001
153
154 For a virtual machine with guest name "Guest".
155CONSOLE = virsh console Guest
156EOF
157 ;
158$config_help{"LOCALVERSION"} = << "EOF"
159 Required version ending to differentiate the test
160 from other linux builds on the system.
161EOF
162 ;
163$config_help{"REBOOT_TYPE"} = << "EOF"
164 Way to reboot the box to the test kernel.
165 Only valid options so far are "grub" and "script".
166
167 If you specify grub, it will assume grub version 1
168 and will search in /boot/grub/menu.lst for the title \$GRUB_MENU
169 and select that target to reboot to the kernel. If this is not
170 your setup, then specify "script" and have a command or script
171 specified in REBOOT_SCRIPT to boot to the target.
172
173	The entry in /boot/grub/menu.lst must be added manually.
174 The test will not modify that file.
175EOF
176 ;
177$config_help{"GRUB_MENU"} = << "EOF"
178 The grub title name for the test kernel to boot
179 (Only mandatory if REBOOT_TYPE = grub)
180
181	Note: ktest.pl will not update the grub menu.lst; you need to
182 manually add an option for the test. ktest.pl will search
183 the grub menu.lst for this option to find what kernel to
184 reboot into.
185
186 For example, if in the /boot/grub/menu.lst the test kernel title has:
187 title Test Kernel
188 kernel vmlinuz-test
189 GRUB_MENU = Test Kernel
190EOF
191 ;
192$config_help{"REBOOT_SCRIPT"} = << "EOF"
193 A script to reboot the target into the test kernel
194 (Only mandatory if REBOOT_TYPE = script)
195EOF
196 ;
197
198
199sub get_ktest_config {
200 my ($config) = @_;
201
202 return if (defined($opt{$config}));
203
204 if (defined($config_help{$config})) {
205 print "\n";
206 print $config_help{$config};
207 }
208
209 for (;;) {
210 print "$config = ";
211 if (defined($default{$config})) {
212 print "\[$default{$config}\] ";
213 }
214 $entered_configs{$config} = <STDIN>;
215 $entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/;
216 if ($entered_configs{$config} =~ /^\s*$/) {
217 if ($default{$config}) {
218 $entered_configs{$config} = $default{$config};
219 } else {
220 print "Your answer can not be blank\n";
221 next;
222 }
223 }
224 last;
225 }
226}
227
228sub get_ktest_configs {
229 get_ktest_config("MACHINE");
230 get_ktest_config("SSH_USER");
231 get_ktest_config("BUILD_DIR");
232 get_ktest_config("OUTPUT_DIR");
233 get_ktest_config("BUILD_TARGET");
234 get_ktest_config("TARGET_IMAGE");
235 get_ktest_config("POWER_CYCLE");
236 get_ktest_config("CONSOLE");
237 get_ktest_config("LOCALVERSION");
238
239 my $rtype = $opt{"REBOOT_TYPE"};
240
241 if (!defined($rtype)) {
242 if (!defined($opt{"GRUB_MENU"})) {
243 get_ktest_config("REBOOT_TYPE");
244 $rtype = $entered_configs{"REBOOT_TYPE"};
245 } else {
246 $rtype = "grub";
247 }
248 }
249
250 if ($rtype eq "grub") {
251 get_ktest_config("GRUB_MENU");
252 } else {
253 get_ktest_config("REBOOT_SCRIPT");
254 }
255}
256
257sub set_value {
258 my ($lvalue, $rvalue) = @_;
259
260 if (defined($opt{$lvalue})) {
261 die "Error: Option $lvalue defined more than once!\n";
262 }
263 if ($rvalue =~ /^\s*$/) {
264 delete $opt{$lvalue};
265 } else {
266 $opt{$lvalue} = $rvalue;
267 }
268}
269
270sub read_config {
271 my ($config) = @_;
272
273 open(IN, $config) || die "can't read file $config";
274
275 my $name = $config;
276 $name =~ s,.*/(.*),$1,;
277
278 my $test_num = 0;
279 my $default = 1;
280 my $repeat = 1;
281 my $num_tests_set = 0;
282 my $skip = 0;
283 my $rest;
284
285 while (<IN>) {
286
287 # ignore blank lines and comments
288 next if (/^\s*$/ || /\s*\#/);
289
290 if (/^\s*TEST_START(.*)/) {
291
292 $rest = $1;
293
294 if ($num_tests_set) {
295 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
296 }
297
298 my $old_test_num = $test_num;
299 my $old_repeat = $repeat;
300
301 $test_num += $repeat;
302 $default = 0;
303 $repeat = 1;
304
305 if ($rest =~ /\s+SKIP(.*)/) {
306 $rest = $1;
307 $skip = 1;
308 } else {
309 $skip = 0;
310 }
311
312 if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) {
313 $repeat = $1;
314 $rest = $2;
315 $repeat_tests{"$test_num"} = $repeat;
316 }
317
318 if ($rest =~ /\s+SKIP(.*)/) {
319 $rest = $1;
320 $skip = 1;
321 }
322
323 if ($rest !~ /^\s*$/) {
324	die "$name: $.: Garbage found after TEST_START\n$_";
325 }
326
327 if ($skip) {
328 $test_num = $old_test_num;
329 $repeat = $old_repeat;
330 }
331
332 } elsif (/^\s*DEFAULTS(.*)$/) {
333 $default = 1;
334
335 $rest = $1;
336
337 if ($rest =~ /\s+SKIP(.*)/) {
338 $rest = $1;
339 $skip = 1;
340 } else {
341 $skip = 0;
342 }
343
344 if ($rest !~ /^\s*$/) {
345	die "$name: $.: Garbage found after DEFAULTS\n$_";
346 }
347
348 } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
349
350 next if ($skip);
351
352 my $lvalue = $1;
353 my $rvalue = $2;
354
355 if (!$default &&
356 ($lvalue eq "NUM_TESTS" ||
357 $lvalue eq "LOG_FILE" ||
358 $lvalue eq "CLEAR_LOG")) {
359 die "$name: $.: $lvalue must be set in DEFAULTS section\n";
360 }
361
362 if ($lvalue eq "NUM_TESTS") {
363 if ($test_num) {
364 die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
365 }
366 if (!$default) {
367 die "$name: $.: NUM_TESTS must be set in default section\n";
368 }
369 $num_tests_set = 1;
370 }
371
372 if ($default || $lvalue =~ /\[\d+\]$/) {
373 set_value($lvalue, $rvalue);
374 } else {
375 my $val = "$lvalue\[$test_num\]";
376 set_value($val, $rvalue);
377
378 if ($repeat > 1) {
379 $repeats{$val} = $repeat;
380 }
381 }
382 } else {
383 die "$name: $.: Garbage found in config\n$_";
384 }
385 }
386
387 close(IN);
388
389 if ($test_num) {
390 $test_num += $repeat - 1;
391 $opt{"NUM_TESTS"} = $test_num;
392 }
393
394 # make sure we have all mandatory configs
395 get_ktest_configs;
396
397 # set any defaults
398
399 foreach my $default (keys %default) {
400 if (!defined($opt{$default})) {
401 $opt{$default} = $default{$default};
402 }
403 }
404}
405
406sub _logit {
407 if (defined($opt{"LOG_FILE"})) {
408 open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
409 print OUT @_;
410 close(OUT);
411 }
412}
413
414sub logit {
415 if (defined($opt{"LOG_FILE"})) {
416 _logit @_;
417 } else {
418 print @_;
419 }
420}
421
422sub doprint {
423 print @_;
424 _logit @_;
425}
426
427sub run_command;
428
429sub reboot {
430 # try to reboot normally
431 if (run_command $reboot) {
432 if (defined($powercycle_after_reboot)) {
433 sleep $powercycle_after_reboot;
434 run_command "$power_cycle";
435 }
436 } else {
437 # nope? power cycle it.
438 run_command "$power_cycle";
439 }
440}
441
442sub do_not_reboot {
443 my $i = $iteration;
444
445 return $test_type eq "build" ||
446 ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
447 ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
448}
449
450sub dodie {
451 doprint "CRITICAL FAILURE... ", @_, "\n";
452
453 my $i = $iteration;
454
455 if ($reboot_on_error && !do_not_reboot) {
456
457 doprint "REBOOTING\n";
458 reboot;
459
460 } elsif ($poweroff_on_error && defined($power_off)) {
461 doprint "POWERING OFF\n";
462 `$power_off`;
463 }
464
465 die @_, "\n";
466}
467
468sub open_console {
469 my ($fp) = @_;
470
471 my $flags;
472
473 my $pid = open($fp, "$console|") or
474 dodie "Can't open console $console";
475
476 $flags = fcntl($fp, F_GETFL, 0) or
477 dodie "Can't get flags for the socket: $!";
478 $flags = fcntl($fp, F_SETFL, $flags | O_NONBLOCK) or
479 dodie "Can't set flags for the socket: $!";
480
481 return $pid;
482}
483
484sub close_console {
485 my ($fp, $pid) = @_;
486
487 doprint "kill child process $pid\n";
488 kill 2, $pid;
489
490 print "closing!\n";
491 close($fp);
492}
493
494sub start_monitor {
495 if ($monitor_cnt++) {
496 return;
497 }
498 $monitor_fp = \*MONFD;
499 $monitor_pid = open_console $monitor_fp;
500
501 return;
502
503 open(MONFD, "Stop perl from warning about single use of MONFD");
504}
505
506sub end_monitor {
507 if (--$monitor_cnt) {
508 return;
509 }
510 close_console($monitor_fp, $monitor_pid);
511}
512
513sub wait_for_monitor {
514 my ($time) = @_;
515 my $line;
516
517 doprint "** Wait for monitor to settle down **\n";
518
519 # read the monitor and wait for the system to calm down
520 do {
521 $line = wait_for_input($monitor_fp, $time);
522 print "$line" if (defined($line));
523 } while (defined($line));
524 print "** Monitor flushed **\n";
525}
526
527sub fail {
528
529 if ($die_on_failure) {
530 dodie @_;
531 }
532
533 doprint "FAILED\n";
534
535 my $i = $iteration;
536
537 # no need to reboot for just building.
538 if (!do_not_reboot) {
539 doprint "REBOOTING\n";
540 reboot;
541 start_monitor;
542 wait_for_monitor $sleep_time;
543 end_monitor;
544 }
545
546 doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
547 doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
548 doprint "KTEST RESULT: TEST $i Failed: ", @_, "\n";
549 doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
550 doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
551
552 return 1 if (!defined($store_failures));
553
554 my @t = localtime;
555 my $date = sprintf "%04d%02d%02d%02d%02d%02d",
556 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
557
558 my $type = $build_type;
559 if ($type =~ /useconfig/) {
560 $type = "useconfig";
561 }
562
563 my $dir = "$machine-$test_type-$type-fail-$date";
564 my $faildir = "$store_failures/$dir";
565
566 if (!-d $faildir) {
567 mkpath($faildir) or
568 die "can't create $faildir";
569 }
570 if (-f "$output_config") {
571 cp "$output_config", "$faildir/config" or
572 die "failed to copy .config";
573 }
574 if (-f $buildlog) {
575 cp $buildlog, "$faildir/buildlog" or
576 die "failed to move $buildlog";
577 }
578 if (-f $dmesg) {
579 cp $dmesg, "$faildir/dmesg" or
580 die "failed to move $dmesg";
581 }
582
583 doprint "*** Saved info to $faildir ***\n";
584
585 return 1;
586}
587
588sub run_command {
589 my ($command) = @_;
590 my $dolog = 0;
591 my $dord = 0;
592 my $pid;
593
594 $command =~ s/\$SSH_USER/$ssh_user/g;
595 $command =~ s/\$MACHINE/$machine/g;
596
597 doprint("$command ... ");
598
599 $pid = open(CMD, "$command 2>&1 |") or
600 (fail "unable to exec $command" and return 0);
601
602 if (defined($opt{"LOG_FILE"})) {
603 open(LOG, ">>$opt{LOG_FILE}") or
604 dodie "failed to write to log";
605 $dolog = 1;
606 }
607
608 if (defined($redirect)) {
609 open (RD, ">$redirect") or
610 dodie "failed to write to redirect $redirect";
611 $dord = 1;
612 }
613
614 while (<CMD>) {
615 print LOG if ($dolog);
616 print RD if ($dord);
617 }
618
619 waitpid($pid, 0);
620 my $failed = $?;
621
622 close(CMD);
623 close(LOG) if ($dolog);
624 close(RD) if ($dord);
625
626 if ($failed) {
627 doprint "FAILED!\n";
628 } else {
629 doprint "SUCCESS\n";
630 }
631
632 return !$failed;
633}
634
635sub run_ssh {
636 my ($cmd) = @_;
637 my $cp_exec = $ssh_exec;
638
639 $cp_exec =~ s/\$SSH_COMMAND/$cmd/g;
640 return run_command "$cp_exec";
641}
642
643sub run_scp {
644 my ($src, $dst) = @_;
645 my $cp_scp = $scp_to_target;
646
647 $cp_scp =~ s/\$SRC_FILE/$src/g;
648 $cp_scp =~ s/\$DST_FILE/$dst/g;
649
650 return run_command "$cp_scp";
651}
652
653sub get_grub_index {
654
655 if ($reboot_type ne "grub") {
656 return;
657 }
658 return if (defined($grub_number));
659
660 doprint "Find grub menu ... ";
661 $grub_number = -1;
662
663 my $ssh_grub = $ssh_exec;
664 $ssh_grub =~ s,\$SSH_COMMAND,cat /boot/grub/menu.lst,g;
665
666 open(IN, "$ssh_grub |")
667 or die "unable to get menu.lst";
668
669 while (<IN>) {
670 if (/^\s*title\s+$grub_menu\s*$/) {
671 $grub_number++;
672 last;
673 } elsif (/^\s*title\s/) {
674 $grub_number++;
675 }
676 }
677 close(IN);
678
679	die "Could not find '$grub_menu' in /boot/grub/menu.lst on $machine"
680 if ($grub_number < 0);
681 doprint "$grub_number\n";
682}
683
684sub wait_for_input
685{
686 my ($fp, $time) = @_;
687 my $rin;
688 my $ready;
689 my $line;
690 my $ch;
691
692 if (!defined($time)) {
693 $time = $timeout;
694 }
695
696 $rin = '';
697 vec($rin, fileno($fp), 1) = 1;
698 $ready = select($rin, undef, undef, $time);
699
700 $line = "";
701
702 # try to read one char at a time
703 while (sysread $fp, $ch, 1) {
704 $line .= $ch;
705 last if ($ch eq "\n");
706 }
707
708 if (!length($line)) {
709 return undef;
710 }
711
712 return $line;
713}
714
715sub reboot_to {
716 if ($reboot_type eq "grub") {
717 run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'";
718 return;
719 }
720
721 run_command "$reboot_script";
722}
723
724sub get_sha1 {
725 my ($commit) = @_;
726
727 doprint "git rev-list --max-count=1 $commit ... ";
728 my $sha1 = `git rev-list --max-count=1 $commit`;
729 my $ret = $?;
730
731 logit $sha1;
732
733 if ($ret) {
734 doprint "FAILED\n";
735 dodie "Failed to get git $commit";
736 }
737
738 print "SUCCESS\n";
739
740 chomp $sha1;
741
742 return $sha1;
743}
744
745sub monitor {
746 my $booted = 0;
747 my $bug = 0;
748 my $skip_call_trace = 0;
749 my $loops;
750
751 wait_for_monitor 5;
752
753 my $line;
754 my $full_line = "";
755
756 open(DMESG, "> $dmesg") or
757 die "unable to write to $dmesg";
758
759 reboot_to;
760
761 my $success_start;
762 my $failure_start;
763
764 for (;;) {
765
766 if ($booted) {
767 $line = wait_for_input($monitor_fp, $booted_timeout);
768 } else {
769 $line = wait_for_input($monitor_fp);
770 }
771
772 last if (!defined($line));
773
774 doprint $line;
775 print DMESG $line;
776
777 # we are not guaranteed to get a full line
778 $full_line .= $line;
779
780 if ($full_line =~ /$success_line/) {
781 $booted = 1;
782 $success_start = time;
783 }
784
785 if ($booted && defined($stop_after_success) &&
786 $stop_after_success >= 0) {
787 my $now = time;
788 if ($now - $success_start >= $stop_after_success) {
789 doprint "Test forced to stop after $stop_after_success seconds after success\n";
790 last;
791 }
792 }
793
794 if ($full_line =~ /\[ backtrace testing \]/) {
795 $skip_call_trace = 1;
796 }
797
798 if ($full_line =~ /call trace:/i) {
799 if (!$skip_call_trace) {
800 $bug = 1;
801 $failure_start = time;
802 }
803 }
804
805 if ($bug && defined($stop_after_failure) &&
806 $stop_after_failure >= 0) {
807 my $now = time;
808 if ($now - $failure_start >= $stop_after_failure) {
809 doprint "Test forced to stop after $stop_after_failure seconds after failure\n";
810 last;
811 }
812 }
813
814 if ($full_line =~ /\[ end of backtrace testing \]/) {
815 $skip_call_trace = 0;
816 }
817
818 if ($full_line =~ /Kernel panic -/) {
819 $bug = 1;
820 }
821
822 if ($line =~ /\n/) {
823 $full_line = "";
824 }
825 }
826
827 close(DMESG);
828
829 if ($bug) {
830 return 0 if ($in_bisect);
831 fail "failed - got a bug report" and return 0;
832 }
833
834 if (!$booted) {
835 return 0 if ($in_bisect);
836 fail "failed - never got a boot prompt." and return 0;
837 }
838
839 return 1;
840}
841
842sub install {
843
844 run_scp "$outputdir/$build_target", "$target_image" or
845 dodie "failed to copy image";
846
847 my $install_mods = 0;
848
849 # should we process modules?
850 $install_mods = 0;
851 open(IN, "$output_config") or dodie("Can't read config file");
852 while (<IN>) {
853 if (/CONFIG_MODULES(=y)?/) {
854 $install_mods = 1 if (defined($1));
855 last;
856 }
857 }
858 close(IN);
859
860 if (!$install_mods) {
861 doprint "No modules needed\n";
862 return;
863 }
864
865 run_command "$make INSTALL_MOD_PATH=$tmpdir modules_install" or
866 dodie "Failed to install modules";
867
868 my $modlib = "/lib/modules/$version";
869 my $modtar = "ktest-mods.tar.bz2";
870
871 run_ssh "rm -rf $modlib" or
872 dodie "failed to remove old mods: $modlib";
873
874 # would be nice if scp -r did not follow symbolic links
875 run_command "cd $tmpdir && tar -cjf $modtar lib/modules/$version" or
876 dodie "making tarball";
877
878 run_scp "$tmpdir/$modtar", "/tmp" or
879 dodie "failed to copy modules";
880
881 unlink "$tmpdir/$modtar";
882
883 run_ssh "'(cd / && tar xf /tmp/$modtar)'" or
884 dodie "failed to tar modules";
885
886 run_ssh "rm -f /tmp/$modtar";
887
888 return if (!defined($post_install));
889
890 my $cp_post_install = $post_install;
891	$cp_post_install =~ s/\$KERNEL_VERSION/$version/g;
892 run_command "$cp_post_install" or
893 dodie "Failed to run post install";
894}
895
896sub check_buildlog {
897 my ($patch) = @_;
898
899 my @files = `git show $patch | diffstat -l`;
900
901 open(IN, "git show $patch |") or
902 dodie "failed to show $patch";
903 while (<IN>) {
904 if (m,^--- a/(.*),) {
905 chomp $1;
906 $files[$#files] = $1;
907 }
908 }
909 close(IN);
910
911 open(IN, $buildlog) or dodie "Can't open $buildlog";
912 while (<IN>) {
913 if (/^\s*(.*?):.*(warning|error)/) {
914 my $err = $1;
915 foreach my $file (@files) {
916 my $fullpath = "$builddir/$file";
917 if ($file eq $err || $fullpath eq $err) {
918 fail "$file built with warnings" and return 0;
919 }
920 }
921 }
922 }
923 close(IN);
924
925 return 1;
926}
927
928sub build {
929 my ($type) = @_;
930 my $defconfig = "";
931
932 unlink $buildlog;
933
934 if ($type =~ /^useconfig:(.*)/) {
935 run_command "cp $1 $output_config" or
936 dodie "could not copy $1 to .config";
937
938 $type = "oldconfig";
939 }
940
941 # old config can ask questions
942 if ($type eq "oldconfig") {
943 $type = "oldnoconfig";
944
945 # allow for empty configs
946 run_command "touch $output_config";
947
948 run_command "mv $output_config $outputdir/config_temp" or
949 dodie "moving .config";
950
951 if (!$noclean && !run_command "$make mrproper") {
952 dodie "make mrproper";
953 }
954
955 run_command "mv $outputdir/config_temp $output_config" or
956 dodie "moving config_temp";
957
958 } elsif (!$noclean) {
959 unlink "$output_config";
960 run_command "$make mrproper" or
961 dodie "make mrproper";
962 }
963
964 # add something to distinguish this build
965 open(OUT, "> $outputdir/localversion") or dodie("Can't make localversion file");
966 print OUT "$localversion\n";
967 close(OUT);
968
969 if (defined($minconfig)) {
970 $defconfig = "KCONFIG_ALLCONFIG=$minconfig";
971 }
972
973 run_command "$defconfig $make $type" or
974 dodie "failed make config";
975
976 $redirect = "$buildlog";
977 if (!run_command "$make $build_options") {
978 undef $redirect;
979 # bisect may need this to pass
980 return 0 if ($in_bisect);
981 fail "failed build" and return 0;
982 }
983 undef $redirect;
984
985 return 1;
986}
987
988sub halt {
989 if (!run_ssh "halt" or defined($power_off)) {
990 if (defined($poweroff_after_halt)) {
991 sleep $poweroff_after_halt;
992 run_command "$power_off";
993 }
994 } else {
995 # nope? the zap it!
996 run_command "$power_off";
997 }
998}
999
1000sub success {
1001 my ($i) = @_;
1002
1003 $successes++;
1004
1005 doprint "\n\n*******************************************\n";
1006 doprint "*******************************************\n";
1007 doprint "KTEST RESULT: TEST $i SUCCESS!!!! **\n";
1008 doprint "*******************************************\n";
1009 doprint "*******************************************\n";
1010
1011 if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
1012 doprint "Reboot and wait $sleep_time seconds\n";
1013 reboot;
1014 start_monitor;
1015 wait_for_monitor $sleep_time;
1016 end_monitor;
1017 }
1018}
1019
1020sub get_version {
1021 # get the release name
1022 doprint "$make kernelrelease ... ";
1023 $version = `$make kernelrelease | tail -1`;
1024 chomp($version);
1025 doprint "$version\n";
1026}
1027
1028sub child_run_test {
1029 my $failed = 0;
1030
1031 # child should have no power
1032 $reboot_on_error = 0;
1033 $poweroff_on_error = 0;
1034 $die_on_failure = 1;
1035
1036 run_command $run_test or $failed = 1;
1037 exit $failed;
1038}
1039
1040my $child_done;
1041
1042sub child_finished {
1043 $child_done = 1;
1044}
1045
1046sub do_run_test {
1047 my $child_pid;
1048 my $child_exit;
1049 my $line;
1050 my $full_line;
1051 my $bug = 0;
1052
1053 wait_for_monitor 1;
1054
1055 doprint "run test $run_test\n";
1056
1057 $child_done = 0;
1058
1059 $SIG{CHLD} = qw(child_finished);
1060
1061 $child_pid = fork;
1062
1063 child_run_test if (!$child_pid);
1064
1065 $full_line = "";
1066
1067 do {
1068 $line = wait_for_input($monitor_fp, 1);
1069 if (defined($line)) {
1070
1071 # we are not guaranteed to get a full line
1072 $full_line .= $line;
1073
1074 if ($full_line =~ /call trace:/i) {
1075 $bug = 1;
1076 }
1077
1078 if ($full_line =~ /Kernel panic -/) {
1079 $bug = 1;
1080 }
1081
1082 if ($line =~ /\n/) {
1083 $full_line = "";
1084 }
1085 }
1086 } while (!$child_done && !$bug);
1087
1088 if ($bug) {
1089 doprint "Detected kernel crash!\n";
1090 # kill the child with extreme prejudice
1091 kill 9, $child_pid;
1092 }
1093
1094 waitpid $child_pid, 0;
1095 $child_exit = $?;
1096
1097 if ($bug || $child_exit) {
1098 return 0 if $in_bisect;
1099 fail "test failed" and return 0;
1100 }
1101 return 1;
1102}
1103
1104sub run_git_bisect {
1105 my ($command) = @_;
1106
1107 doprint "$command ... ";
1108
1109 my $output = `$command 2>&1`;
1110 my $ret = $?;
1111
1112 logit $output;
1113
1114 if ($ret) {
1115 doprint "FAILED\n";
1116 dodie "Failed to git bisect";
1117 }
1118
1119 doprint "SUCCESS\n";
1120 if ($output =~ m/^(Bisecting: .*\(roughly \d+ steps?\))\s+\[([[:xdigit:]]+)\]/) {
1121 doprint "$1 [$2]\n";
1122 } elsif ($output =~ m/^([[:xdigit:]]+) is the first bad commit/) {
1123 $bisect_bad = $1;
1124 doprint "Found bad commit... $1\n";
1125 return 0;
1126 } else {
1127 # we already logged it, just print it now.
1128 print $output;
1129 }
1130
1131 return 1;
1132}
1133
1134# returns 1 on success, 0 on failure
1135sub run_bisect_test {
1136 my ($type, $buildtype) = @_;
1137
1138 my $failed = 0;
1139 my $result;
1140 my $output;
1141 my $ret;
1142
1143 $in_bisect = 1;
1144
1145 build $buildtype or $failed = 1;
1146
1147 if ($type ne "build") {
1148 dodie "Failed on build" if $failed;
1149
1150 # Now boot the box
1151 get_grub_index;
1152 get_version;
1153 install;
1154
1155 start_monitor;
1156 monitor or $failed = 1;
1157
1158 if ($type ne "boot") {
1159 dodie "Failed on boot" if $failed;
1160
1161 do_run_test or $failed = 1;
1162 }
1163 end_monitor;
1164 }
1165
1166 if ($failed) {
1167 $result = 0;
1168
1169 # reboot the box to a good kernel
1170 if ($type ne "build") {
1171 doprint "Reboot and sleep $bisect_sleep_time seconds\n";
1172 reboot;
1173 start_monitor;
1174 wait_for_monitor $bisect_sleep_time;
1175 end_monitor;
1176 }
1177 } else {
1178 $result = 1;
1179 }
1180 $in_bisect = 0;
1181
1182 return $result;
1183}
1184
1185sub run_bisect {
1186 my ($type) = @_;
1187 my $buildtype = "oldconfig";
1188
1189 # We should have a minconfig to use?
1190 if (defined($minconfig)) {
1191 $buildtype = "useconfig:$minconfig";
1192 }
1193
1194 my $ret = run_bisect_test $type, $buildtype;
1195
1196
1197 # Are we looking for where it worked, not failed?
1198 if ($reverse_bisect) {
1199 $ret = !$ret;
1200 }
1201
1202 if ($ret) {
1203 return "good";
1204 } else {
1205 return "bad";
1206 }
1207}
1208
1209sub bisect {
1210 my ($i) = @_;
1211
1212 my $result;
1213
1214 die "BISECT_GOOD[$i] not defined\n" if (!defined($opt{"BISECT_GOOD[$i]"}));
1215 die "BISECT_BAD[$i] not defined\n" if (!defined($opt{"BISECT_BAD[$i]"}));
1216 die "BISECT_TYPE[$i] not defined\n" if (!defined($opt{"BISECT_TYPE[$i]"}));
1217
1218 my $good = $opt{"BISECT_GOOD[$i]"};
1219 my $bad = $opt{"BISECT_BAD[$i]"};
1220 my $type = $opt{"BISECT_TYPE[$i]"};
1221 my $start = $opt{"BISECT_START[$i]"};
1222 my $replay = $opt{"BISECT_REPLAY[$i]"};
1223
1224 # convert to true sha1's
1225 $good = get_sha1($good);
1226 $bad = get_sha1($bad);
1227
1228 if (defined($opt{"BISECT_REVERSE[$i]"}) &&
1229 $opt{"BISECT_REVERSE[$i]"} == 1) {
1230 doprint "Performing a reverse bisect (bad is good, good is bad!)\n";
1231 $reverse_bisect = 1;
1232 } else {
1233 $reverse_bisect = 0;
1234 }
1235
1236 # Can't have a test without having a test to run
1237 if ($type eq "test" && !defined($run_test)) {
1238 $type = "boot";
1239 }
1240
1241 my $check = $opt{"BISECT_CHECK[$i]"};
1242 if (defined($check) && $check ne "0") {
1243
1244 # get current HEAD
1245 my $head = get_sha1("HEAD");
1246
1247 if ($check ne "good") {
1248 doprint "TESTING BISECT BAD [$bad]\n";
1249 run_command "git checkout $bad" or
1250 die "Failed to checkout $bad";
1251
1252 $result = run_bisect $type;
1253
1254 if ($result ne "bad") {
1255 fail "Tested BISECT_BAD [$bad] and it succeeded" and return 0;
1256 }
1257 }
1258
1259 if ($check ne "bad") {
1260 doprint "TESTING BISECT GOOD [$good]\n";
1261 run_command "git checkout $good" or
1262 die "Failed to checkout $good";
1263
1264 $result = run_bisect $type;
1265
1266 if ($result ne "good") {
1267 fail "Tested BISECT_GOOD [$good] and it failed" and return 0;
1268 }
1269 }
1270
1271 # checkout where we started
1272 run_command "git checkout $head" or
1273 die "Failed to checkout $head";
1274 }
1275
1276 run_command "git bisect start" or
1277 dodie "could not start bisect";
1278
1279 run_command "git bisect good $good" or
1280 dodie "could not set bisect good to $good";
1281
1282 run_git_bisect "git bisect bad $bad" or
1283 dodie "could not set bisect bad to $bad";
1284
1285 if (defined($replay)) {
1286 run_command "git bisect replay $replay" or
1287 dodie "failed to run replay";
1288 }
1289
1290 if (defined($start)) {
1291 run_command "git checkout $start" or
1292 dodie "failed to checkout $start";
1293 }
1294
1295 my $test;
1296 do {
1297 $result = run_bisect $type;
1298 $test = run_git_bisect "git bisect $result";
1299 } while ($test);
1300
1301 run_command "git bisect log" or
1302 dodie "could not capture git bisect log";
1303
1304 run_command "git bisect reset" or
1305 dodie "could not reset git bisect";
1306
1307 doprint "Bad commit was [$bisect_bad]\n";
1308
1309 success $i;
1310}
1311
1312my %config_ignore;
1313my %config_set;
1314
1315my %config_list;
1316my %null_config;
1317
1318my %dependency;
1319
1320sub process_config_ignore {
1321 my ($config) = @_;
1322
1323 open (IN, $config)
1324 or dodie "Failed to read $config";
1325
1326 while (<IN>) {
1327 if (/^(.*?(CONFIG\S*)(=.*| is not set))/) {
1328 $config_ignore{$2} = $1;
1329 }
1330 }
1331
1332 close(IN);
1333}
1334
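process_config_ignore above and read_current_config below both key off the two forms a kernel .config file uses for an option. A minimal standalone sketch (not part of the patch; the option names are arbitrary) of the lines those patterns capture:

# Illustrative only: the .config line forms the surrounding regexes match.
use strict;
use warnings;

my @lines = (
    "CONFIG_PRINTK=y",                # set option, also matched by /^(CONFIG\S+)=(.*)/
    "CONFIG_LOG_BUF_SHIFT=18",
    "# CONFIG_DEBUG_INFO is not set", # unset option, caught by the " is not set" branch
);

foreach my $line (@lines) {
    if ($line =~ /^(.*?(CONFIG\S*)(=.*| is not set))/) {
        # $2 is the option name, $1 is the full text kept for re-emitting later
        print "option $2 -> '$1'\n";
    }
}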
1335sub read_current_config {
1336 my ($config_ref) = @_;
1337
1338 %{$config_ref} = ();
1339 undef %{$config_ref};
1340
1341 my @key = keys %{$config_ref};
1342 if ($#key >= 0) {
1343 print "did not delete!\n";
1344 exit;
1345 }
1346 open (IN, "$output_config");
1347
1348 while (<IN>) {
1349 if (/^(CONFIG\S+)=(.*)/) {
1350 ${$config_ref}{$1} = $2;
1351 }
1352 }
1353 close(IN);
1354}
1355
1356sub get_dependencies {
1357 my ($config) = @_;
1358
1359 my $arr = $dependency{$config};
1360 if (!defined($arr)) {
1361 return ();
1362 }
1363
1364 my @deps = @{$arr};
1365
1366 foreach my $dep (@{$arr}) {
1367 print "ADD DEP $dep\n";
1368 @deps = (@deps, get_dependencies $dep);
1369 }
1370
1371 return @deps;
1372}
1373
1374sub create_config {
1375 my @configs = @_;
1376
1377 open(OUT, ">$output_config") or dodie "Can not write to $output_config";
1378
1379 foreach my $config (@configs) {
1380 print OUT "$config_set{$config}\n";
1381 my @deps = get_dependencies $config;
1382 foreach my $dep (@deps) {
1383 print OUT "$config_set{$dep}\n";
1384 }
1385 }
1386
1387 foreach my $config (keys %config_ignore) {
1388 print OUT "$config_ignore{$config}\n";
1389 }
1390 close(OUT);
1391
1392# exit;
1393 run_command "$make oldnoconfig" or
1394 dodie "failed make config oldconfig";
1395
1396}
1397
1398sub compare_configs {
1399 my (%a, %b) = @_;
1400
1401 foreach my $item (keys %a) {
1402 if (!defined($b{$item})) {
1403 print "diff $item\n";
1404 return 1;
1405 }
1406 delete $b{$item};
1407 }
1408
1409 my @keys = keys %b;
1410 if ($#keys) {
1411 print "diff2 $keys[0]\n";
1412 }
1413 return -1 if ($#keys >= 0);
1414
1415 return 0;
1416}
1417
1418sub run_config_bisect_test {
1419 my ($type) = @_;
1420
1421 return run_bisect_test $type, "oldconfig";
1422}
1423
1424sub process_passed {
1425 my (%configs) = @_;
1426
1427 doprint "These configs had no failure: (Enabling them for further compiles)\n";
1428 # Passed! All these configs are part of a good compile.
1429 # Add them to the min options.
1430 foreach my $config (keys %configs) {
1431 if (defined($config_list{$config})) {
1432 doprint " removing $config\n";
1433 $config_ignore{$config} = $config_list{$config};
1434 delete $config_list{$config};
1435 }
1436 }
1437 doprint "config copied to $outputdir/config_good\n";
1438 run_command "cp -f $output_config $outputdir/config_good";
1439}
1440
1441sub process_failed {
1442 my ($config) = @_;
1443
1444 doprint "\n\n***************************************\n";
1445 doprint "Found bad config: $config\n";
1446 doprint "***************************************\n\n";
1447}
1448
1449sub run_config_bisect {
1450
1451 my @start_list = keys %config_list;
1452
1453 if ($#start_list < 0) {
1454 doprint "No more configs to test!!!\n";
1455 return -1;
1456 }
1457
1458 doprint "***** RUN TEST ***\n";
1459 my $type = $opt{"CONFIG_BISECT_TYPE[$iteration]"};
1460 my $ret;
1461 my %current_config;
1462
1463 my $count = $#start_list + 1;
1464 doprint " $count configs to test\n";
1465
1466 my $half = int($#start_list / 2);
1467
1468 do {
1469 my @tophalf = @start_list[0 .. $half];
1470
1471 create_config @tophalf;
1472 read_current_config \%current_config;
1473
1474 $count = $#tophalf + 1;
1475 doprint "Testing $count configs\n";
1476 my $found = 0;
1477 # make sure we test something
1478 foreach my $config (@tophalf) {
1479 if (defined($current_config{$config})) {
1480 logit " $config\n";
1481 $found = 1;
1482 }
1483 }
1484 if (!$found) {
1485 # try the other half
1486 doprint "Top half produced no set configs, trying bottom half\n";
1487 @tophalf = @start_list[$half .. $#start_list];
1488 create_config @tophalf;
1489 read_current_config \%current_config;
1490 foreach my $config (@tophalf) {
1491 if (defined($current_config{$config})) {
1492 logit " $config\n";
1493 $found = 1;
1494 }
1495 }
1496 if (!$found) {
1497 doprint "Failed: Can't make new config with current configs\n";
1498 foreach my $config (@start_list) {
1499 doprint " CONFIG: $config\n";
1500 }
1501 return -1;
1502 }
1503 $count = $#tophalf + 1;
1504 doprint "Testing $count configs\n";
1505 }
1506
1507 $ret = run_config_bisect_test $type;
1508
1509 if ($ret) {
1510 process_passed %current_config;
1511 return 0;
1512 }
1513
1514 doprint "This config had a failure.\n";
1515 doprint "Removing these configs that were not set in this config:\n";
1516 doprint "config copied to $outputdir/config_bad\n";
1517 run_command "cp -f $output_config $outputdir/config_bad";
1518
1519 # A config exists in this group that was bad.
1520 foreach my $config (keys %config_list) {
1521 if (!defined($current_config{$config})) {
1522 doprint " removing $config\n";
1523 delete $config_list{$config};
1524 }
1525 }
1526
1527 @start_list = @tophalf;
1528
1529 if ($#start_list == 0) {
1530 process_failed $start_list[0];
1531 return 1;
1532 }
1533
1534 # remove half the configs we are looking at and see if
1535 # they are good.
1536 $half = int($#start_list / 2);
1537 } while ($half > 0);
1538
1539 # we found a single config, try it again
1540 my @tophalf = @start_list[0 .. 0];
1541
1542 $ret = run_config_bisect_test $type;
1543 if ($ret) {
1544 process_passed %current_config;
1545 return 0;
1546 }
1547
1548 process_failed $start_list[0];
1549 return 1;
1550}
1551
1552sub config_bisect {
1553 my ($i) = @_;
1554
1555 my $start_config = $opt{"CONFIG_BISECT[$i]"};
1556
1557 my $tmpconfig = "$tmpdir/use_config";
1558
1559 # Make the file with the bad config and the min config
1560 if (defined($minconfig)) {
1561 # read the min config for things to ignore
1562 run_command "cp $minconfig $tmpconfig" or
1563 dodie "failed to copy $minconfig to $tmpconfig";
1564 } else {
1565 unlink $tmpconfig;
1566 }
1567
1568 # Add other configs
1569 if (defined($addconfig)) {
1570 run_command "cat $addconfig >> $tmpconfig" or
1571 dodie "failed to append $addconfig";
1572 }
1573
1574 my $defconfig = "";
1575 if (-f $tmpconfig) {
1576 $defconfig = "KCONFIG_ALLCONFIG=$tmpconfig";
1577 process_config_ignore $tmpconfig;
1578 }
1579
1580 # now process the start config
1581 run_command "cp $start_config $output_config" or
1582 dodie "failed to copy $start_config to $output_config";
1583
1584 # read directly what we want to check
1585 my %config_check;
1586 open (IN, $output_config)
1587 or dodie "failed to open $output_config";

1588
1589 while (<IN>) {
1590 if (/^((CONFIG\S*)=.*)/) {
1591 $config_check{$2} = $1;
1592 }
1593 }
1594 close(IN);
1595
1596 # Now run oldconfig with the minconfig (and addconfigs)
1597 run_command "$defconfig $make oldnoconfig" or
1598 dodie "failed make config oldconfig";
1599
1600 # check to see what we lost (or gained)
1601 open (IN, $output_config)
1602 or dodie "Failed to read $start_config";
1603
1604 my %removed_configs;
1605 my %added_configs;
1606
1607 while (<IN>) {
1608 if (/^((CONFIG\S*)=.*)/) {
1609 # save off all options
1610 $config_set{$2} = $1;
1611 if (defined($config_check{$2})) {
1612 if (defined($config_ignore{$2})) {
1613 $removed_configs{$2} = $1;
1614 } else {
1615 $config_list{$2} = $1;
1616 }
1617 } elsif (!defined($config_ignore{$2})) {
1618 $added_configs{$2} = $1;
1619 $config_list{$2} = $1;
1620 }
1621 }
1622 }
1623 close(IN);
1624
1625 my @confs = keys %removed_configs;
1626 if ($#confs >= 0) {
1627 doprint "Configs overridden by default configs and removed from check:\n";
1628 foreach my $config (@confs) {
1629 doprint " $config\n";
1630 }
1631 }
1632 @confs = keys %added_configs;
1633 if ($#confs >= 0) {
1634 doprint "Configs appearing in make oldconfig and added:\n";
1635 foreach my $config (@confs) {
1636 doprint " $config\n";
1637 }
1638 }
1639
1640 my %config_test;
1641 my $once = 0;
1642
1643 # Sometimes kconfig does weird things. We must make sure
1644 # that the config we autocreate has everything we need
1645 # to test, otherwise we may miss testing configs, or
1646 # may not be able to create a new config.
1647 # Here we create a config with everything set.
1648 create_config (keys %config_list);
1649 read_current_config \%config_test;
1650 foreach my $config (keys %config_list) {
1651 if (!defined($config_test{$config})) {
1652 if (!$once) {
1653 $once = 1;
1654 doprint "Configs not produced by kconfig (will not be checked):\n";
1655 }
1656 doprint " $config\n";
1657 delete $config_list{$config};
1658 }
1659 }
1660 my $ret;
1661 do {
1662 $ret = run_config_bisect;
1663 } while (!$ret);
1664
1665 return $ret if ($ret < 0);
1666
1667 success $i;
1668}
1669
1670sub patchcheck {
1671 my ($i) = @_;
1672
1673 die "PATCHCHECK_START[$i] not defined\n"
1674 if (!defined($opt{"PATCHCHECK_START[$i]"}));
1675 die "PATCHCHECK_TYPE[$i] not defined\n"
1676 if (!defined($opt{"PATCHCHECK_TYPE[$i]"}));
1677
1678 my $start = $opt{"PATCHCHECK_START[$i]"};
1679
1680 my $end = "HEAD";
1681 if (defined($opt{"PATCHCHECK_END[$i]"})) {
1682 $end = $opt{"PATCHCHECK_END[$i]"};
1683 }
1684
1685 # Get the true sha1's since we can use things like HEAD~3
1686 $start = get_sha1($start);
1687 $end = get_sha1($end);
1688
1689 my $type = $opt{"PATCHCHECK_TYPE[$i]"};
1690
1691 # Can't have a test without having a test to run
1692 if ($type eq "test" && !defined($run_test)) {
1693 $type = "boot";
1694 }
1695
1696 open (IN, "git log --pretty=oneline $end|") or
1697 dodie "could not get git list";
1698
1699 my @list;
1700
1701 while (<IN>) {
1702 chomp;
1703 $list[$#list+1] = $_;
1704 last if (/^$start/);
1705 }
1706 close(IN);
1707
1708 if ($list[$#list] !~ /^$start/) {
1709 fail "SHA1 $start not found";
1710 }
1711
1712 # go backwards in the list
1713 @list = reverse @list;
1714
1715 my $save_clean = $noclean;
1716
1717 $in_patchcheck = 1;
1718 foreach my $item (@list) {
1719 my $sha1 = $item;
1720 $sha1 =~ s/^([[:xdigit:]]+).*/$1/;
1721
1722 doprint "\nProcessing commit $item\n\n";
1723
1724 run_command "git checkout $sha1" or
1725 die "Failed to checkout $sha1";
1726
1727 # only clean on the first and last patch
1728 if ($item eq $list[0] ||
1729 $item eq $list[$#list]) {
1730 $noclean = $save_clean;
1731 } else {
1732 $noclean = 1;
1733 }
1734
1735 if (defined($minconfig)) {
1736 build "useconfig:$minconfig" or return 0;
1737 } else {
1738 # ?? no config to use?
1739 build "oldconfig" or return 0;
1740 }
1741
1742 check_buildlog $sha1 or return 0;
1743
1744 next if ($type eq "build");
1745
1746 get_grub_index;
1747 get_version;
1748 install;
1749
1750 my $failed = 0;
1751
1752 start_monitor;
1753 monitor or $failed = 1;
1754
1755 if (!$failed && $type ne "boot"){
1756 do_run_test or $failed = 1;
1757 }
1758 end_monitor;
1759 return 0 if ($failed);
1760
1761 }
1762 $in_patchcheck = 0;
1763 success $i;
1764
1765 return 1;
1766}
1767
1768$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl config-file\n";
1769
1770if ($#ARGV == 0) {
1771 $ktest_config = $ARGV[0];
1772 if (! -f $ktest_config) {
1773 print "$ktest_config does not exist.\n";
1774 my $ans;
1775 for (;;) {
1776 print "Create it? [Y/n] ";
1777 $ans = <STDIN>;
1778 chomp $ans;
1779 if ($ans =~ /^\s*$/) {
1780 $ans = "y";
1781 }
1782 last if ($ans =~ /^y$/i || $ans =~ /^n$/i);
1783 print "Please answer either 'y' or 'n'.\n";
1784 }
1785 if ($ans !~ /^y$/i) {
1786 exit 0;
1787 }
1788 }
1789} else {
1790 $ktest_config = "ktest.conf";
1791}
1792
1793if (! -f $ktest_config) {
1794 open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
1795 print OUT << "EOF"
1796# Generated by ktest.pl
1797#
1798# Define each test with TEST_START
1799# The config options below it will override the defaults
1800TEST_START
1801
1802DEFAULTS
1803EOF
1804;
1805 close(OUT);
1806}
1807read_config $ktest_config;
1808
1809# Append any configs entered in manually to the config file.
1810my @new_configs = keys %entered_configs;
1811if ($#new_configs >= 0) {
1812 print "\nAppending entered in configs to $ktest_config\n";
1813 open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
1814 foreach my $config (@new_configs) {
1815 print OUT "$config = $entered_configs{$config}\n";
1816 $opt{$config} = $entered_configs{$config};
1817 }
1818}
1819
1820if ($opt{"CLEAR_LOG"} && defined($opt{"LOG_FILE"})) {
1821 unlink $opt{"LOG_FILE"};
1822}
1823
1824doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
1825
1826for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
1827
1828 if (!$i) {
1829 doprint "DEFAULT OPTIONS:\n";
1830 } else {
1831 doprint "\nTEST $i OPTIONS";
1832 if (defined($repeat_tests{$i})) {
1833 $repeat = $repeat_tests{$i};
1834 doprint " ITERATE $repeat";
1835 }
1836 doprint "\n";
1837 }
1838
1839 foreach my $option (sort keys %opt) {
1840
1841 if ($option =~ /\[(\d+)\]$/) {
1842 next if ($i != $1);
1843 } else {
1844 next if ($i);
1845 }
1846
1847 doprint "$option = $opt{$option}\n";
1848 }
1849}
1850
1851sub set_test_option {
1852 my ($name, $i) = @_;
1853
1854 my $option = "$name\[$i\]";
1855
1856 if (defined($opt{$option})) {
1857 return $opt{$option};
1858 }
1859
1860 foreach my $test (keys %repeat_tests) {
1861 if ($i >= $test &&
1862 $i < $test + $repeat_tests{$test}) {
1863 $option = "$name\[$test\]";
1864 if (defined($opt{$option})) {
1865 return $opt{$option};
1866 }
1867 }
1868 }
1869
1870 if (defined($opt{$name})) {
1871 return $opt{$name};
1872 }
1873
1874 return undef;
1875}
1876
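To make the lookup order above concrete: the config parser earlier in the script (not shown in this hunk) is assumed here to store an option written inside a TEST_START section under the key NAME[n] for that test number, while options in a DEFAULTS section are stored under the bare NAME. A hypothetical ktest.conf fragment, with placeholder values:

# Hypothetical fragment -- all values are placeholders.
# In the DEFAULTS section at the top of the file:
TIMEOUT = 120

# First test:
TEST_START
TEST_TYPE = boot

# Second test, run three times:
TEST_START ITERATE 3
TEST_TYPE = boot
TIMEOUT = 300

Under that assumption, set_test_option("TIMEOUT", 2) finds $opt{"TIMEOUT[2]"} and returns 300; the iterated runs 3 and 4 have no TIMEOUT[3] or TIMEOUT[4], so the %repeat_tests loop maps them back to test 2; and set_test_option("TIMEOUT", 1) finds neither and falls back to the default $opt{"TIMEOUT"} of 120.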
1877# The first thing we need to do is the builds
1878for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
1879
1880 $iteration = $i;
1881
1882 my $makecmd = set_test_option("MAKE_CMD", $i);
1883
1884 $machine = set_test_option("MACHINE", $i);
1885 $ssh_user = set_test_option("SSH_USER", $i);
1886 $tmpdir = set_test_option("TMP_DIR", $i);
1887 $outputdir = set_test_option("OUTPUT_DIR", $i);
1888 $builddir = set_test_option("BUILD_DIR", $i);
1889 $test_type = set_test_option("TEST_TYPE", $i);
1890 $build_type = set_test_option("BUILD_TYPE", $i);
1891 $build_options = set_test_option("BUILD_OPTIONS", $i);
1892 $power_cycle = set_test_option("POWER_CYCLE", $i);
1893 $reboot = set_test_option("REBOOT", $i);
1894 $noclean = set_test_option("BUILD_NOCLEAN", $i);
1895 $minconfig = set_test_option("MIN_CONFIG", $i);
1896 $run_test = set_test_option("TEST", $i);
1897 $addconfig = set_test_option("ADD_CONFIG", $i);
1898 $reboot_type = set_test_option("REBOOT_TYPE", $i);
1899 $grub_menu = set_test_option("GRUB_MENU", $i);
1900 $post_install = set_test_option("POST_INSTALL", $i);
1901 $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
1902 $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
1903 $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
1904 $die_on_failure = set_test_option("DIE_ON_FAILURE", $i);
1905 $power_off = set_test_option("POWER_OFF", $i);
1906 $powercycle_after_reboot = set_test_option("POWERCYCLE_AFTER_REBOOT", $i);
1907 $poweroff_after_halt = set_test_option("POWEROFF_AFTER_HALT", $i);
1908 $sleep_time = set_test_option("SLEEP_TIME", $i);
1909 $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i);
1910 $store_failures = set_test_option("STORE_FAILURES", $i);
1911 $timeout = set_test_option("TIMEOUT", $i);
1912 $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i);
1913 $console = set_test_option("CONSOLE", $i);
1914 $success_line = set_test_option("SUCCESS_LINE", $i);
1915 $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
1916 $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
1917 $build_target = set_test_option("BUILD_TARGET", $i);
1918 $ssh_exec = set_test_option("SSH_EXEC", $i);
1919 $scp_to_target = set_test_option("SCP_TO_TARGET", $i);
1920 $target_image = set_test_option("TARGET_IMAGE", $i);
1921 $localversion = set_test_option("LOCALVERSION", $i);
1922
1923 chdir $builddir || die "can't change directory to $builddir";
1924
1925 if (!-d $tmpdir) {
1926 mkpath($tmpdir) or
1927 die "can't create $tmpdir";
1928 }
1929
1930 $ENV{"SSH_USER"} = $ssh_user;
1931 $ENV{"MACHINE"} = $machine;
1932
1933 $target = "$ssh_user\@$machine";
1934
1935 $buildlog = "$tmpdir/buildlog-$machine";
1936 $dmesg = "$tmpdir/dmesg-$machine";
1937 $make = "$makecmd O=$outputdir";
1938 $output_config = "$outputdir/.config";
1939
1940 if ($reboot_type eq "grub") {
1941 dodie "GRUB_MENU not defined" if (!defined($grub_menu));
1942 } elsif (!defined($reboot_script)) {
1943 dodie "REBOOT_SCRIPT not defined"
1944 }
1945
1946 my $run_type = $build_type;
1947 if ($test_type eq "patchcheck") {
1948 $run_type = $opt{"PATCHCHECK_TYPE[$i]"};
1949 } elsif ($test_type eq "bisect") {
1950 $run_type = $opt{"BISECT_TYPE[$i]"};
1951 } elsif ($test_type eq "config_bisect") {
1952 $run_type = $opt{"CONFIG_BISECT_TYPE[$i]"};
1953 }
1954
1955 # mistake in config file?
1956 if (!defined($run_type)) {
1957 $run_type = "ERROR";
1958 }
1959
1960 doprint "\n\n";
1961 doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n";
1962
1963 unlink $dmesg;
1964 unlink $buildlog;
1965
1966 if (!defined($minconfig)) {
1967 $minconfig = $addconfig;
1968
1969 } elsif (defined($addconfig)) {
1970 run_command "cat $addconfig $minconfig > $tmpdir/add_config" or
1971 dodie "Failed to create temp config";
1972 $minconfig = "$tmpdir/add_config";
1973 }
1974
1975 my $checkout = $opt{"CHECKOUT[$i]"};
1976 if (defined($checkout)) {
1977 run_command "git checkout $checkout" or
1978 die "failed to checkout $checkout";
1979 }
1980
1981 if ($test_type eq "bisect") {
1982 bisect $i;
1983 next;
1984 } elsif ($test_type eq "config_bisect") {
1985 config_bisect $i;
1986 next;
1987 } elsif ($test_type eq "patchcheck") {
1988 patchcheck $i;
1989 next;
1990 }
1991
1992 if ($build_type ne "nobuild") {
1993 build $build_type or next;
1994 }
1995
1996 if ($test_type ne "build") {
1997 get_grub_index;
1998 get_version;
1999 install;
2000
2001 my $failed = 0;
2002 start_monitor;
2003 monitor or $failed = 1;;
2004
2005 if (!$failed && $test_type ne "boot" && defined($run_test)) {
2006 do_run_test or $failed = 1;
2007 }
2008 end_monitor;
2009 next if ($failed);
2010 }
2011
2012 success $i;
2013}
2014
2015if ($opt{"POWEROFF_ON_SUCCESS"}) {
2016 halt;
2017} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) {
2018 reboot;
2019}
2020
2021doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n";
2022
2023exit 0;
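For orientation before the option reference that follows, here is a rough sketch of a minimal ktest.conf driving the default flow above (build, install, reboot via grub, watch the console for the login prompt). Every hostname, path and command is a placeholder; sample.conf below describes what each option means:

# Sketch only -- all values are placeholders.
MACHINE = testbox
SSH_USER = root
BUILD_DIR = /home/test/linux.git
OUTPUT_DIR = /home/test/build/testbox
BUILD_TARGET = arch/x86/boot/bzImage
TARGET_IMAGE = /boot/vmlinuz-test
LOCALVERSION = -test
GRUB_MENU = Test Kernel
POWER_CYCLE = virsh destroy TestGuest; sleep 5; virsh start TestGuest
CONSOLE = virsh console TestGuest
LOG_FILE = /home/test/testbox.log

TEST_START
TEST_TYPE = boot
BUILD_TYPE = defconfig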
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
new file mode 100644
index 000000000000..3408c594b2de
--- /dev/null
+++ b/tools/testing/ktest/sample.conf
@@ -0,0 +1,622 @@
1#
2# Config file for ktest.pl
3#
4# Note, all paths must be absolute
5#
6
7# Options set in the beginning of the file are considered to be
8# default options. These options can be overridden by test specific
9# options, with the following exceptions:
10#
11# LOG_FILE
12# CLEAR_LOG
13# POWEROFF_ON_SUCCESS
14# REBOOT_ON_SUCCESS
15#
16# Test specific options are set after the label:
17#
18# TEST_START
19#
20# The options after a TEST_START label are specific to that test.
21# Each TEST_START label will set up a new test. If you want to
22# perform a test more than once, you can add the ITERATE label
23# to it followed by the number of times you want that test
24# to iterate. If the ITERATE is left off, the test will only
25# be performed once.
26#
27# TEST_START ITERATE 10
28#
29# You can skip a test by adding SKIP (before or after the ITERATE
30# and number)
31#
32# TEST_START SKIP
33#
34# TEST_START SKIP ITERATE 10
35#
36# TEST_START ITERATE 10 SKIP
37#
38# The SKIP label causes the options and the test itself to be ignored.
39# This is useful for setting up several different tests in one config file,
40# and only enabling the ones you want to use for a given test run.
41#
42# You can add default options anywhere in the file as well
43# with the DEFAULTS tag. This allows you to have default options
44# after the test options to keep the test options at the top
45# of the file. You can even place the DEFAULTS tag between
46# test cases (but not in the middle of a single test case)
47#
48# TEST_START
49# MIN_CONFIG = /home/test/config-test1
50#
51# DEFAULTS
52# MIN_CONFIG = /home/test/config-default
53#
54# TEST_START ITERATE 10
55#
56# The above will run the first test with MIN_CONFIG set to
57# /home/test/config-test1. Then 10 tests will be executed
58# with MIN_CONFIG set to /home/test/config-default.
59#
60# You can also disable defaults with the SKIP option
61#
62# DEFAULTS SKIP
63# MIN_CONFIG = /home/test/config-use-sometimes
64#
65# DEFAULTS
66# MIN_CONFIG = /home/test/config-most-times
67#
68# The above will ignore the first MIN_CONFIG. If you want to
69# use the first MIN_CONFIG, remove the SKIP from the first
70# DEFAULTS tag and add it to the second. Be careful, options
71# may only be declared once per test or default. If you have
72# the same option name under the same test or as a default,
73# ktest will fail to execute, and no tests will run.
74#
75
76
77#### Mandatory Default Options ####
78
79# These options must be in the default section, although most
80# may be overridden by test options.
81
82# The machine hostname that you will test
83#MACHINE = target
84
85# The box is expected to have ssh on normal bootup, provide the user
86# (most likely root, since you need privileged operations)
87#SSH_USER = root
88
89# The directory that contains the Linux source code
90#BUILD_DIR = /home/test/linux.git
91
92# The directory where the objects will be built
93# (cannot be the same as BUILD_DIR)
94#OUTPUT_DIR = /home/test/build/target
95
96# The location of the compiled file to copy to the target
97# (relative to OUTPUT_DIR)
98#BUILD_TARGET = arch/x86/boot/bzImage
99
100# The place to put your image on the test machine
101#TARGET_IMAGE = /boot/vmlinuz-test
102
103# A script or command to reboot the box
104#
105# Here is a digital loggers power switch example
106#POWER_CYCLE = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
107#
108# Here is an example to reboot a virtual box on the current host
109# with the name "Guest".
110#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
111
112# The script or command that reads the console
113#
114# If you use ttywatch server, something like the following would work.
115#CONSOLE = nc -d localhost 3001
116#
117# For a virtual machine with guest name "Guest".
118#CONSOLE = virsh console Guest
119
120# Required version ending to differentiate the test
121# from other linux builds on the system.
122#LOCALVERSION = -test
123
124# The grub title name for the test kernel to boot
125# (Only mandatory if REBOOT_TYPE = grub)
126#
127# Note, ktest.pl will not update the grub menu.lst, you need to
128# manually add an option for the test. ktest.pl will search
129# the grub menu.lst for this option to find what kernel to
130# reboot into.
131#
132# For example, if in the /boot/grub/menu.lst the test kernel title has:
133# title Test Kernel
134# kernel vmlinuz-test
135#GRUB_MENU = Test Kernel
136
137# A script to reboot the target into the test kernel
138# (Only mandatory if REBOOT_TYPE = script)
139#REBOOT_SCRIPT =
140
141#### Optional Config Options (all have defaults) ####
142
143# Start a test setup. If you leave this off, all options
144# will be default and the test will run once.
145# This is a label and not really an option (it takes no value).
146# You can append ITERATE and a number after it to iterate the
147# test a number of times, or SKIP to ignore this test.
148#
149#TEST_START
150#TEST_START ITERATE 5
151#TEST_START SKIP
152
153# Have the following options as default again. Used after tests
154# have already been defined by TEST_START. Optionally, you can
155# just define all default options before the first TEST_START
156# and you do not need this option.
157#
158# This is a label and not really an option (it takes no value).
159# You can append SKIP to this label and the options within this
160# section will be ignored.
161#
162# DEFAULTS
163# DEFAULTS SKIP
164
165# The default test type (default test)
166# The test types may be:
167# build - only build the kernel, do nothing else
168# boot - build and boot the kernel
169# test - build, boot and if TEST is set, run the test script
170# (If TEST is not set, it defaults back to boot)
171# bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
172# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
173#TEST_TYPE = test
174
175# Test to run if there is a successful boot and TEST_TYPE is test.
176# Must exit with 0 on success and non zero on error
177# default (undefined)
178#TEST = ssh user@machine /root/run_test
179
180# The build type is any make config type or special command
181# (default randconfig)
182# nobuild - skip the clean and build step
183# useconfig:/path/to/config - use the given config and run
184# oldconfig on it.
185# This option is ignored if TEST_TYPE is patchcheck or bisect
186#BUILD_TYPE = randconfig
187
188# The make command (default make)
189# If you are building a 32bit x86 on a 64 bit host
190#MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386
191
192# Any build options for the make of the kernel (not for other makes, like configs)
193# (default "")
194#BUILD_OPTIONS = -j20
195
196# If you need an initrd, you can add a script or code here to install
197# it. The environment variable KERNEL_VERSION will be set to the
198# kernel version that is used. Remember to add the initrd line
199# to your grub menu.lst file.
200#
201# Here's a couple of examples to use:
202#POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
203#
204# or on some systems:
205#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
206
207# Way to reboot the box to the test kernel.
208# Only valid options so far are "grub" and "script"
209# (default grub)
210# If you specify grub, it will assume grub version 1
211# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
212# and select that target to reboot to the kernel. If this is not
213# your setup, then specify "script" and have a command or script
214# specified in REBOOT_SCRIPT to boot to the target.
215#
216# The entry in /boot/grub/menu.lst must be entered in manually.
217# The test will not modify that file.
218#REBOOT_TYPE = grub
219
220# The min config that is needed to build for the machine
221# A nice way to create this is with the following:
222#
223# $ ssh target
224# $ lsmod > mymods
225# $ scp mymods host:/tmp
226# $ exit
227# $ cd linux.git
228# $ rm .config
229# $ make LSMOD=mymods localyesconfig
230# $ grep '^CONFIG' .config > /home/test/config-min
231#
232# If you want even less configs:
233#
234# log in directly to target (do not ssh)
235#
236# $ su
237# # lsmod | cut -d' ' -f1 | xargs rmmod
238#
239# repeat the above several times
240#
241# # lsmod > mymods
242# # reboot
243#
244# You may need to reboot to get your network back in order to copy the
245# mymods file to the host; then remove the previous .config and run the
246# localyesconfig again. The MIN_CONFIG generated like this is not
247# guaranteed to enable networking on the box, so a TEST_TYPE of
248# test may fail.
249#
250# You might also want to set:
251# CONFIG_CMDLINE="<your options here>"
252# randconfig may set the above and override your real command
253# line options.
254# (default undefined)
255#MIN_CONFIG = /home/test/config-min
256
257# Sometimes there are options that just break the boot and
258# you do not care about. Here are a few:
259# # CONFIG_STAGING is not set
260# Staging drivers are horrible, and can break the build.
261# # CONFIG_SCSI_DEBUG is not set
262# SCSI_DEBUG may change your root partition
263# # CONFIG_KGDB_SERIAL_CONSOLE is not set
264# KGDB may cause oops waiting for a connection that's not there.
265# This option points to the file containing config options that will be prepended
266# to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
267#
268# Note, config options in MIN_CONFIG will override these options.
269#
270# (default undefined)
271#ADD_CONFIG = /home/test/config-broken
272
273# The location on the host where to write temp files
274# (default /tmp/ktest)
275#TMP_DIR = /tmp/ktest
276
277# Optional log file to write the status (recommended)
278# Note, this is a DEFAULT section only option.
279# (default undefined)
280#LOG_FILE = /home/test/logfiles/target.log
281
282# Remove old logfile if it exists before starting all tests.
283# Note, this is a DEFAULT section only option.
284# (default 0)
285#CLEAR_LOG = 0
286
287# Line to define a successful boot up in console output.
288# This is what the line contains, not the entire line. If you need
289# the entire line to match, then use regular expression syntax like:
290# (do not add any quotes around it)
291#
292# SUCCESS_LINE = ^MyBox Login:$
293#
294# (default "login:")
295#SUCCESS_LINE = login:
296
297# In case the console constantly fills the screen, having
298# a specified time to stop the test after success is recommended.
299# (in seconds)
300# (default 10)
301#STOP_AFTER_SUCCESS = 10
302
303# In case the console constantly fills the screen, having
304# a specified time to stop the test after failure is recommended.
305# (in seconds)
306# (default 60)
307#STOP_AFTER_FAILURE = 60
308
309# Stop testing if a build fails. If set, the script will end if
310# a failure is detected, otherwise it will save off the .config,
311# dmesg and bootlog in a directory called
312# MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss
313# if the STORE_FAILURES directory is set.
314# (default 1)
315# Note, even if this is set to zero, there are some errors that still
316# stop the tests.
317#DIE_ON_FAILURE = 1
318
319# Directory to store failure directories on failure. If this is not
320# set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
321# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
322# (default undefined)
323#STORE_FAILURES = /home/test/failures
324
325# Build without doing a make mrproper, or removing .config
326# (default 0)
327#BUILD_NOCLEAN = 0
328
329# As the test reads the console, after it hits the SUCCESS_LINE
330# the time it waits for the monitor to settle down between reads
331# can usually be lowered.
332# (in seconds) (default 1)
333#BOOTED_TIMEOUT = 1
334
335# The timeout in seconds when we consider the box hung after
336# the console stops producing output. Be sure to leave enough
337# time here to get past a reboot. Some machines may not produce
338# any console output for a long time during a reboot. You do
339# not want the test to fail just because the system was in
340# the process of rebooting to the test kernel.
341# (default 120)
342#TIMEOUT = 120
343
344# In between tests, a reboot of the box may occur, and this
345# is the time to wait for the console after it stops producing
346# output. Some machines may produce a large lag on reboot,
347# so this should accommodate it.
348# The difference between this and TIMEOUT, is that TIMEOUT happens
349# when rebooting to the test kernel. This sleep time happens
350# after a test has completed and we are about to start running
351# another test. If a reboot to the reliable kernel happens,
352# we wait SLEEP_TIME for the console to stop producing output
353# before starting the next test.
354# (default 60)
355#SLEEP_TIME = 60
356
357# The time in between bisects to sleep (in seconds)
358# (default 60)
359#BISECT_SLEEP_TIME = 60
360
361# Reboot the target box on error (default 0)
362#REBOOT_ON_ERROR = 0
363
364# Power off the target on error (ignored if REBOOT_ON_ERROR is set)
365# Note, this is a DEFAULT section only option.
366# (default 0)
367#POWEROFF_ON_ERROR = 0
368
369# Power off the target after all tests have completed successfully
370# Note, this is a DEFAULT section only option.
371# (default 0)
372#POWEROFF_ON_SUCCESS = 0
373
374# Reboot the target after all tests have completed successfully (default 1)
375# (ignored if POWEROFF_ON_SUCCESS is set)
376#REBOOT_ON_SUCCESS = 1
377
378# In case there are issues with rebooting, you can specify this
379# to always powercycle after this amount of time after calling
380# reboot.
381# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
382# makes it powercycle immediately after rebooting. Do not define
383# it if you do not want it.
384# (default undefined)
385#POWERCYCLE_AFTER_REBOOT = 5
386
387# In case there are issues with halting, you can specify this
388# to always poweroff after this amount of time after calling
389# halt.
390# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
391# makes it poweroff immediately after halting. Do not define
392# it if you do not want it.
393# (default undefined)
394#POWEROFF_AFTER_HALT = 20
395
396# A script or command to power off the box (default undefined)
397# Needed for POWEROFF_ON_ERROR and SUCCESS
398#
399# Example for digital loggers power switch:
400#POWER_OFF = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
401#
402# Example for a virtual guest called "Guest".
403#POWER_OFF = virsh destroy Guest
404
405# The way to execute a command on the target
406# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND)
407# The variables SSH_USER, MACHINE and SSH_COMMAND are defined.
408#SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND
409
410# The way to copy a file to the target
411# (default scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE)
412# The variables SSH_USER, MACHINE, SRC_FILE and DST_FILE are defined.
413#SCP_TO_TARGET = scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE
414
415# The nice way to reboot the target
416# (default ssh $SSH_USER@$MACHINE reboot)
417# The variables SSH_USER and MACHINE are defined.
418#REBOOT = ssh $SSH_USER@$MACHINE reboot
419
420#### Per test run options ####
421# The following options are only allowed in TEST_START sections.
422# They are ignored in the DEFAULTS sections.
423#
424# All of these are optional and undefined by default, although
425# some of these options are required for TEST_TYPE of patchcheck
426# and bisect.
427#
428#
429# CHECKOUT = branch
430#
431# If the BUILD_DIR is a git repository, then you can set this option
432# to checkout the given branch before running the TEST. If you
433# specify this for the first run, that branch will be used for
434# all following tests until a new CHECKOUT is set.
435#
436#
437#
438# For TEST_TYPE = patchcheck
439#
440# This expects the BUILD_DIR to be a git repository, and
441# will checkout the PATCHCHECK_START commit.
442#
443# The option BUILD_TYPE will be ignored.
444#
445# The MIN_CONFIG will be used for all builds of the patchcheck. The build type
446# used for patchcheck is oldconfig.
447#
448# PATCHCHECK_START is required and is the first patch to
449# test (the SHA1 of the commit). You may also specify anything
450# that git checkout allows (branch name, tag, HEAD~3).
451#
452# PATCHCHECK_END is the last patch to check (default HEAD)
453#
454# PATCHCHECK_TYPE is required and is the type of test to run:
455# build, boot, test.
456#
457# Note, the build test will look for warnings; if a warning occurs
458# in a file that a commit touches, the build will fail.
459#
460# If BUILD_NOCLEAN is set, then make mrproper will not be run on
461# any of the builds, just like all other TEST_TYPE tests. But
462# what makes patchcheck different from the other tests is that if
463# BUILD_NOCLEAN is not set, only the first and last patch run
464# make mrproper. This helps speed up the test.
465#
466# Example:
467# TEST_START
468# TEST_TYPE = patchcheck
469# CHECKOUT = mybranch
470# PATCHCHECK_TYPE = boot
471# PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
472# PATCHCHECK_END = HEAD~2
473#
474#
475#
476# For TEST_TYPE = bisect
477#
478# You can specify a git bisect if the BUILD_DIR is a git repository.
479# The MIN_CONFIG will be used for all builds of the bisect. The build type
480# used for bisecting is oldconfig.
481#
482# The option BUILD_TYPE will be ignored.
483#
484# BISECT_TYPE is the type of test to perform:
485# build - bad fails to build
486# boot - bad builds but fails to boot
487# test - bad boots but fails a test
488#
489# BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types)
490# BISECT_BAD is the commit to label as bad (accepts all git bad commit types)
491#
492# The above three options are required for a bisect operation.
493#
494# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
495#
496# If an operation in the bisect fails that was not expected to
497# fail, the test ends. The state of the BUILD_DIR will be
498# left at the point where the failure occurred. You can examine the
499# reason for the failure, and perhaps even find a git commit
500# that would work to continue with. You can run:
501#
502# git bisect log > /path/to/replay/file
503#
504# Then add:
505#
506# BISECT_REPLAY= /path/to/replay/file
507#
508# and run the test again. The test will perform the initial
509# git bisect start, git bisect good, and git bisect bad, and
510# then it will run git bisect replay on this file, before
511# continuing with the bisect.
512#
513# BISECT_START = commit (optional, default undefined)
514#
515# As with BISECT_REPLAY, this is useful if the bisect failed on a
516# commit that just happens to be broken in the middle of the bisect,
517# and you need to skip it. If BISECT_START is defined, it
518# will checkout that commit after doing the initial git bisect start,
519# git bisect good, git bisect bad, and running the git bisect replay
520# if the BISECT_REPLAY is set.
521#
522# BISECT_REVERSE = 1 (optional, default 0)
523#
524# In those strange instances where it was broken forever
525# and you are trying to find where it started to work!
526# Set BISECT_GOOD to the commit that was last known to fail
527# Set BISECT_BAD to the commit that is known to start working.
528# With BISECT_REVERSE = 1, the test will consider failures as
529# good, and success as bad.
530#
531# BISECT_CHECK = 1 (optional, default 0)
532#
533# Just to be sure the good is good and bad is bad, setting
534# BISECT_CHECK to 1 will start the bisect by first checking
535# out BISECT_BAD and making sure it fails, then checking
536# out BISECT_GOOD and making sure it succeeds, before starting
537# the bisect (it works for BISECT_REVERSE too).
538#
539# You can limit the test to just check BISECT_GOOD or
540# BISECT_BAD with BISECT_CHECK = good or
541# BISECT_CHECK = bad, respectively.
542#
543# Example:
544# TEST_START
545# TEST_TYPE = bisect
546# BISECT_GOOD = v2.6.36
547# BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
548# BISECT_TYPE = build
549# MIN_CONFIG = /home/test/config-bisect
550#
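As a hedged variation on the example above (the replay path is a placeholder; the good and bad commits are reused from that example), a bisect that first verifies both endpoints and then resumes from a previously saved git bisect log might look like:

TEST_START
TEST_TYPE = bisect
BISECT_TYPE = boot
BISECT_GOOD = v2.6.36
BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
BISECT_CHECK = 1
BISECT_REPLAY = /home/test/bisect-replay
MIN_CONFIG = /home/test/config-bisect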
551#
552#
553# For TEST_TYPE = config_bisect
554#
555# This is for those cases where you have two different configs. One of them
556# works, the other does not, and you do not know which config causes
557# the problem.
558# The TEST_TYPE config_bisect will bisect the bad config looking for
559# what config causes the failure.
560#
561# The way it works is this:
562#
563# First it finds a config to work with. Since a different version, or
564# MIN_CONFIG, may cause different dependencies, it must run through this
565# preparation.
566#
567# It overwrites any config set in the bad config with a config set in
568# either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
569# are minimal and do not disable configs you want to test:
570# (i.e. # CONFIG_FOO is not set).
571#
572# An oldconfig is run on the bad config and any new config that
573# appears will be added to the configs to test.
574#
575# Finally, it generates a config with the above result and runs it
576# again through make oldconfig to produce a config that should be
577# satisfied by kconfig.
578#
579# Then it starts the bisect.
580#
581# The configs to test are cut in half. If all the configs in this
582# half depend on a config in the other half, then the other half
583# is tested instead. If no configs are enabled by either half, then
584# this means a circular dependency exists and the test fails.
585#
586# A config is created with the test half, and the bisect test is run.
587#
588# If the bisect succeeds, then all configs in the generated config
589# are removed from the configs to test and added to the configs that
590# will be enabled for all builds (they will be enabled, but not be part
591# of the configs to examine).
592#
593# If the bisect fails, then all test configs that were not enabled by
594# the config file are removed from the test. These configs will not
595# be enabled in future tests. Since the current config failed, we consider
596# this to be a subset of the config that we started with.
597#
598# When we are down to one config, it is considered the bad config.
599#
600# Note, the config chosen may not be the true bad config. Due to
601# dependencies and selections of the kbuild system, multiple
602# configs may be needed to cause a failure. If you disable the
603# config that was found and the test still fails, it is
604# recommended to rerun the config_bisect with a new
605# bad config without the found config enabled.
606#
607# The option BUILD_TYPE will be ignored.
608#
609# CONFIG_BISECT_TYPE is the type of test to perform:
610# build - bad fails to build
611# boot - bad builds but fails to boot
612# test - bad boots but fails a test
613#
614# CONFIG_BISECT is the config that failed to boot
615#
616# Example:
617# TEST_START
618# TEST_TYPE = config_bisect
619# CONFIG_BISECT_TYPE = build
620# CONFIG_BISECT = /home/test/config-bad
621# MIN_CONFIG = /home/test/config-min
622#