-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm/Kconfig  1
-rw-r--r--  arch/arm/mach-davinci/board-dm355-evm.c  1
-rw-r--r--  arch/arm/mach-davinci/board-dm365-evm.c  11
-rw-r--r--  arch/arm/mach-davinci/board-dm644x-evm.c  1
-rw-r--r--  arch/arm/mach-davinci/cp_intc.c  11
-rw-r--r--  arch/arm/mach-davinci/devices-da8xx.c  9
-rw-r--r--  arch/arm/mach-davinci/dm365.c  1
-rw-r--r--  arch/arm/mach-mmp/ttc_dkb.c  2
-rw-r--r--  arch/arm/mach-mx2/mxt_td60.c  24
-rw-r--r--  arch/arm/mach-mx25/clock.c  2
-rw-r--r--  arch/arm/mach-mx25/devices.c  19
-rw-r--r--  arch/arm/mach-mx25/devices.h  1
-rw-r--r--  arch/arm/mach-mx25/mx25pdk.c  46
-rw-r--r--  arch/arm/mach-mx3/Kconfig  3
-rw-r--r--  arch/arm/mach-mx3/mm.c  5
-rw-r--r--  arch/arm/mach-mx3/mx31ads.c  5
-rw-r--r--  arch/arm/mach-mx3/mx31lite.c  9
-rw-r--r--  arch/arm/mach-mx3/mx31moboard-devboard.c  2
-rw-r--r--  arch/arm/mach-mx3/mx31moboard-marxbot.c  2
-rw-r--r--  arch/arm/mach-mx3/mx31moboard.c  7
-rw-r--r--  arch/arm/mach-mx3/mx31pdk.c  5
-rw-r--r--  arch/arm/mach-mx3/pcm037.c  32
-rw-r--r--  arch/arm/mach-pxa/Kconfig  2
-rw-r--r--  arch/arm/mach-pxa/include/mach/zylonite.h  7
-rw-r--r--  arch/arm/mach-pxa/littleton.c  6
-rw-r--r--  arch/arm/mach-pxa/poodle.c  2
-rw-r--r--  arch/arm/mach-pxa/zeus.c  36
-rw-r--r--  arch/arm/mach-pxa/zylonite.c  87
-rw-r--r--  arch/arm/mach-pxa/zylonite_pxa300.c  12
-rw-r--r--  arch/arm/mach-pxa/zylonite_pxa320.c  4
-rw-r--r--  arch/arm/mach-w90x900/include/mach/system.h  15
-rw-r--r--  arch/arm/mach-w90x900/time.c  64
-rw-r--r--  arch/arm/mm/tlb-v7.S  1
-rw-r--r--  arch/arm/plat-mxc/include/mach/iomux-mx25.h  58
-rw-r--r--  arch/arm/plat-mxc/include/mach/mx25.h  4
-rw-r--r--  arch/arm/plat-pxa/pwm.c  8
-rw-r--r--  arch/blackfin/include/asm/page.h  5
-rw-r--r--  arch/frv/include/asm/page.h  2
-rw-r--r--  arch/ia64/include/asm/ftrace.h  1
-rw-r--r--  arch/ia64/include/asm/kprobes.h  5
-rw-r--r--  arch/ia64/include/asm/tlb.h  2
-rw-r--r--  arch/ia64/include/asm/topology.h  4
-rw-r--r--  arch/ia64/include/asm/types.h  5
-rw-r--r--  arch/ia64/kernel/mca.c  5
-rw-r--r--  arch/ia64/kernel/perfmon.c  2
-rw-r--r--  arch/ia64/mm/init.c  2
-rw-r--r--  arch/ia64/mm/tlb.c  32
-rw-r--r--  arch/sparc/kernel/nmi.c  3
-rw-r--r--  arch/sparc/kernel/perf_event.c  11
-rw-r--r--  arch/x86/Kconfig.cpu  2
-rw-r--r--  arch/x86/include/asm/uaccess_32.h  5
-rw-r--r--  arch/x86/include/asm/uaccess_64.h  5
-rw-r--r--  arch/x86/kernel/apic/io_apic.c  7
-rw-r--r--  arch/x86/kernel/e820.c  4
-rw-r--r--  arch/x86/pci/intel_bus.c  4
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c  10
-rw-r--r--  drivers/gpu/drm/drm_bufs.c  4
-rw-r--r--  drivers/gpu/drm/drm_edid.c  14
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  2
-rw-r--r--  drivers/gpu/drm/drm_pci.c  8
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  31
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  174
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  123
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  251
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  46
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  32
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  11
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  12
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  414
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  5
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  83
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c  50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c  10
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c  2
-rw-r--r--  drivers/net/arm/Kconfig  1
-rw-r--r--  drivers/net/sky2.c  1
-rw-r--r--  drivers/pci/pci-sysfs.c  6
-rw-r--r--  drivers/pci/pci.c  19
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c  28
-rw-r--r--  drivers/pci/pcie/portdrv_core.c  16
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c  17
-rw-r--r--  drivers/platform/x86/hp-wmi.c  2
-rw-r--r--  drivers/sbus/char/bbc_envctrl.c  64
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c  34
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  4
-rwxr-xr-x  drivers/scsi/lpfc/lpfc_hbadisc.c  25
-rwxr-xr-x [-rw-r--r--]  drivers/scsi/lpfc/lpfc_hw4.h  3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  48
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c  4
-rw-r--r--  drivers/scsi/pmcraid.c  8
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  32
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h  9
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  22
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  9
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  64
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  90
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h  2
-rw-r--r--  drivers/scsi/stex.c  5
-rw-r--r--  drivers/video/pxafb.c  4
-rw-r--r--  fs/binfmt_elf_fdpic.c  13
-rw-r--r--  fs/exofs/inode.c  17
-rw-r--r--  fs/exofs/pnfs.h  10
-rw-r--r--  fs/nfs/dir.c  1
-rw-r--r--  fs/nfsd/vfs.c  5
-rw-r--r--  fs/ocfs2/file.c  21
-rw-r--r--  include/drm/drmP.h  2
-rw-r--r--  include/drm/i915_drm.h  54
-rw-r--r--  include/linux/mm_types.h  2
-rw-r--r--  include/linux/pci.h  1
-rw-r--r--  kernel/module.c  17
-rw-r--r--  mm/nommu.c  17
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c  17
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c  4
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c  2
-rw-r--r--  net/sunrpc/svc_xprt.c  3
-rw-r--r--  sound/soc/codecs/ac97.c  6
-rw-r--r--  sound/soc/codecs/wm8510.c  14
-rw-r--r--  sound/soc/codecs/wm8940.c  14
-rw-r--r--  sound/soc/codecs/wm8974.c  14
-rw-r--r--  sound/soc/sh/fsi-ak4642.c  30
138 files changed, 1722 insertions, 1043 deletions
diff --git a/Makefile b/Makefile
index c628a5cfe422..12310947156e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 33 3SUBLEVEL = 33
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc3
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 233a222752c0..4f8760d7b7a7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -688,6 +688,7 @@ config ARCH_DAVINCI
688 select HAVE_IDE 688 select HAVE_IDE
689 select COMMON_CLKDEV 689 select COMMON_CLKDEV
690 select GENERIC_ALLOCATOR 690 select GENERIC_ALLOCATOR
691 select ARCH_HAS_HOLES_MEMORYMODEL
691 help 692 help
692 Support for TI's DaVinci platform. 693 Support for TI's DaVinci platform.
693 694
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index a9b650dcc172..077ecf4fecda 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -236,6 +236,7 @@ static struct vpfe_subdev_info vpfe_sub_devs[] = {
236 236
237static struct vpfe_config vpfe_cfg = { 237static struct vpfe_config vpfe_cfg = {
238 .num_subdevs = ARRAY_SIZE(vpfe_sub_devs), 238 .num_subdevs = ARRAY_SIZE(vpfe_sub_devs),
239 .i2c_adapter_id = 1,
239 .sub_devs = vpfe_sub_devs, 240 .sub_devs = vpfe_sub_devs,
240 .card_name = "DM355 EVM", 241 .card_name = "DM355 EVM",
241 .ccdc = "DM355 CCDC", 242 .ccdc = "DM355 CCDC",
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 289fe1b7d25a..b476395d2cd4 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -192,7 +192,11 @@ static struct davinci_i2c_platform_data i2c_pdata = {
192 .bus_delay = 0 /* usec */, 192 .bus_delay = 0 /* usec */,
193}; 193};
194 194
195#ifdef CONFIG_KEYBOARD_DAVINCI 195static int dm365evm_keyscan_enable(struct device *dev)
196{
197 return davinci_cfg_reg(DM365_KEYSCAN);
198}
199
196static unsigned short dm365evm_keymap[] = { 200static unsigned short dm365evm_keymap[] = {
197 KEY_KP2, 201 KEY_KP2,
198 KEY_LEFT, 202 KEY_LEFT,
@@ -214,6 +218,7 @@ static unsigned short dm365evm_keymap[] = {
214}; 218};
215 219
216static struct davinci_ks_platform_data dm365evm_ks_data = { 220static struct davinci_ks_platform_data dm365evm_ks_data = {
221 .device_enable = dm365evm_keyscan_enable,
217 .keymap = dm365evm_keymap, 222 .keymap = dm365evm_keymap,
218 .keymapsize = ARRAY_SIZE(dm365evm_keymap), 223 .keymapsize = ARRAY_SIZE(dm365evm_keymap),
219 .rep = 1, 224 .rep = 1,
@@ -222,7 +227,6 @@ static struct davinci_ks_platform_data dm365evm_ks_data = {
222 .interval = 0x2, 227 .interval = 0x2,
223 .matrix_type = DAVINCI_KEYSCAN_MATRIX_4X4, 228 .matrix_type = DAVINCI_KEYSCAN_MATRIX_4X4,
224}; 229};
225#endif
226 230
227static int cpld_mmc_get_cd(int module) 231static int cpld_mmc_get_cd(int module)
228{ 232{
@@ -511,10 +515,7 @@ static __init void dm365_evm_init(void)
511 515
512 dm365_init_asp(&dm365_evm_snd_data); 516 dm365_init_asp(&dm365_evm_snd_data);
513 dm365_init_rtc(); 517 dm365_init_rtc();
514
515#ifdef CONFIG_KEYBOARD_DAVINCI
516 dm365_init_ks(&dm365evm_ks_data); 518 dm365_init_ks(&dm365evm_ks_data);
517#endif
518} 519}
519 520
520static __init void dm365_evm_irq_init(void) 521static __init void dm365_evm_irq_init(void)
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index fd0398bc6db3..e9612cf727b7 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -247,6 +247,7 @@ static struct vpfe_subdev_info vpfe_sub_devs[] = {
247 247
248static struct vpfe_config vpfe_cfg = { 248static struct vpfe_config vpfe_cfg = {
249 .num_subdevs = ARRAY_SIZE(vpfe_sub_devs), 249 .num_subdevs = ARRAY_SIZE(vpfe_sub_devs),
250 .i2c_adapter_id = 1,
250 .sub_devs = vpfe_sub_devs, 251 .sub_devs = vpfe_sub_devs,
251 .card_name = "DM6446 EVM", 252 .card_name = "DM6446 EVM",
252 .ccdc = "DM6446 CCDC", 253 .ccdc = "DM6446 CCDC",
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index 52b287cf3a42..37311d1830eb 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -81,12 +81,23 @@ static int cp_intc_set_irq_type(unsigned int irq, unsigned int flow_type)
81 return 0; 81 return 0;
82} 82}
83 83
84/*
85 * Faking this allows us to work with suspend functions of
86 * generic drivers which call {enable|disable}_irq_wake for
87 * wake up interrupt sources (eg RTC on DA850).
88 */
89static int cp_intc_set_wake(unsigned int irq, unsigned int on)
90{
91 return 0;
92}
93
84static struct irq_chip cp_intc_irq_chip = { 94static struct irq_chip cp_intc_irq_chip = {
85 .name = "cp_intc", 95 .name = "cp_intc",
86 .ack = cp_intc_ack_irq, 96 .ack = cp_intc_ack_irq,
87 .mask = cp_intc_mask_irq, 97 .mask = cp_intc_mask_irq,
88 .unmask = cp_intc_unmask_irq, 98 .unmask = cp_intc_unmask_irq,
89 .set_type = cp_intc_set_irq_type, 99 .set_type = cp_intc_set_irq_type,
100 .set_wake = cp_intc_set_wake,
90}; 101};
91 102
92void __init cp_intc_init(void __iomem *base, unsigned short num_irq, 103void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index dd2d32c4ce86..a5105f03fd86 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -481,11 +481,18 @@ static struct platform_device da8xx_rtc_device = {
481 481
482int da8xx_register_rtc(void) 482int da8xx_register_rtc(void)
483{ 483{
484 int ret;
485
484 /* Unlock the rtc's registers */ 486 /* Unlock the rtc's registers */
485 __raw_writel(0x83e70b13, IO_ADDRESS(DA8XX_RTC_BASE + 0x6c)); 487 __raw_writel(0x83e70b13, IO_ADDRESS(DA8XX_RTC_BASE + 0x6c));
486 __raw_writel(0x95a4f1e0, IO_ADDRESS(DA8XX_RTC_BASE + 0x70)); 488 __raw_writel(0x95a4f1e0, IO_ADDRESS(DA8XX_RTC_BASE + 0x70));
487 489
488 return platform_device_register(&da8xx_rtc_device); 490 ret = platform_device_register(&da8xx_rtc_device);
491 if (!ret)
492 /* Atleast on DA850, RTC is a wakeup source */
493 device_init_wakeup(&da8xx_rtc_device.dev, true);
494
495 return ret;
489} 496}
490 497
491static struct resource da8xx_cpuidle_resources[] = { 498static struct resource da8xx_cpuidle_resources[] = {
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 2ec619ec1657..f53735cb922e 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -993,7 +993,6 @@ void __init dm365_init_asp(struct snd_platform_data *pdata)
993 993
994void __init dm365_init_ks(struct davinci_ks_platform_data *pdata) 994void __init dm365_init_ks(struct davinci_ks_platform_data *pdata)
995{ 995{
996 davinci_cfg_reg(DM365_KEYSCAN);
997 dm365_ks_device.dev.platform_data = pdata; 996 dm365_ks_device.dev.platform_data = pdata;
998 platform_device_register(&dm365_ks_device); 997 platform_device_register(&dm365_ks_device);
999} 998}
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 8f49b2b12608..b22dec4abf78 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -24,8 +24,6 @@
24 24
25#include "common.h" 25#include "common.h"
26 26
27#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
28
29static unsigned long ttc_dkb_pin_config[] __initdata = { 27static unsigned long ttc_dkb_pin_config[] __initdata = {
30 /* UART2 */ 28 /* UART2 */
31 GPIO47_UART2_RXD, 29 GPIO47_UART2_RXD,
diff --git a/arch/arm/mach-mx2/mxt_td60.c b/arch/arm/mach-mx2/mxt_td60.c
index 03dbbdc98955..8bcc1a5b8829 100644
--- a/arch/arm/mach-mx2/mxt_td60.c
+++ b/arch/arm/mach-mx2/mxt_td60.c
@@ -58,21 +58,6 @@ static unsigned int mxt_td60_pins[] __initdata = {
58 PE9_PF_UART3_RXD, 58 PE9_PF_UART3_RXD,
59 PE10_PF_UART3_CTS, 59 PE10_PF_UART3_CTS,
60 PE11_PF_UART3_RTS, 60 PE11_PF_UART3_RTS,
61 /* UART3 */
62 PB26_AF_UART4_RTS,
63 PB28_AF_UART4_TXD,
64 PB29_AF_UART4_CTS,
65 PB31_AF_UART4_RXD,
66 /* UART4 */
67 PB18_AF_UART5_TXD,
68 PB19_AF_UART5_RXD,
69 PB20_AF_UART5_CTS,
70 PB21_AF_UART5_RTS,
71 /* UART5 */
72 PB10_AF_UART6_TXD,
73 PB12_AF_UART6_CTS,
74 PB11_AF_UART6_RXD,
75 PB13_AF_UART6_RTS,
76 /* FEC */ 61 /* FEC */
77 PD0_AIN_FEC_TXD0, 62 PD0_AIN_FEC_TXD0,
78 PD1_AIN_FEC_TXD1, 63 PD1_AIN_FEC_TXD1,
@@ -261,12 +246,6 @@ static struct imxuart_platform_data uart_pdata[] = {
261 .flags = IMXUART_HAVE_RTSCTS, 246 .flags = IMXUART_HAVE_RTSCTS,
262 }, { 247 }, {
263 .flags = IMXUART_HAVE_RTSCTS, 248 .flags = IMXUART_HAVE_RTSCTS,
264 }, {
265 .flags = IMXUART_HAVE_RTSCTS,
266 }, {
267 .flags = IMXUART_HAVE_RTSCTS,
268 }, {
269 .flags = IMXUART_HAVE_RTSCTS,
270 }, 249 },
271}; 250};
272 251
@@ -278,9 +257,6 @@ static void __init mxt_td60_board_init(void)
278 mxc_register_device(&mxc_uart_device0, &uart_pdata[0]); 257 mxc_register_device(&mxc_uart_device0, &uart_pdata[0]);
279 mxc_register_device(&mxc_uart_device1, &uart_pdata[1]); 258 mxc_register_device(&mxc_uart_device1, &uart_pdata[1]);
280 mxc_register_device(&mxc_uart_device2, &uart_pdata[2]); 259 mxc_register_device(&mxc_uart_device2, &uart_pdata[2]);
281 mxc_register_device(&mxc_uart_device3, &uart_pdata[3]);
282 mxc_register_device(&mxc_uart_device4, &uart_pdata[4]);
283 mxc_register_device(&mxc_uart_device5, &uart_pdata[5]);
284 mxc_register_device(&mxc_nand_device, &mxt_td60_nand_board_info); 260 mxc_register_device(&mxc_nand_device, &mxt_td60_nand_board_info);
285 261
286 i2c_register_board_info(0, mxt_td60_i2c_devices, 262 i2c_register_board_info(0, mxt_td60_i2c_devices,
diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c
index ef26951a5275..6e838b857712 100644
--- a/arch/arm/mach-mx25/clock.c
+++ b/arch/arm/mach-mx25/clock.c
@@ -173,6 +173,7 @@ DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL);
173DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL); 173DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL);
174DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL); 174DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL);
175DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL); 175DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL);
176DEFINE_CLOCK(fec_clk, 0, CCM_CGCR0, 23, get_rate_ipg, NULL);
176 177
177#define _REGISTER_CLOCK(d, n, c) \ 178#define _REGISTER_CLOCK(d, n, c) \
178 { \ 179 { \
@@ -204,6 +205,7 @@ static struct clk_lookup lookups[] = {
204 _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk) 205 _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
205 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk) 206 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
206 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk) 207 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
208 _REGISTER_CLOCK("fec.0", NULL, fec_clk)
207}; 209};
208 210
209int __init mx25_clocks_init(unsigned long fref) 211int __init mx25_clocks_init(unsigned long fref)
diff --git a/arch/arm/mach-mx25/devices.c b/arch/arm/mach-mx25/devices.c
index 63511de3a559..9fdeea1c083b 100644
--- a/arch/arm/mach-mx25/devices.c
+++ b/arch/arm/mach-mx25/devices.c
@@ -419,3 +419,22 @@ int __init mxc_register_gpios(void)
419 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports)); 419 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
420} 420}
421 421
422static struct resource mx25_fec_resources[] = {
423 {
424 .start = MX25_FEC_BASE_ADDR,
425 .end = MX25_FEC_BASE_ADDR + 0xfff,
426 .flags = IORESOURCE_MEM,
427 },
428 {
429 .start = MX25_INT_FEC,
430 .end = MX25_INT_FEC,
431 .flags = IORESOURCE_IRQ,
432 },
433};
434
435struct platform_device mx25_fec_device = {
436 .name = "fec",
437 .id = 0,
438 .num_resources = ARRAY_SIZE(mx25_fec_resources),
439 .resource = mx25_fec_resources,
440};
diff --git a/arch/arm/mach-mx25/devices.h b/arch/arm/mach-mx25/devices.h
index fe6bf88ad1dd..fe5420fcd11f 100644
--- a/arch/arm/mach-mx25/devices.h
+++ b/arch/arm/mach-mx25/devices.h
@@ -17,3 +17,4 @@ extern struct platform_device mxc_keypad_device;
17extern struct platform_device mxc_i2c_device0; 17extern struct platform_device mxc_i2c_device0;
18extern struct platform_device mxc_i2c_device1; 18extern struct platform_device mxc_i2c_device1;
19extern struct platform_device mxc_i2c_device2; 19extern struct platform_device mxc_i2c_device2;
20extern struct platform_device mx25_fec_device;
diff --git a/arch/arm/mach-mx25/mx25pdk.c b/arch/arm/mach-mx25/mx25pdk.c
index d23ae571c03f..921bc99ea231 100644
--- a/arch/arm/mach-mx25/mx25pdk.c
+++ b/arch/arm/mach-mx25/mx25pdk.c
@@ -18,10 +18,11 @@
18 18
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/delay.h>
21#include <linux/clk.h> 22#include <linux/clk.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/gpio.h> 24#include <linux/gpio.h>
24#include <linux/smsc911x.h> 25#include <linux/fec.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26 27
27#include <mach/hardware.h> 28#include <mach/hardware.h>
@@ -35,16 +36,57 @@
35#include <mach/mx25.h> 36#include <mach/mx25.h>
36#include <mach/mxc_nand.h> 37#include <mach/mxc_nand.h>
37#include "devices.h" 38#include "devices.h"
38#include <mach/iomux-v3.h> 39#include <mach/iomux.h>
39 40
40static struct imxuart_platform_data uart_pdata = { 41static struct imxuart_platform_data uart_pdata = {
41 .flags = IMXUART_HAVE_RTSCTS, 42 .flags = IMXUART_HAVE_RTSCTS,
42}; 43};
43 44
45static struct pad_desc mx25pdk_pads[] = {
46 MX25_PAD_FEC_MDC__FEC_MDC,
47 MX25_PAD_FEC_MDIO__FEC_MDIO,
48 MX25_PAD_FEC_TDATA0__FEC_TDATA0,
49 MX25_PAD_FEC_TDATA1__FEC_TDATA1,
50 MX25_PAD_FEC_TX_EN__FEC_TX_EN,
51 MX25_PAD_FEC_RDATA0__FEC_RDATA0,
52 MX25_PAD_FEC_RDATA1__FEC_RDATA1,
53 MX25_PAD_FEC_RX_DV__FEC_RX_DV,
54 MX25_PAD_FEC_TX_CLK__FEC_TX_CLK,
55 MX25_PAD_A17__GPIO_2_3, /* FEC_EN, GPIO 35 */
56 MX25_PAD_D12__GPIO_4_8, /* FEC_RESET_B, GPIO 104 */
57};
58
59static struct fec_platform_data mx25_fec_pdata = {
60 .phy = PHY_INTERFACE_MODE_RMII,
61};
62
63#define FEC_ENABLE_GPIO 35
64#define FEC_RESET_B_GPIO 104
65
66static void __init mx25pdk_fec_reset(void)
67{
68 gpio_request(FEC_ENABLE_GPIO, "FEC PHY enable");
69 gpio_request(FEC_RESET_B_GPIO, "FEC PHY reset");
70
71 gpio_direction_output(FEC_ENABLE_GPIO, 0); /* drop PHY power */
72 gpio_direction_output(FEC_RESET_B_GPIO, 0); /* assert reset */
73 udelay(2);
74
75 /* turn on PHY power and lift reset */
76 gpio_set_value(FEC_ENABLE_GPIO, 1);
77 gpio_set_value(FEC_RESET_B_GPIO, 1);
78}
79
44static void __init mx25pdk_init(void) 80static void __init mx25pdk_init(void)
45{ 81{
82 mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
83 ARRAY_SIZE(mx25pdk_pads));
84
46 mxc_register_device(&mxc_uart_device0, &uart_pdata); 85 mxc_register_device(&mxc_uart_device0, &uart_pdata);
47 mxc_register_device(&mxc_usbh2, NULL); 86 mxc_register_device(&mxc_usbh2, NULL);
87
88 mx25pdk_fec_reset();
89 mxc_register_device(&mx25_fec_device, &mx25_fec_pdata);
48} 90}
49 91
50static void __init mx25pdk_timer_init(void) 92static void __init mx25pdk_timer_init(void)
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index ea8ed109a7c2..28294416b0af 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -49,6 +49,7 @@ config MACH_PCM037_EET
49config MACH_MX31LITE 49config MACH_MX31LITE
50 bool "Support MX31 LITEKIT (LogicPD)" 50 bool "Support MX31 LITEKIT (LogicPD)"
51 select ARCH_MX31 51 select ARCH_MX31
52 select MXC_ULPI if USB_ULPI
52 help 53 help
53 Include support for MX31 LITEKIT platform. This includes specific 54 Include support for MX31 LITEKIT platform. This includes specific
54 configurations for the board and its peripherals. 55 configurations for the board and its peripherals.
@@ -63,7 +64,7 @@ config MACH_MX31_3DS
63config MACH_MX31MOBOARD 64config MACH_MX31MOBOARD
64 bool "Support mx31moboard platforms (EPFL Mobots group)" 65 bool "Support mx31moboard platforms (EPFL Mobots group)"
65 select ARCH_MX31 66 select ARCH_MX31
66 select MXC_ULPI 67 select MXC_ULPI if USB_ULPI
67 help 68 help
68 Include support for mx31moboard platform. This includes specific 69 Include support for mx31moboard platform. This includes specific
69 configurations for the board and its peripherals. 70 configurations for the board and its peripherals.
diff --git a/arch/arm/mach-mx3/mm.c b/arch/arm/mach-mx3/mm.c
index bedf5b8d976a..6858a4f9806c 100644
--- a/arch/arm/mach-mx3/mm.c
+++ b/arch/arm/mach-mx3/mm.c
@@ -65,6 +65,11 @@ static struct map_desc mxc_io_desc[] __initdata = {
65 .pfn = __phys_to_pfn(AIPS2_BASE_ADDR), 65 .pfn = __phys_to_pfn(AIPS2_BASE_ADDR),
66 .length = AIPS2_SIZE, 66 .length = AIPS2_SIZE,
67 .type = MT_DEVICE_NONSHARED 67 .type = MT_DEVICE_NONSHARED
68 }, {
69 .virtual = SPBA0_BASE_ADDR_VIRT,
70 .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
71 .length = SPBA0_SIZE,
72 .type = MT_DEVICE_NONSHARED
68 }, 73 },
69}; 74};
70 75
diff --git a/arch/arm/mach-mx3/mx31ads.c b/arch/arm/mach-mx3/mx31ads.c
index 0497c152be18..3e7bafa2ddbb 100644
--- a/arch/arm/mach-mx3/mx31ads.c
+++ b/arch/arm/mach-mx3/mx31ads.c
@@ -494,11 +494,6 @@ static void mxc_init_i2c(void)
494 */ 494 */
495static struct map_desc mx31ads_io_desc[] __initdata = { 495static struct map_desc mx31ads_io_desc[] __initdata = {
496 { 496 {
497 .virtual = SPBA0_BASE_ADDR_VIRT,
498 .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
499 .length = SPBA0_SIZE,
500 .type = MT_DEVICE_NONSHARED
501 }, {
502 .virtual = CS4_BASE_ADDR_VIRT, 497 .virtual = CS4_BASE_ADDR_VIRT,
503 .pfn = __phys_to_pfn(CS4_BASE_ADDR), 498 .pfn = __phys_to_pfn(CS4_BASE_ADDR),
504 .length = CS4_SIZE / 2, 499 .length = CS4_SIZE / 2,
diff --git a/arch/arm/mach-mx3/mx31lite.c b/arch/arm/mach-mx3/mx31lite.c
index def6b6736594..789b20d1730f 100644
--- a/arch/arm/mach-mx3/mx31lite.c
+++ b/arch/arm/mach-mx3/mx31lite.c
@@ -135,6 +135,7 @@ static struct spi_board_info mc13783_spi_dev __initdata = {
135 * USB 135 * USB
136 */ 136 */
137 137
138#if defined(CONFIG_USB_ULPI)
138#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ 139#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
139 PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU) 140 PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
140 141
@@ -180,6 +181,7 @@ static struct mxc_usbh_platform_data usbh2_pdata = {
180 .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, 181 .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
181 .flags = MXC_EHCI_POWER_PINS_ENABLED, 182 .flags = MXC_EHCI_POWER_PINS_ENABLED,
182}; 183};
184#endif
183 185
184/* 186/*
185 * NOR flash 187 * NOR flash
@@ -212,11 +214,6 @@ static struct platform_device physmap_flash_device = {
212 */ 214 */
213static struct map_desc mx31lite_io_desc[] __initdata = { 215static struct map_desc mx31lite_io_desc[] __initdata = {
214 { 216 {
215 .virtual = SPBA0_BASE_ADDR_VIRT,
216 .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
217 .length = SPBA0_SIZE,
218 .type = MT_DEVICE_NONSHARED
219 }, {
220 .virtual = CS4_BASE_ADDR_VIRT, 217 .virtual = CS4_BASE_ADDR_VIRT,
221 .pfn = __phys_to_pfn(CS4_BASE_ADDR), 218 .pfn = __phys_to_pfn(CS4_BASE_ADDR),
222 .length = CS4_SIZE, 219 .length = CS4_SIZE,
@@ -261,11 +258,13 @@ static void __init mxc_board_init(void)
261 mxc_register_device(&mxc_spi_device1, &spi1_pdata); 258 mxc_register_device(&mxc_spi_device1, &spi1_pdata);
262 spi_register_board_info(&mc13783_spi_dev, 1); 259 spi_register_board_info(&mc13783_spi_dev, 1);
263 260
261#if defined(CONFIG_USB_ULPI)
264 /* USB */ 262 /* USB */
265 usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, 263 usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
266 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); 264 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
267 265
268 mxc_register_device(&mxc_usbh2, &usbh2_pdata); 266 mxc_register_device(&mxc_usbh2, &usbh2_pdata);
267#endif
269 268
270 /* SMSC9117 IRQ pin */ 269 /* SMSC9117 IRQ pin */
271 ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_SFS6), "sms9117-irq"); 270 ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_SFS6), "sms9117-irq");
diff --git a/arch/arm/mach-mx3/mx31moboard-devboard.c b/arch/arm/mach-mx3/mx31moboard-devboard.c
index 8fc624f141cb..438428eaf769 100644
--- a/arch/arm/mach-mx3/mx31moboard-devboard.c
+++ b/arch/arm/mach-mx3/mx31moboard-devboard.c
@@ -179,7 +179,7 @@ static int __init devboard_usbh1_init(void)
179 179
180 usbh1_pdata.otg = otg; 180 usbh1_pdata.otg = otg;
181 181
182 return mxc_register_device(&mx31_usbh1, &usbh1_pdata); 182 return mxc_register_device(&mxc_usbh1, &usbh1_pdata);
183} 183}
184 184
185/* 185/*
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index 85184a35e674..1f44b9ccbb0f 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -294,7 +294,7 @@ static int __init marxbot_usbh1_init(void)
294 294
295 usbh1_pdata.otg = otg; 295 usbh1_pdata.otg = otg;
296 296
297 return mxc_register_device(&mx31_usbh1, &usbh1_pdata); 297 return mxc_register_device(&mxc_usbh1, &usbh1_pdata);
298} 298}
299 299
300/* 300/*
diff --git a/arch/arm/mach-mx3/mx31moboard.c b/arch/arm/mach-mx3/mx31moboard.c
index b70529145936..cfd605d078ec 100644
--- a/arch/arm/mach-mx3/mx31moboard.c
+++ b/arch/arm/mach-mx3/mx31moboard.c
@@ -346,6 +346,8 @@ static struct fsl_usb2_platform_data usb_pdata = {
346 .phy_mode = FSL_USB2_PHY_ULPI, 346 .phy_mode = FSL_USB2_PHY_ULPI,
347}; 347};
348 348
349#if defined(CONFIG_USB_ULPI)
350
349#define USBH2_EN_B IOMUX_TO_GPIO(MX31_PIN_SCK6) 351#define USBH2_EN_B IOMUX_TO_GPIO(MX31_PIN_SCK6)
350 352
351static int moboard_usbh2_hw_init(struct platform_device *pdev) 353static int moboard_usbh2_hw_init(struct platform_device *pdev)
@@ -392,8 +394,11 @@ static int __init moboard_usbh2_init(void)
392 usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, 394 usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
393 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); 395 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
394 396
395 return mxc_register_device(&mx31_usbh2, &usbh2_pdata); 397 return mxc_register_device(&mxc_usbh2, &usbh2_pdata);
396} 398}
399#else
400static inline int moboard_usbh2_init(void) { return 0; }
401#endif
397 402
398 403
399static struct gpio_led mx31moboard_leds[] = { 404static struct gpio_led mx31moboard_leds[] = {
diff --git a/arch/arm/mach-mx3/mx31pdk.c b/arch/arm/mach-mx3/mx31pdk.c
index 0f7a2f06bc2d..18715f1aa7eb 100644
--- a/arch/arm/mach-mx3/mx31pdk.c
+++ b/arch/arm/mach-mx3/mx31pdk.c
@@ -211,11 +211,6 @@ static int __init mx31pdk_init_expio(void)
211 */ 211 */
212static struct map_desc mx31pdk_io_desc[] __initdata = { 212static struct map_desc mx31pdk_io_desc[] __initdata = {
213 { 213 {
214 .virtual = SPBA0_BASE_ADDR_VIRT,
215 .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
216 .length = SPBA0_SIZE,
217 .type = MT_DEVICE_NONSHARED,
218 }, {
219 .virtual = CS5_BASE_ADDR_VIRT, 214 .virtual = CS5_BASE_ADDR_VIRT,
220 .pfn = __phys_to_pfn(CS5_BASE_ADDR), 215 .pfn = __phys_to_pfn(CS5_BASE_ADDR),
221 .length = CS5_SIZE, 216 .length = CS5_SIZE,
diff --git a/arch/arm/mach-mx3/pcm037.c b/arch/arm/mach-mx3/pcm037.c
index 6cbaabedf386..5be396917c99 100644
--- a/arch/arm/mach-mx3/pcm037.c
+++ b/arch/arm/mach-mx3/pcm037.c
@@ -322,16 +322,25 @@ static int pcm037_camera_power(struct device *dev, int on)
322 return 0; 322 return 0;
323} 323}
324 324
325static struct i2c_board_info pcm037_i2c_2_devices[] = { 325static struct i2c_board_info pcm037_i2c_camera[] = {
326 { 326 {
327 I2C_BOARD_INFO("mt9t031", 0x5d), 327 I2C_BOARD_INFO("mt9t031", 0x5d),
328 }, {
329 I2C_BOARD_INFO("mt9v022", 0x48),
328 }, 330 },
329}; 331};
330 332
331static struct soc_camera_link iclink = { 333static struct soc_camera_link iclink_mt9v022 = {
334 .bus_id = 0, /* Must match with the camera ID */
335 .board_info = &pcm037_i2c_camera[1],
336 .i2c_adapter_id = 2,
337 .module_name = "mt9v022",
338};
339
340static struct soc_camera_link iclink_mt9t031 = {
332 .bus_id = 0, /* Must match with the camera ID */ 341 .bus_id = 0, /* Must match with the camera ID */
333 .power = pcm037_camera_power, 342 .power = pcm037_camera_power,
334 .board_info = &pcm037_i2c_2_devices[0], 343 .board_info = &pcm037_i2c_camera[0],
335 .i2c_adapter_id = 2, 344 .i2c_adapter_id = 2,
336 .module_name = "mt9t031", 345 .module_name = "mt9t031",
337}; 346};
@@ -345,11 +354,19 @@ static struct i2c_board_info pcm037_i2c_devices[] = {
345 } 354 }
346}; 355};
347 356
348static struct platform_device pcm037_camera = { 357static struct platform_device pcm037_mt9t031 = {
349 .name = "soc-camera-pdrv", 358 .name = "soc-camera-pdrv",
350 .id = 0, 359 .id = 0,
351 .dev = { 360 .dev = {
352 .platform_data = &iclink, 361 .platform_data = &iclink_mt9t031,
362 },
363};
364
365static struct platform_device pcm037_mt9v022 = {
366 .name = "soc-camera-pdrv",
367 .id = 1,
368 .dev = {
369 .platform_data = &iclink_mt9v022,
353 }, 370 },
354}; 371};
355 372
@@ -449,7 +466,8 @@ static int __init pcm037_camera_alloc_dma(const size_t buf_size)
449static struct platform_device *devices[] __initdata = { 466static struct platform_device *devices[] __initdata = {
450 &pcm037_flash, 467 &pcm037_flash,
451 &pcm037_sram_device, 468 &pcm037_sram_device,
452 &pcm037_camera, 469 &pcm037_mt9t031,
470 &pcm037_mt9v022,
453}; 471};
454 472
455static struct ipu_platform_data mx3_ipu_data = { 473static struct ipu_platform_data mx3_ipu_data = {
@@ -599,7 +617,7 @@ static void __init mxc_board_init(void)
599 if (!ret) 617 if (!ret)
600 gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), 1); 618 gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), 1);
601 else 619 else
602 iclink.power = NULL; 620 iclink_mt9t031.power = NULL;
603 621
604 if (!pcm037_camera_alloc_dma(4 * 1024 * 1024)) 622 if (!pcm037_camera_alloc_dma(4 * 1024 * 1024))
605 mxc_register_device(&mx3_camera, &camera_pdata); 623 mxc_register_device(&mx3_camera, &camera_pdata);
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 8a0837ea0294..dee92182749b 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -37,6 +37,8 @@ config MACH_ZYLONITE320
37config MACH_LITTLETON 37config MACH_LITTLETON
38 bool "PXA3xx Form Factor Platform (aka Littleton)" 38 bool "PXA3xx Form Factor Platform (aka Littleton)"
39 select PXA3xx 39 select PXA3xx
40 select CPU_PXA300
41 select CPU_PXA310
40 select PXA_SSP 42 select PXA_SSP
41 43
42config MACH_TAVOREVB 44config MACH_TAVOREVB
diff --git a/arch/arm/mach-pxa/include/mach/zylonite.h b/arch/arm/mach-pxa/include/mach/zylonite.h
index bf6785adccf4..9edf645368d6 100644
--- a/arch/arm/mach-pxa/include/mach/zylonite.h
+++ b/arch/arm/mach-pxa/include/mach/zylonite.h
@@ -8,13 +8,6 @@
8/* the following variables are processor specific and initialized 8/* the following variables are processor specific and initialized
9 * by the corresponding zylonite_pxa3xx_init() 9 * by the corresponding zylonite_pxa3xx_init()
10 */ 10 */
11struct platform_mmc_slot {
12 int gpio_cd;
13 int gpio_wp;
14};
15
16extern struct platform_mmc_slot zylonite_mmc_slot[];
17
18extern int gpio_eth_irq; 11extern int gpio_eth_irq;
19extern int gpio_debug_led1; 12extern int gpio_debug_led1;
20extern int gpio_debug_led2; 13extern int gpio_debug_led2;
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index f28c1715b910..fa527b258d61 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -110,6 +110,12 @@ static mfp_cfg_t littleton_mfp_cfg[] __initdata = {
110 GPIO7_MMC1_CLK, 110 GPIO7_MMC1_CLK,
111 GPIO8_MMC1_CMD, 111 GPIO8_MMC1_CMD,
112 GPIO15_GPIO, /* card detect */ 112 GPIO15_GPIO, /* card detect */
113
114 /* UART3 */
115 GPIO107_UART3_CTS,
116 GPIO108_UART3_RTS,
117 GPIO109_UART3_TXD,
118 GPIO110_UART3_RXD,
113}; 119};
114 120
115static struct resource smc91x_resources[] = { 121static struct resource smc91x_resources[] = {
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index e5eeb3a62d01..c2b938a4d5c9 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -293,7 +293,7 @@ static struct pxamci_platform_data poodle_mci_platform_data = {
293 .init = poodle_mci_init, 293 .init = poodle_mci_init,
294 .setpower = poodle_mci_setpower, 294 .setpower = poodle_mci_setpower,
295 .exit = poodle_mci_exit, 295 .exit = poodle_mci_exit,
296 .gpio_card_detect = POODLE_IRQ_GPIO_nSD_DETECT, 296 .gpio_card_detect = POODLE_GPIO_nSD_DETECT,
297 .gpio_card_ro = POODLE_GPIO_nSD_WP, 297 .gpio_card_ro = POODLE_GPIO_nSD_WP,
298 .gpio_power = -1, 298 .gpio_power = -1,
299}; 299};
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 5b986a8bd9e6..75f2a37f945d 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -25,6 +25,7 @@
25#include <linux/mtd/physmap.h> 25#include <linux/mtd/physmap.h>
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27#include <linux/i2c/pca953x.h> 27#include <linux/i2c/pca953x.h>
28#include <linux/apm-emulation.h>
28 29
29#include <asm/mach-types.h> 30#include <asm/mach-types.h>
30#include <asm/mach/arch.h> 31#include <asm/mach/arch.h>
@@ -626,8 +627,27 @@ static void zeus_power_off(void)
626 pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP); 627 pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP);
627} 628}
628 629
629int zeus_get_pcb_info(struct i2c_client *client, unsigned gpio, 630#ifdef CONFIG_APM_EMULATION
630 unsigned ngpio, void *context) 631static void zeus_get_power_status(struct apm_power_info *info)
632{
633 /* Power supply is always present */
634 info->ac_line_status = APM_AC_ONLINE;
635 info->battery_status = APM_BATTERY_STATUS_NOT_PRESENT;
636 info->battery_flag = APM_BATTERY_FLAG_NOT_PRESENT;
637}
638
639static inline void zeus_setup_apm(void)
640{
641 apm_get_power_status = zeus_get_power_status;
642}
643#else
644static inline void zeus_setup_apm(void)
645{
646}
647#endif
648
649static int zeus_get_pcb_info(struct i2c_client *client, unsigned gpio,
650 unsigned ngpio, void *context)
631{ 651{
632 int i; 652 int i;
633 u8 pcb_info = 0; 653 u8 pcb_info = 0;
@@ -726,9 +746,18 @@ static mfp_cfg_t zeus_pin_config[] __initdata = {
726 GPIO99_GPIO, /* CF RDY */ 746 GPIO99_GPIO, /* CF RDY */
727}; 747};
728 748
749/*
750 * DM9k MSCx settings: SRAM, 16 bits
751 * 17 cycles delay first access
752 * 5 cycles delay next access
753 * 13 cycles recovery time
754 * faster device
755 */
756#define DM9K_MSC_VALUE 0xe4c9
757
729static void __init zeus_init(void) 758static void __init zeus_init(void)
730{ 759{
731 u16 dm9000_msc = 0xe279; 760 u16 dm9000_msc = DM9K_MSC_VALUE;
732 761
733 system_rev = __raw_readw(ZEUS_CPLD_VERSION); 762 system_rev = __raw_readw(ZEUS_CPLD_VERSION);
734 pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f)); 763 pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f));
@@ -738,6 +767,7 @@ static void __init zeus_init(void)
738 MSC1 = (MSC1 & 0xffff0000) | dm9000_msc; 767 MSC1 = (MSC1 & 0xffff0000) | dm9000_msc;
739 768
740 pm_power_off = zeus_power_off; 769 pm_power_off = zeus_power_off;
770 zeus_setup_apm();
741 771
742 pxa2xx_mfp_config(ARRAY_AND_SIZE(zeus_pin_config)); 772 pxa2xx_mfp_config(ARRAY_AND_SIZE(zeus_pin_config));
743 773
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index b66e9e2d06e7..2b4043c04d0c 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -36,9 +36,6 @@
36#include "devices.h" 36#include "devices.h"
37#include "generic.h" 37#include "generic.h"
38 38
39#define MAX_SLOTS 3
40struct platform_mmc_slot zylonite_mmc_slot[MAX_SLOTS];
41
42int gpio_eth_irq; 39int gpio_eth_irq;
43int gpio_debug_led1; 40int gpio_debug_led1;
44int gpio_debug_led2; 41int gpio_debug_led2;
@@ -220,84 +217,28 @@ static inline void zylonite_init_lcd(void) {}
220#endif 217#endif
221 218
222#if defined(CONFIG_MMC) 219#if defined(CONFIG_MMC)
223static int zylonite_mci_ro(struct device *dev)
224{
225 struct platform_device *pdev = to_platform_device(dev);
226
227 return gpio_get_value(zylonite_mmc_slot[pdev->id].gpio_wp);
228}
229
230static int zylonite_mci_init(struct device *dev,
231 irq_handler_t zylonite_detect_int,
232 void *data)
233{
234 struct platform_device *pdev = to_platform_device(dev);
235 int err, cd_irq, gpio_cd, gpio_wp;
236
237 cd_irq = gpio_to_irq(zylonite_mmc_slot[pdev->id].gpio_cd);
238 gpio_cd = zylonite_mmc_slot[pdev->id].gpio_cd;
239 gpio_wp = zylonite_mmc_slot[pdev->id].gpio_wp;
240
241 /*
242 * setup GPIO for Zylonite MMC controller
243 */
244 err = gpio_request(gpio_cd, "mmc card detect");
245 if (err)
246 goto err_request_cd;
247 gpio_direction_input(gpio_cd);
248
249 err = gpio_request(gpio_wp, "mmc write protect");
250 if (err)
251 goto err_request_wp;
252 gpio_direction_input(gpio_wp);
253
254 err = request_irq(cd_irq, zylonite_detect_int,
255 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
256 "MMC card detect", data);
257 if (err) {
258 printk(KERN_ERR "%s: MMC/SD/SDIO: "
259 "can't request card detect IRQ\n", __func__);
260 goto err_request_irq;
261 }
262
263 return 0;
264
265err_request_irq:
266 gpio_free(gpio_wp);
267err_request_wp:
268 gpio_free(gpio_cd);
269err_request_cd:
270 return err;
271}
272
273static void zylonite_mci_exit(struct device *dev, void *data)
274{
275 struct platform_device *pdev = to_platform_device(dev);
276 int cd_irq, gpio_cd, gpio_wp;
277
278 cd_irq = gpio_to_irq(zylonite_mmc_slot[pdev->id].gpio_cd);
279 gpio_cd = zylonite_mmc_slot[pdev->id].gpio_cd;
280 gpio_wp = zylonite_mmc_slot[pdev->id].gpio_wp;
281
282 free_irq(cd_irq, data);
283 gpio_free(gpio_cd);
284 gpio_free(gpio_wp);
285}
286
287static struct pxamci_platform_data zylonite_mci_platform_data = { 220static struct pxamci_platform_data zylonite_mci_platform_data = {
288 .detect_delay = 20, 221 .detect_delay = 20,
289 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, 222 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
290 .init = zylonite_mci_init, 223 .gpio_card_detect = EXT_GPIO(0),
291 .exit = zylonite_mci_exit, 224 .gpio_card_ro = EXT_GPIO(2),
292 .get_ro = zylonite_mci_ro,
293 .gpio_card_detect = -1,
294 .gpio_card_ro = -1,
295 .gpio_power = -1, 225 .gpio_power = -1,
296}; 226};
297 227
298static struct pxamci_platform_data zylonite_mci2_platform_data = { 228static struct pxamci_platform_data zylonite_mci2_platform_data = {
299 .detect_delay = 20, 229 .detect_delay = 20,
300 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, 230 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
231 .gpio_card_detect = EXT_GPIO(1),
232 .gpio_card_ro = EXT_GPIO(3),
233 .gpio_power = -1,
234};
235
236static struct pxamci_platform_data zylonite_mci3_platform_data = {
237 .detect_delay = 20,
238 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
239 .gpio_card_detect = EXT_GPIO(30),
240 .gpio_card_ro = EXT_GPIO(31),
241 .gpio_power = -1,
301}; 242};
302 243
303static void __init zylonite_init_mmc(void) 244static void __init zylonite_init_mmc(void)
@@ -305,7 +246,7 @@ static void __init zylonite_init_mmc(void)
305 pxa_set_mci_info(&zylonite_mci_platform_data); 246 pxa_set_mci_info(&zylonite_mci_platform_data);
306 pxa3xx_set_mci2_info(&zylonite_mci2_platform_data); 247 pxa3xx_set_mci2_info(&zylonite_mci2_platform_data);
307 if (cpu_is_pxa310()) 248 if (cpu_is_pxa310())
308 pxa3xx_set_mci3_info(&zylonite_mci_platform_data); 249 pxa3xx_set_mci3_info(&zylonite_mci3_platform_data);
309} 250}
310#else 251#else
311static inline void zylonite_init_mmc(void) {} 252static inline void zylonite_init_mmc(void) {}
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c
index 84095440a878..3aa73b3e33f2 100644
--- a/arch/arm/mach-pxa/zylonite_pxa300.c
+++ b/arch/arm/mach-pxa/zylonite_pxa300.c
@@ -129,8 +129,8 @@ static mfp_cfg_t common_mfp_cfg[] __initdata = {
129 GPIO22_I2C_SDA, 129 GPIO22_I2C_SDA,
130 130
131 /* GPIO */ 131 /* GPIO */
132 GPIO18_GPIO, /* GPIO Expander #0 INT_N */ 132 GPIO18_GPIO | MFP_PULL_HIGH, /* GPIO Expander #0 INT_N */
133 GPIO19_GPIO, /* GPIO Expander #1 INT_N */ 133 GPIO19_GPIO | MFP_PULL_HIGH, /* GPIO Expander #1 INT_N */
134}; 134};
135 135
136static mfp_cfg_t pxa300_mfp_cfg[] __initdata = { 136static mfp_cfg_t pxa300_mfp_cfg[] __initdata = {
@@ -258,10 +258,6 @@ void __init zylonite_pxa300_init(void)
258 /* detect LCD panel */ 258 /* detect LCD panel */
259 zylonite_detect_lcd_panel(); 259 zylonite_detect_lcd_panel();
260 260
261 /* MMC card detect & write protect for controller 0 */
262 zylonite_mmc_slot[0].gpio_cd = EXT_GPIO(0);
263 zylonite_mmc_slot[0].gpio_wp = EXT_GPIO(2);
264
265 /* WM9713 IRQ */ 261 /* WM9713 IRQ */
266 wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26); 262 wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26);
267 263
@@ -276,10 +272,6 @@ void __init zylonite_pxa300_init(void)
276 if (cpu_is_pxa310()) { 272 if (cpu_is_pxa310()) {
277 pxa3xx_mfp_config(ARRAY_AND_SIZE(pxa310_mfp_cfg)); 273 pxa3xx_mfp_config(ARRAY_AND_SIZE(pxa310_mfp_cfg));
278 gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO102); 274 gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO102);
279
280 /* MMC card detect & write protect for controller 2 */
281 zylonite_mmc_slot[2].gpio_cd = EXT_GPIO(30);
282 zylonite_mmc_slot[2].gpio_wp = EXT_GPIO(31);
283 } 275 }
284 276
285 /* GPIOs for Debug LEDs */ 277 /* GPIOs for Debug LEDs */
diff --git a/arch/arm/mach-pxa/zylonite_pxa320.c b/arch/arm/mach-pxa/zylonite_pxa320.c
index 60d08f23f5e4..9942bac4cf7d 100644
--- a/arch/arm/mach-pxa/zylonite_pxa320.c
+++ b/arch/arm/mach-pxa/zylonite_pxa320.c
@@ -209,10 +209,6 @@ void __init zylonite_pxa320_init(void)
209 gpio_debug_led1 = mfp_to_gpio(MFP_PIN_GPIO1_2); 209 gpio_debug_led1 = mfp_to_gpio(MFP_PIN_GPIO1_2);
210 gpio_debug_led2 = mfp_to_gpio(MFP_PIN_GPIO4_2); 210 gpio_debug_led2 = mfp_to_gpio(MFP_PIN_GPIO4_2);
211 211
212 /* MMC card detect & write protect for controller 0 */
213 zylonite_mmc_slot[0].gpio_cd = mfp_to_gpio(MFP_PIN_GPIO1);
214 zylonite_mmc_slot[0].gpio_wp = mfp_to_gpio(MFP_PIN_GPIO5);
215
216 /* WM9713 IRQ */ 212 /* WM9713 IRQ */
217 wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO15); 213 wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO15);
218 } 214 }
diff --git a/arch/arm/mach-w90x900/include/mach/system.h b/arch/arm/mach-w90x900/include/mach/system.h
index 940640066857..ce228bdc66dd 100644
--- a/arch/arm/mach-w90x900/include/mach/system.h
+++ b/arch/arm/mach-w90x900/include/mach/system.h
@@ -15,7 +15,15 @@
15 * 15 *
16 */ 16 */
17 17
18#include <linux/io.h>
18#include <asm/proc-fns.h> 19#include <asm/proc-fns.h>
20#include <mach/map.h>
21#include <mach/regs-timer.h>
22
23#define WTCR (TMR_BA + 0x1C)
24#define WTCLK (1 << 10)
25#define WTE (1 << 7)
26#define WTRE (1 << 1)
19 27
20static void arch_idle(void) 28static void arch_idle(void)
21{ 29{
@@ -23,6 +31,11 @@ static void arch_idle(void)
23 31
24static void arch_reset(char mode, const char *cmd) 32static void arch_reset(char mode, const char *cmd)
25{ 33{
26 cpu_reset(0); 34 if (mode == 's') {
35 /* Jump into ROM at address 0 */
36 cpu_reset(0);
37 } else {
38 __raw_writel(WTE | WTRE | WTCLK, WTCR);
39 }
27} 40}
28 41
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index 4128af870b41..b80f769bc135 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -42,7 +42,10 @@
42#define TICKS_PER_SEC 100 42#define TICKS_PER_SEC 100
43#define PRESCALE 0x63 /* Divider = prescale + 1 */ 43#define PRESCALE 0x63 /* Divider = prescale + 1 */
44 44
45unsigned int timer0_load; 45#define TDR_SHIFT 24
46#define TDR_MASK ((1 << TDR_SHIFT) - 1)
47
48static unsigned int timer0_load;
46 49
47static void nuc900_clockevent_setmode(enum clock_event_mode mode, 50static void nuc900_clockevent_setmode(enum clock_event_mode mode,
48 struct clock_event_device *clk) 51 struct clock_event_device *clk)
@@ -88,7 +91,7 @@ static int nuc900_clockevent_setnextevent(unsigned long evt,
88static struct clock_event_device nuc900_clockevent_device = { 91static struct clock_event_device nuc900_clockevent_device = {
89 .name = "nuc900-timer0", 92 .name = "nuc900-timer0",
90 .shift = 32, 93 .shift = 32,
91 .features = CLOCK_EVT_MODE_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 94 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
92 .set_mode = nuc900_clockevent_setmode, 95 .set_mode = nuc900_clockevent_setmode,
93 .set_next_event = nuc900_clockevent_setnextevent, 96 .set_next_event = nuc900_clockevent_setnextevent,
94 .rating = 300, 97 .rating = 300,
@@ -112,8 +115,23 @@ static struct irqaction nuc900_timer0_irq = {
112 .handler = nuc900_timer0_interrupt, 115 .handler = nuc900_timer0_interrupt,
113}; 116};
114 117
115static void __init nuc900_clockevents_init(unsigned int rate) 118static void __init nuc900_clockevents_init(void)
116{ 119{
120 unsigned int rate;
121 struct clk *clk = clk_get(NULL, "timer0");
122
123 BUG_ON(IS_ERR(clk));
124
125 __raw_writel(0x00, REG_TCSR0);
126
127 clk_enable(clk);
128 rate = clk_get_rate(clk) / (PRESCALE + 1);
129
130 timer0_load = (rate / TICKS_PER_SEC);
131
132 __raw_writel(RESETINT, REG_TISR);
133 setup_irq(IRQ_TIMER0, &nuc900_timer0_irq);
134
117 nuc900_clockevent_device.mult = div_sc(rate, NSEC_PER_SEC, 135 nuc900_clockevent_device.mult = div_sc(rate, NSEC_PER_SEC,
118 nuc900_clockevent_device.shift); 136 nuc900_clockevent_device.shift);
119 nuc900_clockevent_device.max_delta_ns = clockevent_delta2ns(0xffffffff, 137 nuc900_clockevent_device.max_delta_ns = clockevent_delta2ns(0xffffffff,
@@ -127,26 +145,35 @@ static void __init nuc900_clockevents_init(unsigned int rate)
127 145
128static cycle_t nuc900_get_cycles(struct clocksource *cs) 146static cycle_t nuc900_get_cycles(struct clocksource *cs)
129{ 147{
130 return ~__raw_readl(REG_TDR1); 148 return (~__raw_readl(REG_TDR1)) & TDR_MASK;
131} 149}
132 150
133static struct clocksource clocksource_nuc900 = { 151static struct clocksource clocksource_nuc900 = {
134 .name = "nuc900-timer1", 152 .name = "nuc900-timer1",
135 .rating = 200, 153 .rating = 200,
136 .read = nuc900_get_cycles, 154 .read = nuc900_get_cycles,
137 .mask = CLOCKSOURCE_MASK(32), 155 .mask = CLOCKSOURCE_MASK(TDR_SHIFT),
138 .shift = 20, 156 .shift = 10,
139 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 157 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
140}; 158};
141 159
142static void __init nuc900_clocksource_init(unsigned int rate) 160static void __init nuc900_clocksource_init(void)
143{ 161{
144 unsigned int val; 162 unsigned int val;
163 unsigned int rate;
164 struct clk *clk = clk_get(NULL, "timer1");
165
166 BUG_ON(IS_ERR(clk));
167
168 __raw_writel(0x00, REG_TCSR1);
169
170 clk_enable(clk);
171 rate = clk_get_rate(clk) / (PRESCALE + 1);
145 172
146 __raw_writel(0xffffffff, REG_TICR1); 173 __raw_writel(0xffffffff, REG_TICR1);
147 174
148 val = __raw_readl(REG_TCSR1); 175 val = __raw_readl(REG_TCSR1);
149 val |= (COUNTEN | PERIOD); 176 val |= (COUNTEN | PERIOD | PRESCALE);
150 __raw_writel(val, REG_TCSR1); 177 __raw_writel(val, REG_TCSR1);
151 178
152 clocksource_nuc900.mult = 179 clocksource_nuc900.mult =
@@ -156,25 +183,8 @@ static void __init nuc900_clocksource_init(unsigned int rate)
156 183
157static void __init nuc900_timer_init(void) 184static void __init nuc900_timer_init(void)
158{ 185{
159 struct clk *ck_ext = clk_get(NULL, "ext"); 186 nuc900_clocksource_init();
160 unsigned int rate; 187 nuc900_clockevents_init();
161
162 BUG_ON(IS_ERR(ck_ext));
163
164 rate = clk_get_rate(ck_ext);
165 clk_put(ck_ext);
166 rate = rate / (PRESCALE + 0x01);
167
168 /* set a known state */
169 __raw_writel(0x00, REG_TCSR0);
170 __raw_writel(0x00, REG_TCSR1);
171 __raw_writel(RESETINT, REG_TISR);
172 timer0_load = (rate / TICKS_PER_SEC);
173
174 setup_irq(IRQ_TIMER0, &nuc900_timer0_irq);
175
176 nuc900_clocksource_init(rate);
177 nuc900_clockevents_init(rate);
178} 188}
179 189
180struct sys_timer nuc900_timer = { 190struct sys_timer nuc900_timer = {
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index a26a605b73bd..0cb1848bd876 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -40,7 +40,6 @@ ENTRY(v7wbi_flush_user_tlb_range)
40 asid r3, r3 @ mask ASID 40 asid r3, r3 @ mask ASID
41 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA 41 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
42 mov r1, r1, lsl #PAGE_SHIFT 42 mov r1, r1, lsl #PAGE_SHIFT
43 vma_vm_flags r2, r2 @ get vma->vm_flags
441: 431:
45#ifdef CONFIG_SMP 44#ifdef CONFIG_SMP
46 mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) 45 mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx25.h b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
index 810c47f56e77..9af494f0ab3d 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx25.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
@@ -58,19 +58,19 @@
58 58
59#define MX25_PAD_A18__A18 IOMUX_PAD(0x23c, 0x020, 0x10, 0, 0, NO_PAD_CTRL) 59#define MX25_PAD_A18__A18 IOMUX_PAD(0x23c, 0x020, 0x10, 0, 0, NO_PAD_CTRL)
60#define MX25_PAD_A18__GPIO_2_4 IOMUX_PAD(0x23c, 0x020, 0x15, 0, 0, NO_PAD_CTRL) 60#define MX25_PAD_A18__GPIO_2_4 IOMUX_PAD(0x23c, 0x020, 0x15, 0, 0, NO_PAD_CTRL)
61#define MX25_PAD_A18__FEC_COL IOMUX_PAD(0x23c, 0x020, 0x17, 0x504, 0, NO_PAD_CTL) 61#define MX25_PAD_A18__FEC_COL IOMUX_PAD(0x23c, 0x020, 0x17, 0x504, 0, NO_PAD_CTRL)
62 62
63#define MX25_PAD_A19__A19 IOMUX_PAD(0x240, 0x024, 0x10, 0, 0, NO_PAD_CTRL) 63#define MX25_PAD_A19__A19 IOMUX_PAD(0x240, 0x024, 0x10, 0, 0, NO_PAD_CTRL)
64#define MX25_PAD_A19__FEC_RX_ER IOMUX_PAD(0x240, 0x024, 0x17, 0x518, 0, NO_PAD_CTL) 64#define MX25_PAD_A19__FEC_RX_ER IOMUX_PAD(0x240, 0x024, 0x17, 0x518, 0, NO_PAD_CTRL)
65#define MX25_PAD_A19__GPIO_2_5 IOMUX_PAD(0x240, 0x024, 0x15, 0, 0, NO_PAD_CTRL) 65#define MX25_PAD_A19__GPIO_2_5 IOMUX_PAD(0x240, 0x024, 0x15, 0, 0, NO_PAD_CTRL)
66 66
67#define MX25_PAD_A20__A20 IOMUX_PAD(0x244, 0x028, 0x10, 0, 0, NO_PAD_CTRL) 67#define MX25_PAD_A20__A20 IOMUX_PAD(0x244, 0x028, 0x10, 0, 0, NO_PAD_CTRL)
68#define MX25_PAD_A20__GPIO_2_6 IOMUX_PAD(0x244, 0x028, 0x15, 0, 0, NO_PAD_CTRL) 68#define MX25_PAD_A20__GPIO_2_6 IOMUX_PAD(0x244, 0x028, 0x15, 0, 0, NO_PAD_CTRL)
69#define MX25_PAD_A20__FEC_RDATA2 IOMUX_PAD(0x244, 0x028, 0x17, 0x50c, 0, NO_PAD_CTL) 69#define MX25_PAD_A20__FEC_RDATA2 IOMUX_PAD(0x244, 0x028, 0x17, 0x50c, 0, NO_PAD_CTRL)
70 70
71#define MX25_PAD_A21__A21 IOMUX_PAD(0x248, 0x02c, 0x10, 0, 0, NO_PAD_CTRL) 71#define MX25_PAD_A21__A21 IOMUX_PAD(0x248, 0x02c, 0x10, 0, 0, NO_PAD_CTRL)
72#define MX25_PAD_A21__GPIO_2_7 IOMUX_PAD(0x248, 0x02c, 0x15, 0, 0, NO_PAD_CTRL) 72#define MX25_PAD_A21__GPIO_2_7 IOMUX_PAD(0x248, 0x02c, 0x15, 0, 0, NO_PAD_CTRL)
73#define MX25_PAD_A21__FEC_RDATA3 IOMUX_PAD(0x248, 0x02c, 0x17, 0x510, 0, NO_PAD_CTL) 73#define MX25_PAD_A21__FEC_RDATA3 IOMUX_PAD(0x248, 0x02c, 0x17, 0x510, 0, NO_PAD_CTRL)
74 74
75#define MX25_PAD_A22__A22 IOMUX_PAD(0x000, 0x030, 0x10, 0, 0, NO_PAD_CTRL) 75#define MX25_PAD_A22__A22 IOMUX_PAD(0x000, 0x030, 0x10, 0, 0, NO_PAD_CTRL)
76#define MX25_PAD_A22__GPIO_2_8 IOMUX_PAD(0x000, 0x030, 0x15, 0, 0, NO_PAD_CTRL) 76#define MX25_PAD_A22__GPIO_2_8 IOMUX_PAD(0x000, 0x030, 0x15, 0, 0, NO_PAD_CTRL)
@@ -80,11 +80,11 @@
80 80
81#define MX25_PAD_A24__A24 IOMUX_PAD(0x250, 0x038, 0x10, 0, 0, NO_PAD_CTRL) 81#define MX25_PAD_A24__A24 IOMUX_PAD(0x250, 0x038, 0x10, 0, 0, NO_PAD_CTRL)
82#define MX25_PAD_A24__GPIO_2_10 IOMUX_PAD(0x250, 0x038, 0x15, 0, 0, NO_PAD_CTRL) 82#define MX25_PAD_A24__GPIO_2_10 IOMUX_PAD(0x250, 0x038, 0x15, 0, 0, NO_PAD_CTRL)
83#define MX25_PAD_A24__FEC_RX_CLK IOMUX_PAD(0x250, 0x038, 0x17, 0x514, 0, NO_PAD_CTL) 83#define MX25_PAD_A24__FEC_RX_CLK IOMUX_PAD(0x250, 0x038, 0x17, 0x514, 0, NO_PAD_CTRL)
84 84
85#define MX25_PAD_A25__A25 IOMUX_PAD(0x254, 0x03c, 0x10, 0, 0, NO_PAD_CTRL) 85#define MX25_PAD_A25__A25 IOMUX_PAD(0x254, 0x03c, 0x10, 0, 0, NO_PAD_CTRL)
86#define MX25_PAD_A25__GPIO_2_11 IOMUX_PAD(0x254, 0x03c, 0x15, 0, 0, NO_PAD_CTRL) 86#define MX25_PAD_A25__GPIO_2_11 IOMUX_PAD(0x254, 0x03c, 0x15, 0, 0, NO_PAD_CTRL)
87#define MX25_PAD_A25__FEC_CRS IOMUX_PAD(0x254, 0x03c, 0x17, 0x508, 0, NO_PAD_CTL) 87#define MX25_PAD_A25__FEC_CRS IOMUX_PAD(0x254, 0x03c, 0x17, 0x508, 0, NO_PAD_CTRL)
88 88
89#define MX25_PAD_EB0__EB0 IOMUX_PAD(0x258, 0x040, 0x10, 0, 0, NO_PAD_CTRL) 89#define MX25_PAD_EB0__EB0 IOMUX_PAD(0x258, 0x040, 0x10, 0, 0, NO_PAD_CTRL)
90#define MX25_PAD_EB0__AUD4_TXD IOMUX_PAD(0x258, 0x040, 0x14, 0x464, 0, NO_PAD_CTRL) 90#define MX25_PAD_EB0__AUD4_TXD IOMUX_PAD(0x258, 0x040, 0x14, 0x464, 0, NO_PAD_CTRL)
@@ -112,7 +112,7 @@
112#define MX25_PAD_CS5__UART5_RTS IOMUX_PAD(0x268, 0x058, 0x13, 0x574, 0, NO_PAD_CTRL) 112#define MX25_PAD_CS5__UART5_RTS IOMUX_PAD(0x268, 0x058, 0x13, 0x574, 0, NO_PAD_CTRL)
113#define MX25_PAD_CS5__GPIO_3_21 IOMUX_PAD(0x268, 0x058, 0x15, 0, 0, NO_PAD_CTRL) 113#define MX25_PAD_CS5__GPIO_3_21 IOMUX_PAD(0x268, 0x058, 0x15, 0, 0, NO_PAD_CTRL)
114 114
115#define MX25_PAD_NF_CE0__NF_CE0 IOMUX_PAD(0x26c, 0x05c, 0x10, 0, 0, NO_PAD_CTL) 115#define MX25_PAD_NF_CE0__NF_CE0 IOMUX_PAD(0x26c, 0x05c, 0x10, 0, 0, NO_PAD_CTRL)
116#define MX25_PAD_NF_CE0__GPIO_3_22 IOMUX_PAD(0x26c, 0x05c, 0x15, 0, 0, NO_PAD_CTRL) 116#define MX25_PAD_NF_CE0__GPIO_3_22 IOMUX_PAD(0x26c, 0x05c, 0x15, 0, 0, NO_PAD_CTRL)
117 117
118#define MX25_PAD_ECB__ECB IOMUX_PAD(0x270, 0x060, 0x10, 0, 0, NO_PAD_CTRL) 118#define MX25_PAD_ECB__ECB IOMUX_PAD(0x270, 0x060, 0x10, 0, 0, NO_PAD_CTRL)
@@ -229,28 +229,28 @@
229#define MX25_PAD_LD7__GPIO_1_21 IOMUX_PAD(0x2dc, 0x0e4, 0x15, 0, 0, NO_PAD_CTRL) 229#define MX25_PAD_LD7__GPIO_1_21 IOMUX_PAD(0x2dc, 0x0e4, 0x15, 0, 0, NO_PAD_CTRL)
230 230
231#define MX25_PAD_LD8__LD8 IOMUX_PAD(0x2e0, 0x0e8, 0x10, 0, 0, NO_PAD_CTRL) 231#define MX25_PAD_LD8__LD8 IOMUX_PAD(0x2e0, 0x0e8, 0x10, 0, 0, NO_PAD_CTRL)
232#define MX25_PAD_LD8__FEC_TX_ERR IOMUX_PAD(0x2e0, 0x0e8, 0x15, 0, 0, NO_PAD_CTL) 232#define MX25_PAD_LD8__FEC_TX_ERR IOMUX_PAD(0x2e0, 0x0e8, 0x15, 0, 0, NO_PAD_CTRL)
233 233
234#define MX25_PAD_LD9__LD9 IOMUX_PAD(0x2e4, 0x0ec, 0x10, 0, 0, NO_PAD_CTRL) 234#define MX25_PAD_LD9__LD9 IOMUX_PAD(0x2e4, 0x0ec, 0x10, 0, 0, NO_PAD_CTRL)
235#define MX25_PAD_LD9__FEC_COL IOMUX_PAD(0x2e4, 0x0ec, 0x15, 0x504, 1, NO_PAD_CTL) 235#define MX25_PAD_LD9__FEC_COL IOMUX_PAD(0x2e4, 0x0ec, 0x15, 0x504, 1, NO_PAD_CTRL)
236 236
237#define MX25_PAD_LD10__LD10 IOMUX_PAD(0x2e8, 0x0f0, 0x10, 0, 0, NO_PAD_CTRL) 237#define MX25_PAD_LD10__LD10 IOMUX_PAD(0x2e8, 0x0f0, 0x10, 0, 0, NO_PAD_CTRL)
238#define MX25_PAD_LD10__FEC_RX_ER IOMUX_PAD(0x2e8, 0x0f0, 0x15, 0x518, 1, NO_PAD_CTL) 238#define MX25_PAD_LD10__FEC_RX_ER IOMUX_PAD(0x2e8, 0x0f0, 0x15, 0x518, 1, NO_PAD_CTRL)
239 239
240#define MX25_PAD_LD11__LD11 IOMUX_PAD(0x2ec, 0x0f4, 0x10, 0, 0, NO_PAD_CTRL) 240#define MX25_PAD_LD11__LD11 IOMUX_PAD(0x2ec, 0x0f4, 0x10, 0, 0, NO_PAD_CTRL)
241#define MX25_PAD_LD11__FEC_RDATA2 IOMUX_PAD(0x2ec, 0x0f4, 0x15, 0x50c, 1, NO_PAD_CTL) 241#define MX25_PAD_LD11__FEC_RDATA2 IOMUX_PAD(0x2ec, 0x0f4, 0x15, 0x50c, 1, NO_PAD_CTRL)
242 242
243#define MX25_PAD_LD12__LD12 IOMUX_PAD(0x2f0, 0x0f8, 0x10, 0, 0, NO_PAD_CTRL) 243#define MX25_PAD_LD12__LD12 IOMUX_PAD(0x2f0, 0x0f8, 0x10, 0, 0, NO_PAD_CTRL)
244#define MX25_PAD_LD12__FEC_RDATA3 IOMUX_PAD(0x2f0, 0x0f8, 0x15, 0x510, 1, NO_PAD_CTL) 244#define MX25_PAD_LD12__FEC_RDATA3 IOMUX_PAD(0x2f0, 0x0f8, 0x15, 0x510, 1, NO_PAD_CTRL)
245 245
246#define MX25_PAD_LD13__LD13 IOMUX_PAD(0x2f4, 0x0fc, 0x10, 0, 0, NO_PAD_CTRL) 246#define MX25_PAD_LD13__LD13 IOMUX_PAD(0x2f4, 0x0fc, 0x10, 0, 0, NO_PAD_CTRL)
247#define MX25_PAD_LD13__FEC_TDATA2 IOMUX_PAD(0x2f4, 0x0fc, 0x15, 0, 0, NO_PAD_CTL) 247#define MX25_PAD_LD13__FEC_TDATA2 IOMUX_PAD(0x2f4, 0x0fc, 0x15, 0, 0, NO_PAD_CTRL)
248 248
249#define MX25_PAD_LD14__LD14 IOMUX_PAD(0x2f8, 0x100, 0x10, 0, 0, NO_PAD_CTRL) 249#define MX25_PAD_LD14__LD14 IOMUX_PAD(0x2f8, 0x100, 0x10, 0, 0, NO_PAD_CTRL)
250#define MX25_PAD_LD14__FEC_TDATA3 IOMUX_PAD(0x2f8, 0x100, 0x15, 0, 0, NO_PAD_CTL) 250#define MX25_PAD_LD14__FEC_TDATA3 IOMUX_PAD(0x2f8, 0x100, 0x15, 0, 0, NO_PAD_CTRL)
251 251
252#define MX25_PAD_LD15__LD15 IOMUX_PAD(0x2fc, 0x104, 0x10, 0, 0, NO_PAD_CTRL) 252#define MX25_PAD_LD15__LD15 IOMUX_PAD(0x2fc, 0x104, 0x10, 0, 0, NO_PAD_CTRL)
253#define MX25_PAD_LD15__FEC_RX_CLK IOMUX_PAD(0x2fc, 0x104, 0x15, 0x514, 1, NO_PAD_CTL) 253#define MX25_PAD_LD15__FEC_RX_CLK IOMUX_PAD(0x2fc, 0x104, 0x15, 0x514, 1, NO_PAD_CTRL)
254 254
255#define MX25_PAD_HSYNC__HSYNC IOMUX_PAD(0x300, 0x108, 0x10, 0, 0, NO_PAD_CTRL) 255#define MX25_PAD_HSYNC__HSYNC IOMUX_PAD(0x300, 0x108, 0x10, 0, 0, NO_PAD_CTRL)
256#define MX25_PAD_HSYNC__GPIO_1_22 IOMUX_PAD(0x300, 0x108, 0x15, 0, 0, NO_PAD_CTRL) 256#define MX25_PAD_HSYNC__GPIO_1_22 IOMUX_PAD(0x300, 0x108, 0x15, 0, 0, NO_PAD_CTRL)
@@ -265,7 +265,7 @@
265#define MX25_PAD_OE_ACD__GPIO_1_25 IOMUX_PAD(0x30c, 0x114, 0x15, 0, 0, NO_PAD_CTRL) 265#define MX25_PAD_OE_ACD__GPIO_1_25 IOMUX_PAD(0x30c, 0x114, 0x15, 0, 0, NO_PAD_CTRL)
266 266
267#define MX25_PAD_CONTRAST__CONTRAST IOMUX_PAD(0x310, 0x118, 0x10, 0, 0, NO_PAD_CTRL) 267#define MX25_PAD_CONTRAST__CONTRAST IOMUX_PAD(0x310, 0x118, 0x10, 0, 0, NO_PAD_CTRL)
268#define MX25_PAD_CONTRAST__FEC_CRS IOMUX_PAD(0x310, 0x118, 0x15, 0x508, 1, NO_PAD_CTL) 268#define MX25_PAD_CONTRAST__FEC_CRS IOMUX_PAD(0x310, 0x118, 0x15, 0x508, 1, NO_PAD_CTRL)
269 269
270#define MX25_PAD_PWM__PWM IOMUX_PAD(0x314, 0x11c, 0x10, 0, 0, NO_PAD_CTRL) 270#define MX25_PAD_PWM__PWM IOMUX_PAD(0x314, 0x11c, 0x10, 0, 0, NO_PAD_CTRL)
271#define MX25_PAD_PWM__GPIO_1_26 IOMUX_PAD(0x314, 0x11c, 0x15, 0, 0, NO_PAD_CTRL) 271#define MX25_PAD_PWM__GPIO_1_26 IOMUX_PAD(0x314, 0x11c, 0x15, 0, 0, NO_PAD_CTRL)
@@ -354,19 +354,19 @@
354#define MX25_PAD_UART2_TXD__GPIO_4_27 IOMUX_PAD(0x37c, 0x184, 0x15, 0, 0, NO_PAD_CTRL) 354#define MX25_PAD_UART2_TXD__GPIO_4_27 IOMUX_PAD(0x37c, 0x184, 0x15, 0, 0, NO_PAD_CTRL)
355 355
356#define MX25_PAD_UART2_RTS__UART2_RTS IOMUX_PAD(0x380, 0x188, 0x10, 0, 0, NO_PAD_CTRL) 356#define MX25_PAD_UART2_RTS__UART2_RTS IOMUX_PAD(0x380, 0x188, 0x10, 0, 0, NO_PAD_CTRL)
357#define MX25_PAD_UART2_RTS__FEC_COL IOMUX_PAD(0x380, 0x188, 0x12, 0x504, 2, NO_PAD_CTL) 357#define MX25_PAD_UART2_RTS__FEC_COL IOMUX_PAD(0x380, 0x188, 0x12, 0x504, 2, NO_PAD_CTRL)
358#define MX25_PAD_UART2_RTS__GPIO_4_28 IOMUX_PAD(0x380, 0x188, 0x15, 0, 0, NO_PAD_CTRL) 358#define MX25_PAD_UART2_RTS__GPIO_4_28 IOMUX_PAD(0x380, 0x188, 0x15, 0, 0, NO_PAD_CTRL)
359 359
360#define MX25_PAD_UART2_CTS__FEC_RX_ER IOMUX_PAD(0x384, 0x18c, 0x12, 0x518, 2, NO_PAD_CTL) 360#define MX25_PAD_UART2_CTS__FEC_RX_ER IOMUX_PAD(0x384, 0x18c, 0x12, 0x518, 2, NO_PAD_CTRL)
361#define MX25_PAD_UART2_CTS__UART2_CTS IOMUX_PAD(0x384, 0x18c, 0x10, 0, 0, NO_PAD_CTRL) 361#define MX25_PAD_UART2_CTS__UART2_CTS IOMUX_PAD(0x384, 0x18c, 0x10, 0, 0, NO_PAD_CTRL)
362#define MX25_PAD_UART2_CTS__GPIO_4_29 IOMUX_PAD(0x384, 0x18c, 0x15, 0, 0, NO_PAD_CTRL) 362#define MX25_PAD_UART2_CTS__GPIO_4_29 IOMUX_PAD(0x384, 0x18c, 0x15, 0, 0, NO_PAD_CTRL)
363 363
364#define MX25_PAD_SD1_CMD__SD1_CMD IOMUX_PAD(0x388, 0x190, 0x10, 0, 0, PAD_CTL_PUS_47K_UP) 364#define MX25_PAD_SD1_CMD__SD1_CMD IOMUX_PAD(0x388, 0x190, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
365#define MX25_PAD_SD1_CMD__FEC_RDATA2 IOMUX_PAD(0x388, 0x190, 0x12, 0x50c, 2, NO_PAD_CTL) 365#define MX25_PAD_SD1_CMD__FEC_RDATA2 IOMUX_PAD(0x388, 0x190, 0x12, 0x50c, 2, NO_PAD_CTRL)
366#define MX25_PAD_SD1_CMD__GPIO_2_23 IOMUX_PAD(0x388, 0x190, 0x15, 0, 0, NO_PAD_CTRL) 366#define MX25_PAD_SD1_CMD__GPIO_2_23 IOMUX_PAD(0x388, 0x190, 0x15, 0, 0, NO_PAD_CTRL)
367 367
368#define MX25_PAD_SD1_CLK__SD1_CLK IOMUX_PAD(0x38c, 0x194, 0x10, 0, 0, PAD_CTL_PUS_47K_UP) 368#define MX25_PAD_SD1_CLK__SD1_CLK IOMUX_PAD(0x38c, 0x194, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
369#define MX25_PAD_SD1_CLK__FEC_RDATA3 IOMUX_PAD(0x38c, 0x194, 0x12, 0x510, 2, NO_PAD_CTL) 369#define MX25_PAD_SD1_CLK__FEC_RDATA3 IOMUX_PAD(0x38c, 0x194, 0x12, 0x510, 2, NO_PAD_CTRL)
370#define MX25_PAD_SD1_CLK__GPIO_2_24 IOMUX_PAD(0x38c, 0x194, 0x15, 0, 0, NO_PAD_CTRL) 370#define MX25_PAD_SD1_CLK__GPIO_2_24 IOMUX_PAD(0x38c, 0x194, 0x15, 0, 0, NO_PAD_CTRL)
371 371
372#define MX25_PAD_SD1_DATA0__SD1_DATA0 IOMUX_PAD(0x390, 0x198, 0x10, 0, 0, PAD_CTL_PUS_47K_UP) 372#define MX25_PAD_SD1_DATA0__SD1_DATA0 IOMUX_PAD(0x390, 0x198, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
@@ -377,11 +377,11 @@
377#define MX25_PAD_SD1_DATA1__GPIO_2_26 IOMUX_PAD(0x394, 0x19c, 0x15, 0, 0, NO_PAD_CTRL) 377#define MX25_PAD_SD1_DATA1__GPIO_2_26 IOMUX_PAD(0x394, 0x19c, 0x15, 0, 0, NO_PAD_CTRL)
378 378
379#define MX25_PAD_SD1_DATA2__SD1_DATA2 IOMUX_PAD(0x398, 0x1a0, 0x10, 0, 0, PAD_CTL_PUS_47K_UP) 379#define MX25_PAD_SD1_DATA2__SD1_DATA2 IOMUX_PAD(0x398, 0x1a0, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
380#define MX25_PAD_SD1_DATA2__FEC_RX_CLK IOMUX_PAD(0x398, 0x1a0, 0x15, 0x514, 2, NO_PAD_CTL) 380#define MX25_PAD_SD1_DATA2__FEC_RX_CLK IOMUX_PAD(0x398, 0x1a0, 0x15, 0x514, 2, NO_PAD_CTRL)
381#define MX25_PAD_SD1_DATA2__GPIO_2_27 IOMUX_PAD(0x398, 0x1a0, 0x15, 0, 0, NO_PAD_CTRL) 381#define MX25_PAD_SD1_DATA2__GPIO_2_27 IOMUX_PAD(0x398, 0x1a0, 0x15, 0, 0, NO_PAD_CTRL)
382 382
383#define MX25_PAD_SD1_DATA3__SD1_DATA3 IOMUX_PAD(0x39c, 0x1a4, 0x10, 0, 0, PAD_CTL_PUS_47K_UP) 383#define MX25_PAD_SD1_DATA3__SD1_DATA3 IOMUX_PAD(0x39c, 0x1a4, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
384#define MX25_PAD_SD1_DATA3__FEC_CRS IOMUX_PAD(0x39c, 0x1a4, 0x10, 0x508, 2, NO_PAD_CTL) 384#define MX25_PAD_SD1_DATA3__FEC_CRS IOMUX_PAD(0x39c, 0x1a4, 0x10, 0x508, 2, NO_PAD_CTRL)
385#define MX25_PAD_SD1_DATA3__GPIO_2_28 IOMUX_PAD(0x39c, 0x1a4, 0x15, 0, 0, NO_PAD_CTRL) 385#define MX25_PAD_SD1_DATA3__GPIO_2_28 IOMUX_PAD(0x39c, 0x1a4, 0x15, 0, 0, NO_PAD_CTRL)
386 386
387#define MX25_PAD_KPP_ROW0__KPP_ROW0 IOMUX_PAD(0x3a0, 0x1a8, 0x10, 0, 0, PAD_CTL_PKE) 387#define MX25_PAD_KPP_ROW0__KPP_ROW0 IOMUX_PAD(0x3a0, 0x1a8, 0x10, 0, 0, PAD_CTL_PKE)
@@ -410,7 +410,7 @@
410#define MX25_PAD_KPP_COL3__KPP_COL3 IOMUX_PAD(0x3bc, 0x1c4, 0x10, 0, 0, PAD_CTL_PKE | PAD_CTL_ODE) 410#define MX25_PAD_KPP_COL3__KPP_COL3 IOMUX_PAD(0x3bc, 0x1c4, 0x10, 0, 0, PAD_CTL_PKE | PAD_CTL_ODE)
411#define MX25_PAD_KPP_COL3__GPIO_3_4 IOMUX_PAD(0x3bc, 0x1c4, 0x15, 0, 0, NO_PAD_CTRL) 411#define MX25_PAD_KPP_COL3__GPIO_3_4 IOMUX_PAD(0x3bc, 0x1c4, 0x15, 0, 0, NO_PAD_CTRL)
412 412
413#define MX25_PAD_FEC_MDC__FEC_MDC IOMUX_PAD(0x3c0, 0x1c8, 0x10, 0, 0, NO_PAD_CTL) 413#define MX25_PAD_FEC_MDC__FEC_MDC IOMUX_PAD(0x3c0, 0x1c8, 0x10, 0, 0, NO_PAD_CTRL)
414#define MX25_PAD_FEC_MDC__AUD4_TXD IOMUX_PAD(0x3c0, 0x1c8, 0x12, 0x464, 1, NO_PAD_CTRL) 414#define MX25_PAD_FEC_MDC__AUD4_TXD IOMUX_PAD(0x3c0, 0x1c8, 0x12, 0x464, 1, NO_PAD_CTRL)
415#define MX25_PAD_FEC_MDC__GPIO_3_5 IOMUX_PAD(0x3c0, 0x1c8, 0x15, 0, 0, NO_PAD_CTRL) 415#define MX25_PAD_FEC_MDC__GPIO_3_5 IOMUX_PAD(0x3c0, 0x1c8, 0x15, 0, 0, NO_PAD_CTRL)
416 416
@@ -418,23 +418,23 @@
418#define MX25_PAD_FEC_MDIO__AUD4_RXD IOMUX_PAD(0x3c4, 0x1cc, 0x12, 0x460, 1, NO_PAD_CTRL) 418#define MX25_PAD_FEC_MDIO__AUD4_RXD IOMUX_PAD(0x3c4, 0x1cc, 0x12, 0x460, 1, NO_PAD_CTRL)
419#define MX25_PAD_FEC_MDIO__GPIO_3_6 IOMUX_PAD(0x3c4, 0x1cc, 0x15, 0, 0, NO_PAD_CTRL) 419#define MX25_PAD_FEC_MDIO__GPIO_3_6 IOMUX_PAD(0x3c4, 0x1cc, 0x15, 0, 0, NO_PAD_CTRL)
420 420
421#define MX25_PAD_FEC_TDATA0__FEC_TDATA0 IOMUX_PAD(0x3c8, 0x1d0, 0x10, 0, 0, NO_PAD_CTL) 421#define MX25_PAD_FEC_TDATA0__FEC_TDATA0 IOMUX_PAD(0x3c8, 0x1d0, 0x10, 0, 0, NO_PAD_CTRL)
422#define MX25_PAD_FEC_TDATA0__GPIO_3_7 IOMUX_PAD(0x3c8, 0x1d0, 0x15, 0, 0, NO_PAD_CTRL) 422#define MX25_PAD_FEC_TDATA0__GPIO_3_7 IOMUX_PAD(0x3c8, 0x1d0, 0x15, 0, 0, NO_PAD_CTRL)
423 423
424#define MX25_PAD_FEC_TDATA1__FEC_TDATA1 IOMUX_PAD(0x3cc, 0x1d4, 0x10, 0, 0, NO_PAD_CTL) 424#define MX25_PAD_FEC_TDATA1__FEC_TDATA1 IOMUX_PAD(0x3cc, 0x1d4, 0x10, 0, 0, NO_PAD_CTRL)
425#define MX25_PAD_FEC_TDATA1__AUD4_TXFS IOMUX_PAD(0x3cc, 0x1d4, 0x12, 0x474, 1, NO_PAD_CTRL) 425#define MX25_PAD_FEC_TDATA1__AUD4_TXFS IOMUX_PAD(0x3cc, 0x1d4, 0x12, 0x474, 1, NO_PAD_CTRL)
426#define MX25_PAD_FEC_TDATA1__GPIO_3_8 IOMUX_PAD(0x3cc, 0x1d4, 0x15, 0, 0, NO_PAD_CTRL) 426#define MX25_PAD_FEC_TDATA1__GPIO_3_8 IOMUX_PAD(0x3cc, 0x1d4, 0x15, 0, 0, NO_PAD_CTRL)
427 427
428#define MX25_PAD_FEC_TX_EN__FEC_TX_EN IOMUX_PAD(0x3d0, 0x1d8, 0x10, 0, 0, NO_PAD_CTL) 428#define MX25_PAD_FEC_TX_EN__FEC_TX_EN IOMUX_PAD(0x3d0, 0x1d8, 0x10, 0, 0, NO_PAD_CTRL)
429#define MX25_PAD_FEC_TX_EN__GPIO_3_9 IOMUX_PAD(0x3d0, 0x1d8, 0x15, 0, 0, NO_PAD_CTRL) 429#define MX25_PAD_FEC_TX_EN__GPIO_3_9 IOMUX_PAD(0x3d0, 0x1d8, 0x15, 0, 0, NO_PAD_CTRL)
430 430
431#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 IOMUX_PAD(0x3d4, 0x1dc, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTL) 431#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 IOMUX_PAD(0x3d4, 0x1dc, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
432#define MX25_PAD_FEC_RDATA0__GPIO_3_10 IOMUX_PAD(0x3d4, 0x1dc, 0x15, 0, 0, NO_PAD_CTRL) 432#define MX25_PAD_FEC_RDATA0__GPIO_3_10 IOMUX_PAD(0x3d4, 0x1dc, 0x15, 0, 0, NO_PAD_CTRL)
433 433
434#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 IOMUX_PAD(0x3d8, 0x1e0, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTL) 434#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 IOMUX_PAD(0x3d8, 0x1e0, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
435#define MX25_PAD_FEC_RDATA1__GPIO_3_11 IOMUX_PAD(0x3d8, 0x1e0, 0x15, 0, 0, NO_PAD_CTRL) 435#define MX25_PAD_FEC_RDATA1__GPIO_3_11 IOMUX_PAD(0x3d8, 0x1e0, 0x15, 0, 0, NO_PAD_CTRL)
436 436
437#define MX25_PAD_FEC_RX_DV__FEC_RX_DV IOMUX_PAD(0x3dc, 0x1e4, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTL) 437#define MX25_PAD_FEC_RX_DV__FEC_RX_DV IOMUX_PAD(0x3dc, 0x1e4, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
438#define MX25_PAD_FEC_RX_DV__CAN2_RX IOMUX_PAD(0x3dc, 0x1e4, 0x14, 0x484, 0, PAD_CTL_PUS_22K_UP) 438#define MX25_PAD_FEC_RX_DV__CAN2_RX IOMUX_PAD(0x3dc, 0x1e4, 0x14, 0x484, 0, PAD_CTL_PUS_22K_UP)
439#define MX25_PAD_FEC_RX_DV__GPIO_3_12 IOMUX_PAD(0x3dc, 0x1e4, 0x15, 0, 0, NO_PAD_CTRL) 439#define MX25_PAD_FEC_RX_DV__GPIO_3_12 IOMUX_PAD(0x3dc, 0x1e4, 0x15, 0, 0, NO_PAD_CTRL)
440 440
diff --git a/arch/arm/plat-mxc/include/mach/mx25.h b/arch/arm/plat-mxc/include/mach/mx25.h
index 91e738144804..854e2dc58481 100644
--- a/arch/arm/plat-mxc/include/mach/mx25.h
+++ b/arch/arm/plat-mxc/include/mach/mx25.h
@@ -41,4 +41,8 @@
41#define UART1_BASE_ADDR 0x43f90000 41#define UART1_BASE_ADDR 0x43f90000
42#define UART2_BASE_ADDR 0x43f94000 42#define UART2_BASE_ADDR 0x43f94000
43 43
44#define MX25_FEC_BASE_ADDR 0x50038000
45
46#define MX25_INT_FEC 57
47
44#endif /* __MACH_MX25_H__ */ 48#endif /* __MACH_MX25_H__ */
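
The two macros added above describe the FEC's register window and interrupt line. As a hedged illustration of how such definitions are typically consumed, the sketch below wires them into a platform-device resource pair; the device name, window size, and variable names are assumptions for the example, not the code this series adds elsewhere.

	/* Illustrative only: how MX25_FEC_BASE_ADDR / MX25_INT_FEC could back a
	 * platform device's resources.  Window size and names are assumed. */
	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <mach/mx25.h>

	static struct resource mx25_fec_resources[] = {
		{
			.start	= MX25_FEC_BASE_ADDR,
			.end	= MX25_FEC_BASE_ADDR + 0xfff,	/* 4 KiB, assumed */
			.flags	= IORESOURCE_MEM,
		}, {
			.start	= MX25_INT_FEC,
			.end	= MX25_INT_FEC,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device mx25_fec_device = {
		.name		= "fec",
		.id		= 0,
		.num_resources	= ARRAY_SIZE(mx25_fec_resources),
		.resource	= mx25_fec_resources,
	};
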
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c
index a9eabdcfa163..51dc5c8106c0 100644
--- a/arch/arm/plat-pxa/pwm.c
+++ b/arch/arm/plat-pxa/pwm.c
@@ -204,14 +204,14 @@ static int __devinit pwm_probe(struct platform_device *pdev)
204 goto err_free_clk; 204 goto err_free_clk;
205 } 205 }
206 206
207 r = request_mem_region(r->start, r->end - r->start + 1, pdev->name); 207 r = request_mem_region(r->start, resource_size(r), pdev->name);
208 if (r == NULL) { 208 if (r == NULL) {
209 dev_err(&pdev->dev, "failed to request memory resource\n"); 209 dev_err(&pdev->dev, "failed to request memory resource\n");
210 ret = -EBUSY; 210 ret = -EBUSY;
211 goto err_free_clk; 211 goto err_free_clk;
212 } 212 }
213 213
214 pwm->mmio_base = ioremap(r->start, r->end - r->start + 1); 214 pwm->mmio_base = ioremap(r->start, resource_size(r));
215 if (pwm->mmio_base == NULL) { 215 if (pwm->mmio_base == NULL) {
216 dev_err(&pdev->dev, "failed to ioremap() registers\n"); 216 dev_err(&pdev->dev, "failed to ioremap() registers\n");
217 ret = -ENODEV; 217 ret = -ENODEV;
@@ -241,7 +241,7 @@ static int __devinit pwm_probe(struct platform_device *pdev)
241 return 0; 241 return 0;
242 242
243err_free_mem: 243err_free_mem:
244 release_mem_region(r->start, r->end - r->start + 1); 244 release_mem_region(r->start, resource_size(r));
245err_free_clk: 245err_free_clk:
246 clk_put(pwm->clk); 246 clk_put(pwm->clk);
247err_free: 247err_free:
@@ -271,7 +271,7 @@ static int __devexit pwm_remove(struct platform_device *pdev)
271 iounmap(pwm->mmio_base); 271 iounmap(pwm->mmio_base);
272 272
273 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 273 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
274 release_mem_region(r->start, r->end - r->start + 1); 274 release_mem_region(r->start, resource_size(r));
275 275
276 clk_put(pwm->clk); 276 clk_put(pwm->clk);
277 kfree(pwm); 277 kfree(pwm);
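
The three pwm.c hunks above swap the open-coded `r->end - r->start + 1` for resource_size(). The helper computes exactly that inclusive length; a minimal standalone sketch of the equivalence, with the struct trimmed to what the arithmetic needs:

	/* Equivalence behind the pwm.c change: resource_size() is the same
	 * inclusive-length arithmetic, kept in one place. */
	#include <stdio.h>

	struct resource { unsigned long start, end; };

	static unsigned long resource_size(const struct resource *r)
	{
		return r->end - r->start + 1;	/* 'end' is inclusive */
	}

	int main(void)
	{
		struct resource r = { .start = 0x40b00000UL, .end = 0x40b0000fUL };

		printf("open-coded %lu == helper %lu\n",
		       r.end - r.start + 1, resource_size(&r));
		return 0;
	}
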
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index 944a07c6cfd6..1d04e4078340 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -10,4 +10,9 @@
10#include <asm-generic/page.h> 10#include <asm-generic/page.h>
11#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) 11#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
12 12
13#define VM_DATA_DEFAULT_FLAGS \
14 (VM_READ | VM_WRITE | \
15 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
16 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
17
13#endif 18#endif
diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h
index 25c6a5002355..8c97068ac8fc 100644
--- a/arch/frv/include/asm/page.h
+++ b/arch/frv/include/asm/page.h
@@ -63,12 +63,10 @@ extern unsigned long max_pfn;
63#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 63#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
64 64
65 65
66#ifdef CONFIG_MMU
67#define VM_DATA_DEFAULT_FLAGS \ 66#define VM_DATA_DEFAULT_FLAGS \
68 (VM_READ | VM_WRITE | \ 67 (VM_READ | VM_WRITE | \
69 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ 68 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
70 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 69 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
71#endif
72 70
73#endif /* __ASSEMBLY__ */ 71#endif /* __ASSEMBLY__ */
74 72
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index d20db3c2a656..fbd1a2470cae 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -8,7 +8,6 @@
8extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0); 8extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
9#define mcount _mcount 9#define mcount _mcount
10 10
11#include <asm/kprobes.h>
12/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */ 11/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
13#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip) 12#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
14#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip) 13#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index dbf83fb28db3..d5505d6f2382 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
103 bundle_t bundle; 103 bundle_t bundle;
104} kprobe_opcode_t; 104} kprobe_opcode_t;
105 105
106struct fnptr {
107 unsigned long ip;
108 unsigned long gp;
109};
110
111/* Architecture specific copy of original instruction*/ 106/* Architecture specific copy of original instruction*/
112struct arch_specific_insn { 107struct arch_specific_insn {
113 /* copy of the instruction to be emulated */ 108 /* copy of the instruction to be emulated */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a0..23cce999eb1c 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
74extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size); 74extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
75extern void ia64_ptr_entry(u64 target_mask, int slot); 75extern void ia64_ptr_entry(u64 target_mask, int slot);
76 76
77extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX]; 77extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
78 78
79/* 79/*
80 region register macros 80 region register macros
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ddb4e709dba..d323071d0f91 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,9 @@
33/* 33/*
34 * Returns a bitmask of CPUs on Node 'node'. 34 * Returns a bitmask of CPUs on Node 'node'.
35 */ 35 */
36#define cpumask_of_node(node) (&node_to_cpu_mask[node]) 36#define cpumask_of_node(node) ((node) == -1 ? \
37 cpu_all_mask : \
38 &node_to_cpu_mask[node])
37 39
38/* 40/*
39 * Returns the number of the node containing Node 'nid'. 41 * Returns the number of the node containing Node 'nid'.
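
The topology.h hunk above guards cpumask_of_node() so a node of -1 (meaning "no node") yields the all-CPUs mask instead of indexing node_to_cpu_mask with -1. A toy standalone model of the same guard, with invented mask contents:

	/* Toy version of the guarded lookup: node -1 means "no node", so hand
	 * back the all-CPUs mask instead of indexing the array with -1. */
	#include <stdio.h>

	#define NR_NODES 2
	static const char *node_to_cpu_mask[NR_NODES] = { "cpus 0-3", "cpus 4-7" };
	static const char *cpu_all_mask = "all cpus";

	static const char *cpumask_of_node(int node)
	{
		return node == -1 ? cpu_all_mask : node_to_cpu_mask[node];
	}

	int main(void)
	{
		printf("%s / %s\n", cpumask_of_node(1), cpumask_of_node(-1));
		return 0;
	}
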
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index bcd260e597de..b8e5d97be158 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -30,6 +30,11 @@
30 30
31typedef unsigned int umode_t; 31typedef unsigned int umode_t;
32 32
33struct fnptr {
34 unsigned long ip;
35 unsigned long gp;
36};
37
33/* 38/*
34 * These aren't exported outside the kernel to avoid name space clashes 39 * These aren't exported outside the kernel to avoid name space clashes
35 */ 40 */
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 32f2639e9b0a..378b4833024f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1225,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
1225 unsigned long psr; 1225 unsigned long psr;
1226 int cpu = smp_processor_id(); 1226 int cpu = smp_processor_id();
1227 1227
1228 if (!ia64_idtrs[cpu])
1229 return;
1230
1228 psr = ia64_clear_ic(); 1231 psr = ia64_clear_ic();
1229 for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { 1232 for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
1230 p = &__per_cpu_idtrs[cpu][iord-1][i]; 1233 p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
1231 if (p->pte & 0x1) { 1234 if (p->pte & 0x1) {
1232 old_rr = ia64_get_rr(p->ifa); 1235 old_rr = ia64_get_rr(p->ifa);
1233 if (old_rr != p->rr) { 1236 if (old_rr != p->rr) {
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5246285a95fb..6bcbe215b9a4 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2293,7 +2293,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
2293 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) 2293 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2294 * return -ENOMEM; 2294 * return -ENOMEM;
2295 */ 2295 */
2296 if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) 2296 if (size > task_rlimit(task, RLIMIT_MEMLOCK))
2297 return -ENOMEM; 2297 return -ENOMEM;
2298 2298
2299 /* 2299 /*
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b9609c69343a..7c0d4814a68d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -91,7 +91,7 @@ dma_mark_clean(void *addr, size_t size)
91inline void 91inline void
92ia64_set_rbs_bot (void) 92ia64_set_rbs_bot (void)
93{ 93{
94 unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16; 94 unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
95 95
96 if (stack_size > MAX_USER_STACK_SIZE) 96 if (stack_size > MAX_USER_STACK_SIZE)
97 stack_size = MAX_USER_STACK_SIZE; 97 stack_size = MAX_USER_STACK_SIZE;
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6..f3de9d7a98b4 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
48DEFINE_PER_CPU(u8, ia64_tr_num); /*Number of TR slots in current processor*/ 48DEFINE_PER_CPU(u8, ia64_tr_num); /*Number of TR slots in current processor*/
49DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/ 49DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
50 50
51struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX]; 51struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
52 52
53/* 53/*
54 * Initializes the ia64_ctx.bitmap array based on max_ctx+1. 54 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
429 struct ia64_tr_entry *p; 429 struct ia64_tr_entry *p;
430 int cpu = smp_processor_id(); 430 int cpu = smp_processor_id();
431 431
432 if (!ia64_idtrs[cpu]) {
433 ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
434 sizeof (struct ia64_tr_entry), GFP_KERNEL);
435 if (!ia64_idtrs[cpu])
436 return -ENOMEM;
437 }
432 r = -EINVAL; 438 r = -EINVAL;
433 /*Check overlap with existing TR entries*/ 439 /*Check overlap with existing TR entries*/
434 if (target_mask & 0x1) { 440 if (target_mask & 0x1) {
435 p = &__per_cpu_idtrs[cpu][0][0]; 441 p = ia64_idtrs[cpu];
436 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); 442 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
437 i++, p++) { 443 i++, p++) {
438 if (p->pte & 0x1) 444 if (p->pte & 0x1)
@@ -444,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
444 } 450 }
445 } 451 }
446 if (target_mask & 0x2) { 452 if (target_mask & 0x2) {
447 p = &__per_cpu_idtrs[cpu][1][0]; 453 p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
448 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); 454 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
449 i++, p++) { 455 i++, p++) {
450 if (p->pte & 0x1) 456 if (p->pte & 0x1)
@@ -459,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
459 for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) { 465 for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
460 switch (target_mask & 0x3) { 466 switch (target_mask & 0x3) {
461 case 1: 467 case 1:
462 if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1)) 468 if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
463 goto found; 469 goto found;
464 continue; 470 continue;
465 case 2: 471 case 2:
466 if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1)) 472 if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
467 goto found; 473 goto found;
468 continue; 474 continue;
469 case 3: 475 case 3:
470 if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) && 476 if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
471 !(__per_cpu_idtrs[cpu][1][i].pte & 0x1)) 477 !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
472 goto found; 478 goto found;
473 continue; 479 continue;
474 default: 480 default:
@@ -488,7 +494,7 @@ found:
488 if (target_mask & 0x1) { 494 if (target_mask & 0x1) {
489 ia64_itr(0x1, i, va, pte, log_size); 495 ia64_itr(0x1, i, va, pte, log_size);
490 ia64_srlz_i(); 496 ia64_srlz_i();
491 p = &__per_cpu_idtrs[cpu][0][i]; 497 p = ia64_idtrs[cpu] + i;
492 p->ifa = va; 498 p->ifa = va;
493 p->pte = pte; 499 p->pte = pte;
494 p->itir = log_size << 2; 500 p->itir = log_size << 2;
@@ -497,7 +503,7 @@ found:
497 if (target_mask & 0x2) { 503 if (target_mask & 0x2) {
498 ia64_itr(0x2, i, va, pte, log_size); 504 ia64_itr(0x2, i, va, pte, log_size);
499 ia64_srlz_i(); 505 ia64_srlz_i();
500 p = &__per_cpu_idtrs[cpu][1][i]; 506 p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
501 p->ifa = va; 507 p->ifa = va;
502 p->pte = pte; 508 p->pte = pte;
503 p->itir = log_size << 2; 509 p->itir = log_size << 2;
@@ -528,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
528 return; 534 return;
529 535
530 if (target_mask & 0x1) { 536 if (target_mask & 0x1) {
531 p = &__per_cpu_idtrs[cpu][0][slot]; 537 p = ia64_idtrs[cpu] + slot;
532 if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) { 538 if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
533 p->pte = 0; 539 p->pte = 0;
534 ia64_ptr(0x1, p->ifa, p->itir>>2); 540 ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
537 } 543 }
538 544
539 if (target_mask & 0x2) { 545 if (target_mask & 0x2) {
540 p = &__per_cpu_idtrs[cpu][1][slot]; 546 p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
541 if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) { 547 if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
542 p->pte = 0; 548 p->pte = 0;
543 ia64_ptr(0x2, p->ifa, p->itir>>2); 549 ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
546 } 552 }
547 553
548 for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) { 554 for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
549 if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) || 555 if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
550 (__per_cpu_idtrs[cpu][1][i].pte & 0x1)) 556 ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
551 break; 557 break;
552 } 558 }
553 per_cpu(ia64_tr_used, cpu) = i; 559 per_cpu(ia64_tr_used, cpu) = i;
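
The tlb.c hunks above replace the static NR_CPUS x 2 x IA64_TR_ALLOC_MAX array with a per-CPU block of 2 * IA64_TR_ALLOC_MAX entries allocated on first use, so the old [cpu][which][slot] lookup becomes base + which * IA64_TR_ALLOC_MAX + slot. A small sketch of that index mapping (the slot count is an assumed value for the demo):

	/* Flat index used after the patch:
	 *   old: __per_cpu_idtrs[cpu][which][slot]
	 *   new: ia64_idtrs[cpu] + which * IA64_TR_ALLOC_MAX + slot */
	#include <assert.h>
	#include <stdio.h>

	#define IA64_TR_ALLOC_MAX 64	/* assumed for the demo */

	static int flat_index(int which /* 0 = insn TR, 1 = data TR */, int slot)
	{
		return which * IA64_TR_ALLOC_MAX + slot;
	}

	int main(void)
	{
		assert(flat_index(0, 5) == 5);
		assert(flat_index(1, 5) == IA64_TR_ALLOC_MAX + 5);
		printf("data-TR slot 5 -> flat offset %d\n", flat_index(1, 5));
		return 0;
	}
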
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index f30f4a1ead23..d242a7340541 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -96,7 +96,6 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
96 int cpu = smp_processor_id(); 96 int cpu = smp_processor_id();
97 97
98 clear_softint(1 << irq); 98 clear_softint(1 << irq);
99 pcr_ops->write(PCR_PIC_PRIV);
100 99
101 local_cpu_data().__nmi_count++; 100 local_cpu_data().__nmi_count++;
102 101
@@ -105,6 +104,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
105 if (notify_die(DIE_NMI, "nmi", regs, 0, 104 if (notify_die(DIE_NMI, "nmi", regs, 0,
106 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) 105 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
107 touched = 1; 106 touched = 1;
107 else
108 pcr_ops->write(PCR_PIC_PRIV);
108 109
109 sum = kstat_irqs_cpu(0, cpu); 110 sum = kstat_irqs_cpu(0, cpu);
110 if (__get_cpu_var(nmi_touch)) { 111 if (__get_cpu_var(nmi_touch)) {
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index fa5936e1c3b9..198fb4e79ba2 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -986,6 +986,17 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
986 data.addr = 0; 986 data.addr = 0;
987 987
988 cpuc = &__get_cpu_var(cpu_hw_events); 988 cpuc = &__get_cpu_var(cpu_hw_events);
989
990 /* If the PMU has the TOE IRQ enable bits, we need to do a
991 * dummy write to the %pcr to clear the overflow bits and thus
992 * the interrupt.
993 *
994 * Do this before we peek at the counters to determine
995 * overflow so we don't lose any events.
996 */
997 if (sparc_pmu->irq_bit)
998 pcr_ops->write(cpuc->pcr);
999
989 for (idx = 0; idx < MAX_HWEVENTS; idx++) { 1000 for (idx = 0; idx < MAX_HWEVENTS; idx++) {
990 struct perf_event *event = cpuc->events[idx]; 1001 struct perf_event *event = cpuc->events[idx];
991 struct hw_perf_event *hwc; 1002 struct hw_perf_event *hwc;
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 08e442bc3ab9..f20ddf84a893 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -396,7 +396,7 @@ config X86_TSC
396 396
397config X86_CMPXCHG64 397config X86_CMPXCHG64
398 def_bool y 398 def_bool y
399 depends on !M386 && !M486 399 depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
400 400
401# this should be set for all -march=.. options where the compiler 401# this should be set for all -march=.. options where the compiler
402# generates cmov. 402# generates cmov.
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 0c9825e97f36..088d09fb1615 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -205,14 +205,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
205 unsigned long n) 205 unsigned long n)
206{ 206{
207 int sz = __compiletime_object_size(to); 207 int sz = __compiletime_object_size(to);
208 int ret = -EFAULT;
209 208
210 if (likely(sz == -1 || sz >= n)) 209 if (likely(sz == -1 || sz >= n))
211 ret = _copy_from_user(to, from, n); 210 n = _copy_from_user(to, from, n);
212 else 211 else
213 copy_from_user_overflow(); 212 copy_from_user_overflow();
214 213
215 return ret; 214 return n;
216} 215}
217 216
218long __must_check strncpy_from_user(char *dst, const char __user *src, 217long __must_check strncpy_from_user(char *dst, const char __user *src,
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 46324c6a4f6e..535e421498f6 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -30,16 +30,15 @@ static inline unsigned long __must_check copy_from_user(void *to,
30 unsigned long n) 30 unsigned long n)
31{ 31{
32 int sz = __compiletime_object_size(to); 32 int sz = __compiletime_object_size(to);
33 int ret = -EFAULT;
34 33
35 might_fault(); 34 might_fault();
36 if (likely(sz == -1 || sz >= n)) 35 if (likely(sz == -1 || sz >= n))
37 ret = _copy_from_user(to, from, n); 36 n = _copy_from_user(to, from, n);
38#ifdef CONFIG_DEBUG_VM 37#ifdef CONFIG_DEBUG_VM
39 else 38 else
40 WARN(1, "Buffer overflow detected!\n"); 39 WARN(1, "Buffer overflow detected!\n");
41#endif 40#endif
42 return ret; 41 return n;
43} 42}
44 43
45static __always_inline __must_check 44static __always_inline __must_check
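
Both uaccess hunks fix the overflow branch's return value: copy_from_user() is documented to return the number of bytes left uncopied, so handing back -EFAULT broke callers that compute how much arrived as `len - ret`. A toy standalone model of that contract (not kernel code):

	/* Toy model: copy_from_user() returns bytes NOT copied.  Returning an
	 * errno here would make the caller's "copied = len - ret" nonsense. */
	#include <stdio.h>
	#include <string.h>

	static unsigned long toy_copy_from_user(void *to, const void *from,
						unsigned long n, unsigned long ok)
	{
		unsigned long done = ok < n ? ok : n;	/* stop at the "fault" */

		memcpy(to, from, done);
		return n - done;		/* bytes left uncopied, never an errno */
	}

	int main(void)
	{
		char src[8] = "abcdefg", dst[8] = { 0 };
		unsigned long left = toy_copy_from_user(dst, src, sizeof(src), 5);

		printf("copied %lu of %zu bytes\n", sizeof(src) - left, sizeof(src));
		return 0;
	}
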
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index de00c4619a55..53243ca7816d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2434,6 +2434,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
2434 cfg = irq_cfg(irq); 2434 cfg = irq_cfg(irq);
2435 raw_spin_lock(&desc->lock); 2435 raw_spin_lock(&desc->lock);
2436 2436
2437 /*
2438 * Check if the irq migration is in progress. If so, we
2439 * haven't received the cleanup request yet for this irq.
2440 */
2441 if (cfg->move_in_progress)
2442 goto unlock;
2443
2437 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2444 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2438 goto unlock; 2445 goto unlock;
2439 2446
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 05ed7ab2ca48..a1a7876cadcb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -733,13 +733,13 @@ struct early_res {
733}; 733};
734static struct early_res early_res[MAX_EARLY_RES] __initdata = { 734static struct early_res early_res[MAX_EARLY_RES] __initdata = {
735 { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */ 735 { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
736#ifdef CONFIG_X86_32 736#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
737 /* 737 /*
738 * But first pinch a few for the stack/trampoline stuff 738 * But first pinch a few for the stack/trampoline stuff
739 * FIXME: Don't need the extra page at 4K, but need to fix 739 * FIXME: Don't need the extra page at 4K, but need to fix
740 * trampoline before removing it. (see the GDT stuff) 740 * trampoline before removing it. (see the GDT stuff)
741 */ 741 */
742 { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 }, 742 { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
743#endif 743#endif
744 744
745 {} 745 {}
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
index b7a55dc55d13..f81a2fa8fe25 100644
--- a/arch/x86/pci/intel_bus.c
+++ b/arch/x86/pci/intel_bus.c
@@ -49,6 +49,10 @@ static void __devinit pci_root_bus_res(struct pci_dev *dev)
49 u64 mmioh_base, mmioh_end; 49 u64 mmioh_base, mmioh_end;
50 int bus_base, bus_end; 50 int bus_base, bus_end;
51 51
52 /* some sys doesn't get mmconf enabled */
53 if (dev->cfg_size < 0x120)
54 return;
55
52 if (pci_root_num >= PCI_ROOT_NR) { 56 if (pci_root_num >= PCI_ROOT_NR) {
53 printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n"); 57 printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
54 return; 58 return;
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 628eae3e9b83..a1fce68e3bbe 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
39 struct drm_ati_pcigart_info *gart_info) 39 struct drm_ati_pcigart_info *gart_info)
40{ 40{
41 gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, 41 gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
42 PAGE_SIZE, 42 PAGE_SIZE);
43 gart_info->table_mask);
44 if (gart_info->table_handle == NULL) 43 if (gart_info->table_handle == NULL)
45 return -ENOMEM; 44 return -ENOMEM;
46 45
@@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
112 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { 111 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
113 DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); 112 DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
114 113
114 if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
115 DRM_ERROR("fail to set dma mask to 0x%Lx\n",
116 gart_info->table_mask);
117 ret = 1;
118 goto done;
119 }
120
115 ret = drm_ati_alloc_pcigart_table(dev, gart_info); 121 ret = drm_ati_alloc_pcigart_table(dev, gart_info);
116 if (ret) { 122 if (ret) {
117 DRM_ERROR("cannot allocate PCI GART page!\n"); 123 DRM_ERROR("cannot allocate PCI GART page!\n");
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3d09e304f6f4..8417cc4c43f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
326 * As we're limiting the address to 2^32-1 (or less), 326 * As we're limiting the address to 2^32-1 (or less),
327 * casting it down to 32 bits is no problem, but we 327 * casting it down to 32 bits is no problem, but we
328 * need to point to a 64bit variable first. */ 328 * need to point to a 64bit variable first. */
329 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); 329 dmah = drm_pci_alloc(dev, map->size, map->size);
330 if (!dmah) { 330 if (!dmah) {
331 kfree(map); 331 kfree(map);
332 return -ENOMEM; 332 return -ENOMEM;
@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
885 885
886 while (entry->buf_count < count) { 886 while (entry->buf_count < count) {
887 887
888 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); 888 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
889 889
890 if (!dmah) { 890 if (!dmah) {
891 /* Set count correctly so we free the proper amount. */ 891 /* Set count correctly so we free the proper amount. */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5c9f79877cbf..defcaf108460 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -911,23 +911,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
911 struct drm_device *dev = connector->dev; 911 struct drm_device *dev = connector->dev;
912 struct cvt_timing *cvt; 912 struct cvt_timing *cvt;
913 const int rates[] = { 60, 85, 75, 60, 50 }; 913 const int rates[] = { 60, 85, 75, 60, 50 };
914 const u8 empty[3] = { 0, 0, 0 };
914 915
915 for (i = 0; i < 4; i++) { 916 for (i = 0; i < 4; i++) {
916 int uninitialized_var(width), height; 917 int uninitialized_var(width), height;
917 cvt = &(timing->data.other_data.data.cvt[i]); 918 cvt = &(timing->data.other_data.data.cvt[i]);
918 919
919 height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; 920 if (!memcmp(cvt->code, empty, 3))
920 switch (cvt->code[1] & 0xc0) { 921 continue;
922
923 height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
924 switch (cvt->code[1] & 0x0c) {
921 case 0x00: 925 case 0x00:
922 width = height * 4 / 3; 926 width = height * 4 / 3;
923 break; 927 break;
924 case 0x40: 928 case 0x04:
925 width = height * 16 / 9; 929 width = height * 16 / 9;
926 break; 930 break;
927 case 0x80: 931 case 0x08:
928 width = height * 16 / 10; 932 width = height * 16 / 10;
929 break; 933 break;
930 case 0xc0: 934 case 0x0c:
931 width = height * 15 / 9; 935 width = height * 15 / 9;
932 break; 936 break;
933 } 937 }
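
The drm_edid.c hunk above fixes how the 3-byte CVT descriptor is unpacked: the upper four bits of the vertical size sit in bits 7:4 of the second byte (so the shift is << 4, not << 8) and the aspect ratio in bits 3:2 (so the mask is 0x0c). A standalone decode sketch using the corrected shifts, with an invented descriptor:

	/* Decodes a 3-byte EDID CVT code the way the corrected hunk does:
	 * height from code[0] plus bits 7:4 of code[1], aspect from bits 3:2. */
	#include <stdio.h>

	int main(void)
	{
		unsigned char code[3] = { 0x7f, 0x10, 0x00 };	/* invented example */
		int height = (code[0] + ((code[1] & 0xf0) << 4) + 1) * 2;
		int width = 0;

		switch (code[1] & 0x0c) {
		case 0x00: width = height * 4 / 3;   break;
		case 0x04: width = height * 16 / 9;  break;
		case 0x08: width = height * 16 / 10; break;
		case 0x0c: width = height * 15 / 9;  break;
		}
		printf("%dx%d\n", width, height);	/* 1024x768 for this code */
		return 0;
	}
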
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b49fa055f4f..100ee48760b7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
156 force = DRM_FORCE_ON; 156 force = DRM_FORCE_ON;
157 break; 157 break;
158 case 'D': 158 case 'D':
159 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) || 159 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
160 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) 160 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
161 force = DRM_FORCE_ON; 161 force = DRM_FORCE_ON;
162 else 162 else
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 577094fb1995..e68ebf92fa2a 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -47,8 +47,7 @@
47/** 47/**
48 * \brief Allocate a PCI consistent memory block, for DMA. 48 * \brief Allocate a PCI consistent memory block, for DMA.
49 */ 49 */
50drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, 50drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
51 dma_addr_t maxaddr)
52{ 51{
53 drm_dma_handle_t *dmah; 52 drm_dma_handle_t *dmah;
54#if 1 53#if 1
@@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
63 if (align > size) 62 if (align > size)
64 return NULL; 63 return NULL;
65 64
66 if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
67 DRM_ERROR("Setting pci dma mask failed\n");
68 return NULL;
69 }
70
71 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); 65 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
72 if (!dmah) 66 if (!dmah)
73 return NULL; 67 return NULL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b580..9c9998c4dceb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
272 mem = kmap_atomic(pages[page], KM_USER0); 272 mem = kmap_atomic(pages[page], KM_USER0);
273 for (i = 0; i < PAGE_SIZE; i += 4) 273 for (i = 0; i < PAGE_SIZE; i += 4)
274 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 274 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
275 kunmap_atomic(pages[page], KM_USER0); 275 kunmap_atomic(mem, KM_USER0);
276 } 276 }
277} 277}
278 278
@@ -386,34 +386,6 @@ out:
386 return 0; 386 return 0;
387} 387}
388 388
389static int i915_registers_info(struct seq_file *m, void *data) {
390 struct drm_info_node *node = (struct drm_info_node *) m->private;
391 struct drm_device *dev = node->minor->dev;
392 drm_i915_private_t *dev_priv = dev->dev_private;
393 uint32_t reg;
394
395#define DUMP_RANGE(start, end) \
396 for (reg=start; reg < end; reg += 4) \
397 seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
398
399 DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
400 DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
401 DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
402 DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
403 DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
404 DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
405 DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
406 DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
407 DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
408 DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
409 DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
410 DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
411 DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
412 DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
413
414 return 0;
415}
416
417static int 389static int
418i915_wedged_open(struct inode *inode, 390i915_wedged_open(struct inode *inode,
419 struct file *filp) 391 struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
519} 491}
520 492
521static struct drm_info_list i915_debugfs_list[] = { 493static struct drm_info_list i915_debugfs_list[] = {
522 {"i915_regs", i915_registers_info, 0},
523 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 494 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
524 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 495 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
525 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 496 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfeac7f57..bbe47812e4b6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
123 drm_i915_private_t *dev_priv = dev->dev_private; 123 drm_i915_private_t *dev_priv = dev->dev_private;
124 /* Program Hardware Status Page */ 124 /* Program Hardware Status Page */
125 dev_priv->status_page_dmah = 125 dev_priv->status_page_dmah =
126 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); 126 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
127 127
128 if (!dev_priv->status_page_dmah) { 128 if (!dev_priv->status_page_dmah) {
129 DRM_ERROR("Can not allocate hardware status page\n"); 129 DRM_ERROR("Can not allocate hardware status page\n");
@@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
813 case I915_PARAM_HAS_PAGEFLIPPING: 813 case I915_PARAM_HAS_PAGEFLIPPING:
814 value = 1; 814 value = 1;
815 break; 815 break;
816 case I915_PARAM_HAS_EXECBUF2:
817 /* depends on GEM */
818 value = dev_priv->has_gem;
819 break;
816 default: 820 default:
817 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 821 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
818 param->param); 822 param->param);
819 return -EINVAL; 823 return -EINVAL;
820 } 824 }
821 825
@@ -1117,7 +1121,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1117{ 1121{
1118 struct drm_i915_private *dev_priv = dev->dev_private; 1122 struct drm_i915_private *dev_priv = dev->dev_private;
1119 struct drm_mm_node *compressed_fb, *compressed_llb; 1123 struct drm_mm_node *compressed_fb, *compressed_llb;
1120 unsigned long cfb_base, ll_base; 1124 unsigned long cfb_base;
1125 unsigned long ll_base = 0;
1121 1126
1122 /* Leave 1M for line length buffer & misc. */ 1127 /* Leave 1M for line length buffer & misc. */
1123 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1128 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1205,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
1200 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 1205 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
1201 0xff000000; 1206 0xff000000;
1202 1207
1203 if (IS_MOBILE(dev) || IS_I9XX(dev))
1204 dev_priv->cursor_needs_physical = true;
1205 else
1206 dev_priv->cursor_needs_physical = false;
1207
1208 if (IS_I965G(dev) || IS_G33(dev))
1209 dev_priv->cursor_needs_physical = false;
1210
1211 /* Basic memrange allocator for stolen space (aka vram) */ 1208 /* Basic memrange allocator for stolen space (aka vram) */
1212 drm_mm_init(&dev_priv->vram, 0, prealloc_size); 1209 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1213 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); 1210 DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1254,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
1257 if (ret) 1254 if (ret)
1258 goto destroy_ringbuffer; 1255 goto destroy_ringbuffer;
1259 1256
1257 intel_modeset_init(dev);
1258
1260 ret = drm_irq_install(dev); 1259 ret = drm_irq_install(dev);
1261 if (ret) 1260 if (ret)
1262 goto destroy_ringbuffer; 1261 goto destroy_ringbuffer;
@@ -1271,8 +1270,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
1271 1270
1272 I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); 1271 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1273 1272
1274 intel_modeset_init(dev);
1275
1276 drm_helper_initial_config(dev); 1273 drm_helper_initial_config(dev);
1277 1274
1278 return 0; 1275 return 0;
@@ -1360,7 +1357,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1360{ 1357{
1361 struct drm_i915_private *dev_priv = dev->dev_private; 1358 struct drm_i915_private *dev_priv = dev->dev_private;
1362 resource_size_t base, size; 1359 resource_size_t base, size;
1363 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; 1360 int ret = 0, mmio_bar;
1364 uint32_t agp_size, prealloc_size, prealloc_start; 1361 uint32_t agp_size, prealloc_size, prealloc_start;
1365 1362
1366 /* i915 has 4 more counters */ 1363 /* i915 has 4 more counters */
@@ -1376,8 +1373,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1376 1373
1377 dev->dev_private = (void *)dev_priv; 1374 dev->dev_private = (void *)dev_priv;
1378 dev_priv->dev = dev; 1375 dev_priv->dev = dev;
1376 dev_priv->info = (struct intel_device_info *) flags;
1379 1377
1380 /* Add register map (needed for suspend/resume) */ 1378 /* Add register map (needed for suspend/resume) */
1379 mmio_bar = IS_I9XX(dev) ? 0 : 1;
1381 base = drm_get_resource_start(dev, mmio_bar); 1380 base = drm_get_resource_start(dev, mmio_bar);
1382 size = drm_get_resource_len(dev, mmio_bar); 1381 size = drm_get_resource_len(dev, mmio_bar);
1383 1382
@@ -1652,6 +1651,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
1652 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1651 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1653 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1652 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1654 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 1653 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1654 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
1655 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1655 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1656 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 1656 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1657 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), 1657 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 24286ca168fc..2ffffd7ae09a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35 35
36#include "drm_pciids.h"
37#include <linux/console.h> 36#include <linux/console.h>
38#include "drm_crtc_helper.h" 37#include "drm_crtc_helper.h"
39 38
@@ -48,8 +47,124 @@ module_param_named(powersave, i915_powersave, int, 0400);
48 47
49static struct drm_driver driver; 48static struct drm_driver driver;
50 49
51static struct pci_device_id pciidlist[] = { 50#define INTEL_VGA_DEVICE(id, info) { \
52 i915_PCI_IDS 51 .class = PCI_CLASS_DISPLAY_VGA << 8, \
52 .class_mask = 0xffff00, \
53 .vendor = 0x8086, \
54 .device = id, \
55 .subvendor = PCI_ANY_ID, \
56 .subdevice = PCI_ANY_ID, \
57 .driver_data = (unsigned long) info }
58
59const static struct intel_device_info intel_i830_info = {
60 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
61};
62
63const static struct intel_device_info intel_845g_info = {
64 .is_i8xx = 1,
65};
66
67const static struct intel_device_info intel_i85x_info = {
68 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
69};
70
71const static struct intel_device_info intel_i865g_info = {
72 .is_i8xx = 1,
73};
74
75const static struct intel_device_info intel_i915g_info = {
76 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
77};
78const static struct intel_device_info intel_i915gm_info = {
79 .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
80 .cursor_needs_physical = 1,
81};
82const static struct intel_device_info intel_i945g_info = {
83 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
84};
85const static struct intel_device_info intel_i945gm_info = {
86 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
87 .has_hotplug = 1, .cursor_needs_physical = 1,
88};
89
90const static struct intel_device_info intel_i965g_info = {
91 .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
92};
93
94const static struct intel_device_info intel_i965gm_info = {
95 .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
96 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
97 .has_hotplug = 1,
98};
99
100const static struct intel_device_info intel_g33_info = {
101 .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
102 .has_hotplug = 1,
103};
104
105const static struct intel_device_info intel_g45_info = {
106 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
107 .has_pipe_cxsr = 1,
108 .has_hotplug = 1,
109};
110
111const static struct intel_device_info intel_gm45_info = {
112 .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
113 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
114 .has_pipe_cxsr = 1,
115 .has_hotplug = 1,
116};
117
118const static struct intel_device_info intel_pineview_info = {
119 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
120 .has_pipe_cxsr = 1,
121 .has_hotplug = 1,
122};
123
124const static struct intel_device_info intel_ironlake_d_info = {
125 .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
126 .has_pipe_cxsr = 1,
127 .has_hotplug = 1,
128};
129
130const static struct intel_device_info intel_ironlake_m_info = {
131 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
132 .need_gfx_hws = 1, .has_rc6 = 1,
133 .has_hotplug = 1,
134};
135
136const static struct pci_device_id pciidlist[] = {
137 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
138 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
139 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
140 INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
141 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
142 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
143 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
144 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
145 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
146 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
147 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
148 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
149 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
150 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
151 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
152 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
153 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
154 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
155 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
156 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
157 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
158 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
159 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
160 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
161 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
162 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
163 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
164 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
165 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
166 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
167 {0, 0, 0}
53}; 168};
54 169
55#if defined(CONFIG_DRM_I915_KMS) 170#if defined(CONFIG_DRM_I915_KMS)
@@ -284,6 +399,52 @@ i915_pci_resume(struct pci_dev *pdev)
284 return i915_resume(dev); 399 return i915_resume(dev);
285} 400}
286 401
402static int
403i915_pm_suspend(struct device *dev)
404{
405 return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
406}
407
408static int
409i915_pm_resume(struct device *dev)
410{
411 return i915_pci_resume(to_pci_dev(dev));
412}
413
414static int
415i915_pm_freeze(struct device *dev)
416{
417 return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
418}
419
420static int
421i915_pm_thaw(struct device *dev)
422{
423 /* thaw during hibernate, do nothing! */
424 return 0;
425}
426
427static int
428i915_pm_poweroff(struct device *dev)
429{
430 return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
431}
432
433static int
434i915_pm_restore(struct device *dev)
435{
436 return i915_pci_resume(to_pci_dev(dev));
437}
438
439const struct dev_pm_ops i915_pm_ops = {
440 .suspend = i915_pm_suspend,
441 .resume = i915_pm_resume,
442 .freeze = i915_pm_freeze,
443 .thaw = i915_pm_thaw,
444 .poweroff = i915_pm_poweroff,
445 .restore = i915_pm_restore,
446};
447
287static struct vm_operations_struct i915_gem_vm_ops = { 448static struct vm_operations_struct i915_gem_vm_ops = {
288 .fault = i915_gem_fault, 449 .fault = i915_gem_fault,
289 .open = drm_gem_vm_open, 450 .open = drm_gem_vm_open,
@@ -303,8 +464,6 @@ static struct drm_driver driver = {
303 .lastclose = i915_driver_lastclose, 464 .lastclose = i915_driver_lastclose,
304 .preclose = i915_driver_preclose, 465 .preclose = i915_driver_preclose,
305 .postclose = i915_driver_postclose, 466 .postclose = i915_driver_postclose,
306 .suspend = i915_suspend,
307 .resume = i915_resume,
308 .device_is_agp = i915_driver_device_is_agp, 467 .device_is_agp = i915_driver_device_is_agp,
309 .enable_vblank = i915_enable_vblank, 468 .enable_vblank = i915_enable_vblank,
310 .disable_vblank = i915_disable_vblank, 469 .disable_vblank = i915_disable_vblank,
@@ -344,10 +503,7 @@ static struct drm_driver driver = {
344 .id_table = pciidlist, 503 .id_table = pciidlist,
345 .probe = i915_pci_probe, 504 .probe = i915_pci_probe,
346 .remove = i915_pci_remove, 505 .remove = i915_pci_remove,
347#ifdef CONFIG_PM 506 .driver.pm = &i915_pm_ops,
348 .resume = i915_pci_resume,
349 .suspend = i915_pci_suspend,
350#endif
351 }, 507 },
352 508
353 .name = DRIVER_NAME, 509 .name = DRIVER_NAME,
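
The i915_drv.c rework above replaces the generated ID list with per-device intel_device_info structs carried in each PCI table entry's driver_data, which i915_driver_load() later stores as dev_priv->info so feature checks read flags instead of comparing PCI IDs. A toy standalone model of that lookup pattern (struct fields and IDs here are illustrative, not the i915 definitions):

	/* Toy model of the driver_data pattern: each device ID carries a pointer
	 * to a capability struct, so code asks info->has_x instead of comparing
	 * PCI IDs all over the driver. */
	#include <stdio.h>

	struct toy_device_info {
		unsigned char is_mobile : 1;
		unsigned char has_hotplug : 1;
	};

	struct toy_pci_id {
		unsigned short device;
		const struct toy_device_info *info;	/* plays the driver_data role */
	};

	static const struct toy_device_info g45_info  = { .has_hotplug = 1 };
	static const struct toy_device_info gm45_info = { .is_mobile = 1, .has_hotplug = 1 };

	static const struct toy_pci_id ids[] = {
		{ 0x2e22, &g45_info },
		{ 0x2a42, &gm45_info },
		{ 0, NULL },
	};

	int main(void)
	{
		const struct toy_pci_id *id;

		for (id = ids; id->device; id++)
			printf("0x%04x: mobile=%u hotplug=%u\n", id->device,
			       id->info->is_mobile, id->info->has_hotplug);
		return 0;
	}
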
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac72f5bb..29dd67626967 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
172 172
173struct intel_overlay; 173struct intel_overlay;
174 174
175struct intel_device_info {
176 u8 is_mobile : 1;
177 u8 is_i8xx : 1;
178 u8 is_i915g : 1;
179 u8 is_i9xx : 1;
180 u8 is_i945gm : 1;
181 u8 is_i965g : 1;
182 u8 is_i965gm : 1;
183 u8 is_g33 : 1;
184 u8 need_gfx_hws : 1;
185 u8 is_g4x : 1;
186 u8 is_pineview : 1;
187 u8 is_ironlake : 1;
188 u8 has_fbc : 1;
189 u8 has_rc6 : 1;
190 u8 has_pipe_cxsr : 1;
191 u8 has_hotplug : 1;
192 u8 cursor_needs_physical : 1;
193};
194
175typedef struct drm_i915_private { 195typedef struct drm_i915_private {
176 struct drm_device *dev; 196 struct drm_device *dev;
177 197
198 const struct intel_device_info *info;
199
178 int has_gem; 200 int has_gem;
179 201
180 void __iomem *regs; 202 void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
232 int hangcheck_count; 254 int hangcheck_count;
233 uint32_t last_acthd; 255 uint32_t last_acthd;
234 256
235 bool cursor_needs_physical;
236
237 struct drm_mm vram; 257 struct drm_mm vram;
238 258
239 unsigned long cfb_size; 259 unsigned long cfb_size;
@@ -287,8 +307,6 @@ typedef struct drm_i915_private {
287 u32 saveDSPACNTR; 307 u32 saveDSPACNTR;
288 u32 saveDSPBCNTR; 308 u32 saveDSPBCNTR;
289 u32 saveDSPARB; 309 u32 saveDSPARB;
290 u32 saveRENDERSTANDBY;
291 u32 savePWRCTXA;
292 u32 saveHWS; 310 u32 saveHWS;
293 u32 savePIPEACONF; 311 u32 savePIPEACONF;
294 u32 savePIPEBCONF; 312 u32 savePIPEBCONF;
@@ -561,6 +579,7 @@ typedef struct drm_i915_private {
561 u16 orig_clock; 579 u16 orig_clock;
562 int child_dev_num; 580 int child_dev_num;
563 struct child_device_config *child_dev; 581 struct child_device_config *child_dev;
582 struct drm_connector *int_lvds_connector;
564} drm_i915_private_t; 583} drm_i915_private_t;
565 584
566/** driver private structure attached to each drm_gem_object */ 585/** driver private structure attached to each drm_gem_object */
@@ -794,6 +813,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
794 struct drm_file *file_priv); 813 struct drm_file *file_priv);
795int i915_gem_execbuffer(struct drm_device *dev, void *data, 814int i915_gem_execbuffer(struct drm_device *dev, void *data,
796 struct drm_file *file_priv); 815 struct drm_file *file_priv);
816int i915_gem_execbuffer2(struct drm_device *dev, void *data,
817 struct drm_file *file_priv);
797int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 818int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
798 struct drm_file *file_priv); 819 struct drm_file *file_priv);
799int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 820int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -860,6 +881,9 @@ void i915_gem_shrinker_exit(void);
860void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 881void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
861void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); 882void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
862void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); 883void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
884bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
885 int tiling_mode);
886bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
863 887
864/* i915_gem_debug.c */ 888/* i915_gem_debug.c */
865void i915_gem_dump_object(struct drm_gem_object *obj, int len, 889void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1006,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
982extern int i915_wrap_ring(struct drm_device * dev); 1006extern int i915_wrap_ring(struct drm_device * dev);
983extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 1007extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
984 1008
985#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1009#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
986#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1010
987#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1011#define IS_I830(dev) ((dev)->pci_device == 0x3577)
988#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1012#define IS_845G(dev) ((dev)->pci_device == 0x2562)
989#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) 1013#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
990 1014#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
991#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) 1015#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
992#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1016#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
993#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1017#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
994#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ 1018#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
995 (dev)->pci_device == 0x27AE) 1019#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
996#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ 1020#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
997 (dev)->pci_device == 0x2982 || \ 1021#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
998 (dev)->pci_device == 0x2992 || \ 1022#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
999 (dev)->pci_device == 0x29A2 || \ 1023#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1000 (dev)->pci_device == 0x2A02 || \ 1024#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1001 (dev)->pci_device == 0x2A12 || \ 1025#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1002 (dev)->pci_device == 0x2A42 || \ 1026#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1003 (dev)->pci_device == 0x2E02 || \ 1027#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1004 (dev)->pci_device == 0x2E12 || \
1005 (dev)->pci_device == 0x2E22 || \
1006 (dev)->pci_device == 0x2E32 || \
1007 (dev)->pci_device == 0x2E42 || \
1008 (dev)->pci_device == 0x0042 || \
1009 (dev)->pci_device == 0x0046)
1010
1011#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
1012 (dev)->pci_device == 0x2A12)
1013
1014#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1015
1016#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
1017 (dev)->pci_device == 0x2E12 || \
1018 (dev)->pci_device == 0x2E22 || \
1019 (dev)->pci_device == 0x2E32 || \
1020 (dev)->pci_device == 0x2E42 || \
1021 IS_GM45(dev))
1022
1023#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1024#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1025#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
1026
1027#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
1028 (dev)->pci_device == 0x29B2 || \
1029 (dev)->pci_device == 0x29D2 || \
1030 (IS_PINEVIEW(dev)))
1031
1032#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1028#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1033#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1029#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1034#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev)) 1030#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
1035 1031#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1036#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 1032#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1037 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
1038 IS_IRONLAKE(dev))
1039 1033
1040#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 1034#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1041 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
1042 IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
1043 1035
1044#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
1045 IS_IRONLAKE(dev))
1046/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1036/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1047 * rows, which changed the alignment requirements and fence programming. 1037 * rows, which changed the alignment requirements and fence programming.
1048 */ 1038 */
@@ -1054,17 +1044,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1054#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1044#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1055#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ 1045#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
1056 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev)) 1046 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
1057#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) 1047#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1058/* dsparb controlled by hw only */ 1048/* dsparb controlled by hw only */
1059#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1049#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1060 1050
1061#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) 1051#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
1062#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1052#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1063#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \ 1053#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1064 (IS_I9XX(dev) || IS_GM45(dev)) && \ 1054#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1065 !IS_PINEVIEW(dev) && \
1066 !IS_IRONLAKE(dev))
1067#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
1068 1055
1069#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1056#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1070 1057
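The i915_drv.h hunks above replace the long PCI-ID comparison macros with a per-chipset feature table reached through INTEL_INFO(dev). The matching i915_drv.c side of the patch (not shown in full here) pairs each PCI ID with a static intel_device_info entry; roughly, with the specific values below chosen only for illustration:

	static const struct intel_device_info intel_i945gm_info = {
		.is_i9xx = 1, .is_i945gm = 1, .is_mobile = 1,
		.has_hotplug = 1, .cursor_needs_physical = 1,
	};

	/* feature checks then read the table instead of matching device IDs */
	#define IS_I945GM(dev)	(INTEL_INFO(dev)->is_i945gm)

Adding a new chipset then means adding one table entry rather than touching a dozen macros.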
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050a..2748609f05b3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2021 /* blow away mappings if mapped through GTT */ 2021 /* blow away mappings if mapped through GTT */
2022 i915_gem_release_mmap(obj); 2022 i915_gem_release_mmap(obj);
2023 2023
2024 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2025 i915_gem_clear_fence_reg(obj);
2026
2027 /* Move the object to the CPU domain to ensure that 2024 /* Move the object to the CPU domain to ensure that
2028 * any possible CPU writes while it's not in the GTT 2025 * any possible CPU writes while it's not in the GTT
2029 * are flushed when we go to remap it. This will 2026 * are flushed when we go to remap it. This will
@@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2039 2036
2040 BUG_ON(obj_priv->active); 2037 BUG_ON(obj_priv->active);
2041 2038
2039 /* release the fence reg _after_ flushing */
2040 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2041 i915_gem_clear_fence_reg(obj);
2042
2042 if (obj_priv->agp_mem != NULL) { 2043 if (obj_priv->agp_mem != NULL) {
2043 drm_unbind_agp(obj_priv->agp_mem); 2044 drm_unbind_agp(obj_priv->agp_mem);
2044 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 2045 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2581 bool retry_alloc = false; 2582 bool retry_alloc = false;
2582 int ret; 2583 int ret;
2583 2584
2584 if (dev_priv->mm.suspended)
2585 return -EBUSY;
2586
2587 if (obj_priv->madv != I915_MADV_WILLNEED) { 2585 if (obj_priv->madv != I915_MADV_WILLNEED) {
2588 DRM_ERROR("Attempting to bind a purgeable object\n"); 2586 DRM_ERROR("Attempting to bind a purgeable object\n");
2589 return -EINVAL; 2587 return -EINVAL;
@@ -3198,7 +3196,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3198static int 3196static int
3199i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 3197i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3200 struct drm_file *file_priv, 3198 struct drm_file *file_priv,
3201 struct drm_i915_gem_exec_object *entry, 3199 struct drm_i915_gem_exec_object2 *entry,
3202 struct drm_i915_gem_relocation_entry *relocs) 3200 struct drm_i915_gem_relocation_entry *relocs)
3203{ 3201{
3204 struct drm_device *dev = obj->dev; 3202 struct drm_device *dev = obj->dev;
@@ -3206,12 +3204,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3206 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3204 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3207 int i, ret; 3205 int i, ret;
3208 void __iomem *reloc_page; 3206 void __iomem *reloc_page;
3207 bool need_fence;
3208
3209 need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3210 obj_priv->tiling_mode != I915_TILING_NONE;
3211
3212 /* Check fence reg constraints and rebind if necessary */
3213 if (need_fence && !i915_obj_fenceable(dev, obj))
3214 i915_gem_object_unbind(obj);
3209 3215
3210 /* Choose the GTT offset for our buffer and put it there. */ 3216 /* Choose the GTT offset for our buffer and put it there. */
3211 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 3217 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3212 if (ret) 3218 if (ret)
3213 return ret; 3219 return ret;
3214 3220
3221 /*
3222 * Pre-965 chips need a fence register set up in order to
3223 * properly handle blits to/from tiled surfaces.
3224 */
3225 if (need_fence) {
3226 ret = i915_gem_object_get_fence_reg(obj);
3227 if (ret != 0) {
3228 if (ret != -EBUSY && ret != -ERESTARTSYS)
3229 DRM_ERROR("Failure to install fence: %d\n",
3230 ret);
3231 i915_gem_object_unpin(obj);
3232 return ret;
3233 }
3234 }
3235
3215 entry->offset = obj_priv->gtt_offset; 3236 entry->offset = obj_priv->gtt_offset;
3216 3237
3217 /* Apply the relocations, using the GTT aperture to avoid cache 3238 /* Apply the relocations, using the GTT aperture to avoid cache
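This hunk moves fence-register acquisition from pin time to execbuffer time: a fence is claimed only when the exec entry carries EXEC_OBJECT_NEEDS_FENCE and the object is tiled, and i915_obj_fenceable() may force an unbind/rebind first so the offset and size can satisfy the fence constraints. The corresponding block is deleted from i915_gem_object_pin() later in this file's diff.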
@@ -3373,7 +3394,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3373 */ 3394 */
3374static int 3395static int
3375i915_dispatch_gem_execbuffer(struct drm_device *dev, 3396i915_dispatch_gem_execbuffer(struct drm_device *dev,
3376 struct drm_i915_gem_execbuffer *exec, 3397 struct drm_i915_gem_execbuffer2 *exec,
3377 struct drm_clip_rect *cliprects, 3398 struct drm_clip_rect *cliprects,
3378 uint64_t exec_offset) 3399 uint64_t exec_offset)
3379{ 3400{
@@ -3463,7 +3484,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3463} 3484}
3464 3485
3465static int 3486static int
3466i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, 3487i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
3467 uint32_t buffer_count, 3488 uint32_t buffer_count,
3468 struct drm_i915_gem_relocation_entry **relocs) 3489 struct drm_i915_gem_relocation_entry **relocs)
3469{ 3490{
@@ -3478,8 +3499,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3478 } 3499 }
3479 3500
3480 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); 3501 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3481 if (*relocs == NULL) 3502 if (*relocs == NULL) {
3503 DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
3482 return -ENOMEM; 3504 return -ENOMEM;
3505 }
3483 3506
3484 for (i = 0; i < buffer_count; i++) { 3507 for (i = 0; i < buffer_count; i++) {
3485 struct drm_i915_gem_relocation_entry __user *user_relocs; 3508 struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,7 +3526,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3503} 3526}
3504 3527
3505static int 3528static int
3506i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, 3529i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3507 uint32_t buffer_count, 3530 uint32_t buffer_count,
3508 struct drm_i915_gem_relocation_entry *relocs) 3531 struct drm_i915_gem_relocation_entry *relocs)
3509{ 3532{
@@ -3536,7 +3559,7 @@ err:
3536} 3559}
3537 3560
3538static int 3561static int
3539i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, 3562i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
3540 uint64_t exec_offset) 3563 uint64_t exec_offset)
3541{ 3564{
3542 uint32_t exec_start, exec_len; 3565 uint32_t exec_start, exec_len;
@@ -3589,18 +3612,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
3589} 3612}
3590 3613
3591int 3614int
3592i915_gem_execbuffer(struct drm_device *dev, void *data, 3615i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3593 struct drm_file *file_priv) 3616 struct drm_file *file_priv,
3617 struct drm_i915_gem_execbuffer2 *args,
3618 struct drm_i915_gem_exec_object2 *exec_list)
3594{ 3619{
3595 drm_i915_private_t *dev_priv = dev->dev_private; 3620 drm_i915_private_t *dev_priv = dev->dev_private;
3596 struct drm_i915_gem_execbuffer *args = data;
3597 struct drm_i915_gem_exec_object *exec_list = NULL;
3598 struct drm_gem_object **object_list = NULL; 3621 struct drm_gem_object **object_list = NULL;
3599 struct drm_gem_object *batch_obj; 3622 struct drm_gem_object *batch_obj;
3600 struct drm_i915_gem_object *obj_priv; 3623 struct drm_i915_gem_object *obj_priv;
3601 struct drm_clip_rect *cliprects = NULL; 3624 struct drm_clip_rect *cliprects = NULL;
3602 struct drm_i915_gem_relocation_entry *relocs; 3625 struct drm_i915_gem_relocation_entry *relocs;
3603 int ret, ret2, i, pinned = 0; 3626 int ret = 0, ret2, i, pinned = 0;
3604 uint64_t exec_offset; 3627 uint64_t exec_offset;
3605 uint32_t seqno, flush_domains, reloc_index; 3628 uint32_t seqno, flush_domains, reloc_index;
3606 int pin_tries, flips; 3629 int pin_tries, flips;
@@ -3614,25 +3637,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3614 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 3637 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3615 return -EINVAL; 3638 return -EINVAL;
3616 } 3639 }
3617 /* Copy in the exec list from userland */
3618 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3619 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); 3640 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3620 if (exec_list == NULL || object_list == NULL) { 3641 if (object_list == NULL) {
3621 DRM_ERROR("Failed to allocate exec or object list " 3642 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3622 "for %d buffers\n",
3623 args->buffer_count); 3643 args->buffer_count);
3624 ret = -ENOMEM; 3644 ret = -ENOMEM;
3625 goto pre_mutex_err; 3645 goto pre_mutex_err;
3626 } 3646 }
3627 ret = copy_from_user(exec_list,
3628 (struct drm_i915_relocation_entry __user *)
3629 (uintptr_t) args->buffers_ptr,
3630 sizeof(*exec_list) * args->buffer_count);
3631 if (ret != 0) {
3632 DRM_ERROR("copy %d exec entries failed %d\n",
3633 args->buffer_count, ret);
3634 goto pre_mutex_err;
3635 }
3636 3647
3637 if (args->num_cliprects != 0) { 3648 if (args->num_cliprects != 0) {
3638 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), 3649 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3884,20 +3895,6 @@ err:
3884 3895
3885 mutex_unlock(&dev->struct_mutex); 3896 mutex_unlock(&dev->struct_mutex);
3886 3897
3887 if (!ret) {
3888 /* Copy the new buffer offsets back to the user's exec list. */
3889 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3890 (uintptr_t) args->buffers_ptr,
3891 exec_list,
3892 sizeof(*exec_list) * args->buffer_count);
3893 if (ret) {
3894 ret = -EFAULT;
3895 DRM_ERROR("failed to copy %d exec entries "
3896 "back to user (%d)\n",
3897 args->buffer_count, ret);
3898 }
3899 }
3900
3901 /* Copy the updated relocations out regardless of current error 3898 /* Copy the updated relocations out regardless of current error
3902 * state. Failure to update the relocs would mean that the next 3899 * state. Failure to update the relocs would mean that the next
3903 * time userland calls execbuf, it would do so with presumed offset 3900 * time userland calls execbuf, it would do so with presumed offset
@@ -3914,12 +3911,158 @@ err:
3914 3911
3915pre_mutex_err: 3912pre_mutex_err:
3916 drm_free_large(object_list); 3913 drm_free_large(object_list);
3917 drm_free_large(exec_list);
3918 kfree(cliprects); 3914 kfree(cliprects);
3919 3915
3920 return ret; 3916 return ret;
3921} 3917}
3922 3918
3919/*
3920 * Legacy execbuffer just creates an exec2 list from the original exec object
3921 * list array and passes it to the real function.
3922 */
3923int
3924i915_gem_execbuffer(struct drm_device *dev, void *data,
3925 struct drm_file *file_priv)
3926{
3927 struct drm_i915_gem_execbuffer *args = data;
3928 struct drm_i915_gem_execbuffer2 exec2;
3929 struct drm_i915_gem_exec_object *exec_list = NULL;
3930 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3931 int ret, i;
3932
3933#if WATCH_EXEC
3934 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3935 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3936#endif
3937
3938 if (args->buffer_count < 1) {
3939 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3940 return -EINVAL;
3941 }
3942
3943 /* Copy in the exec list from userland */
3944 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3945 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3946 if (exec_list == NULL || exec2_list == NULL) {
3947 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3948 args->buffer_count);
3949 drm_free_large(exec_list);
3950 drm_free_large(exec2_list);
3951 return -ENOMEM;
3952 }
3953 ret = copy_from_user(exec_list,
3954 (struct drm_i915_relocation_entry __user *)
3955 (uintptr_t) args->buffers_ptr,
3956 sizeof(*exec_list) * args->buffer_count);
3957 if (ret != 0) {
3958 DRM_ERROR("copy %d exec entries failed %d\n",
3959 args->buffer_count, ret);
3960 drm_free_large(exec_list);
3961 drm_free_large(exec2_list);
3962 return -EFAULT;
3963 }
3964
3965 for (i = 0; i < args->buffer_count; i++) {
3966 exec2_list[i].handle = exec_list[i].handle;
3967 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3968 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3969 exec2_list[i].alignment = exec_list[i].alignment;
3970 exec2_list[i].offset = exec_list[i].offset;
3971 if (!IS_I965G(dev))
3972 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3973 else
3974 exec2_list[i].flags = 0;
3975 }
3976
3977 exec2.buffers_ptr = args->buffers_ptr;
3978 exec2.buffer_count = args->buffer_count;
3979 exec2.batch_start_offset = args->batch_start_offset;
3980 exec2.batch_len = args->batch_len;
3981 exec2.DR1 = args->DR1;
3982 exec2.DR4 = args->DR4;
3983 exec2.num_cliprects = args->num_cliprects;
3984 exec2.cliprects_ptr = args->cliprects_ptr;
3985 exec2.flags = 0;
3986
3987 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3988 if (!ret) {
3989 /* Copy the new buffer offsets back to the user's exec list. */
3990 for (i = 0; i < args->buffer_count; i++)
3991 exec_list[i].offset = exec2_list[i].offset;
3992 /* ... and back out to userspace */
3993 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3994 (uintptr_t) args->buffers_ptr,
3995 exec_list,
3996 sizeof(*exec_list) * args->buffer_count);
3997 if (ret) {
3998 ret = -EFAULT;
3999 DRM_ERROR("failed to copy %d exec entries "
4000 "back to user (%d)\n",
4001 args->buffer_count, ret);
4002 }
4003 } else {
4004 DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
4005 }
4006
4007 drm_free_large(exec_list);
4008 drm_free_large(exec2_list);
4009 return ret;
4010}
4011
4012int
4013i915_gem_execbuffer2(struct drm_device *dev, void *data,
4014 struct drm_file *file_priv)
4015{
4016 struct drm_i915_gem_execbuffer2 *args = data;
4017 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4018 int ret;
4019
4020#if WATCH_EXEC
4021 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4022 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4023#endif
4024
4025 if (args->buffer_count < 1) {
4026 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4027 return -EINVAL;
4028 }
4029
4030 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4031 if (exec2_list == NULL) {
4032 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4033 args->buffer_count);
4034 return -ENOMEM;
4035 }
4036 ret = copy_from_user(exec2_list,
4037 (struct drm_i915_relocation_entry __user *)
4038 (uintptr_t) args->buffers_ptr,
4039 sizeof(*exec2_list) * args->buffer_count);
4040 if (ret != 0) {
4041 DRM_ERROR("copy %d exec entries failed %d\n",
4042 args->buffer_count, ret);
4043 drm_free_large(exec2_list);
4044 return -EFAULT;
4045 }
4046
4047 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4048 if (!ret) {
4049 /* Copy the new buffer offsets back to the user's exec list. */
4050 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4051 (uintptr_t) args->buffers_ptr,
4052 exec2_list,
4053 sizeof(*exec2_list) * args->buffer_count);
4054 if (ret) {
4055 ret = -EFAULT;
4056 DRM_ERROR("failed to copy %d exec entries "
4057 "back to user (%d)\n",
4058 args->buffer_count, ret);
4059 }
4060 }
4061
4062 drm_free_large(exec2_list);
4063 return ret;
4064}
4065
3923int 4066int
3924i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 4067i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3925{ 4068{
@@ -3933,19 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3933 if (ret) 4076 if (ret)
3934 return ret; 4077 return ret;
3935 } 4078 }
3936 /* 4079
3937 * Pre-965 chips need a fence register set up in order to
3938 * properly handle tiled surfaces.
3939 */
3940 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
3941 ret = i915_gem_object_get_fence_reg(obj);
3942 if (ret != 0) {
3943 if (ret != -EBUSY && ret != -ERESTARTSYS)
3944 DRM_ERROR("Failure to install fence: %d\n",
3945 ret);
3946 return ret;
3947 }
3948 }
3949 obj_priv->pin_count++; 4080 obj_priv->pin_count++;
3950 4081
3951 /* If the object is not active and not pending a flush, 4082 /* If the object is not active and not pending a flush,
@@ -4708,7 +4839,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
4708 4839
4709 phys_obj->id = id; 4840 phys_obj->id = id;
4710 4841
4711 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); 4842 phys_obj->handle = drm_pci_alloc(dev, size, 0);
4712 if (!phys_obj->handle) { 4843 if (!phys_obj->handle) {
4713 ret = -ENOMEM; 4844 ret = -ENOMEM;
4714 goto kfree_obj; 4845 goto kfree_obj;
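The new execbuffer2 path takes drm_i915_gem_exec_object2 entries, so callers of the new interface decide per object whether a fence is needed, rather than getting the blanket pre-965 behaviour the legacy wrapper applies. A rough caller-side sketch using only the fields the conversion loop above touches (variable names are illustrative, ioctl plumbing omitted):

	struct drm_i915_gem_exec_object2 entry = {
		.handle           = bo_handle,		/* GEM handle of the buffer */
		.relocation_count = nr_relocs,
		.relocs_ptr       = (uintptr_t) relocs,
		.alignment        = 0,
		.offset           = presumed_offset,	/* kernel writes the real offset back */
		.flags            = is_tiled ? EXEC_OBJECT_NEEDS_FENCE : 0,
	};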
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c09bb..df278b2685bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
304 304
305 305
306/** 306/**
307 * Returns the size of the fence for a tiled object of the given size. 307 * Returns whether an object is currently fenceable. If not, it may need
308 * to be unbound and have its pitch adjusted.
308 */ 309 */
309static int 310bool
310i915_get_fence_size(struct drm_device *dev, int size) 311i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
311{ 312{
312 int i; 313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
313 int start;
314 314
315 if (IS_I965G(dev)) { 315 if (IS_I965G(dev)) {
316 /* The 965 can have fences at any page boundary. */ 316 /* The 965 can have fences at any page boundary. */
317 return ALIGN(size, 4096); 317 if (obj->size & 4095)
318 return false;
319 return true;
320 } else if (IS_I9XX(dev)) {
321 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
322 return false;
318 } else { 323 } else {
319 /* Align the size to a power of two greater than the smallest 324 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
320 * fence size. 325 return false;
321 */ 326 }
322 if (IS_I9XX(dev))
323 start = 1024 * 1024;
324 else
325 start = 512 * 1024;
326 327
327 for (i = start; i < size; i <<= 1) 328 /* Power of two sized... */
328 ; 329 if (obj->size & (obj->size - 1))
330 return false;
329 331
330 return i; 332 /* Objects must be size aligned as well */
331 } 333 if (obj_priv->gtt_offset & (obj->size - 1))
334 return false;
335 return true;
332} 336}
333 337
334/* Check pitch constraints for all chips & tiling formats */ 338/* Check pitch constraints for all chips & tiling formats */
335static bool 339bool
336i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 340i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
337{ 341{
338 int tile_width; 342 int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
384 if (stride & (stride - 1)) 388 if (stride & (stride - 1))
385 return false; 389 return false;
386 390
387 /* We don't handle the aperture area covered by the fence being bigger
388 * than the object size.
389 */
390 if (i915_get_fence_size(dev, size) != size)
391 return false;
392
393 return true; 391 return true;
394} 392}
395 393
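As a concrete example of the new i915_obj_fenceable() rules on pre-965 parts: a 1 MiB tiled object bound at GTT offset 0x100000 passes (power-of-two size, offset a multiple of the size, start within the per-generation fence start mask), while the same object bound at 0x180000 fails the size-alignment test and has to be unbound and rebound before execbuffer can install a fence for it.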
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85f4c5de97e2..7cd8110051b6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -313,6 +313,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
313 dev_priv->mm.irq_gem_seqno = seqno; 313 dev_priv->mm.irq_gem_seqno = seqno;
314 trace_i915_gem_request_complete(dev, seqno); 314 trace_i915_gem_request_complete(dev, seqno);
315 DRM_WAKEUP(&dev_priv->irq_queue); 315 DRM_WAKEUP(&dev_priv->irq_queue);
316 dev_priv->hangcheck_count = 0;
317 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
316 } 318 }
317 319
318 if (de_iir & DE_GSE) 320 if (de_iir & DE_GSE)
@@ -1084,6 +1086,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1084 (void) I915_READ(IER); 1086 (void) I915_READ(IER);
1085} 1087}
1086 1088
1089/*
1090 * Must be called after intel_modeset_init or hotplug interrupts won't be
1091 * enabled correctly.
1092 */
1087int i915_driver_irq_postinstall(struct drm_device *dev) 1093int i915_driver_irq_postinstall(struct drm_device *dev)
1088{ 1094{
1089 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1095 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1106,19 +1112,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1106 if (I915_HAS_HOTPLUG(dev)) { 1112 if (I915_HAS_HOTPLUG(dev)) {
1107 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1113 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1108 1114
1109 /* Leave other bits alone */ 1115 /* Note HDMI and DP share bits */
1110 hotplug_en |= HOTPLUG_EN_MASK; 1116 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1117 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1118 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1119 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1120 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1121 hotplug_en |= HDMID_HOTPLUG_INT_EN;
1122 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1123 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1124 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1125 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1126 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
1127 hotplug_en |= CRT_HOTPLUG_INT_EN;
1128 /* Ignore TV since it's buggy */
1129
1111 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1130 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1112 1131
1113 dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
1114 TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
1115 SDVOB_HOTPLUG_INT_STATUS;
1116 if (IS_G4X(dev)) {
1117 dev_priv->hotplug_supported_mask |=
1118 HDMIB_HOTPLUG_INT_STATUS |
1119 HDMIC_HOTPLUG_INT_STATUS |
1120 HDMID_HOTPLUG_INT_STATUS;
1121 }
1122 /* Enable in IER... */ 1132 /* Enable in IER... */
1123 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1133 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1124 /* and unmask in IMR */ 1134 /* and unmask in IMR */
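The hotplug hunk above inverts the old scheme: instead of enabling a fixed HOTPLUG_EN_MASK and then guessing which status bits to expect, irq_postinstall now enables only the lines that output-init code has advertised in hotplug_supported_mask, which is also why the new comment requires it to run after intel_modeset_init. Each encoder init contributes one line, as the intel_crt.c hunk later in this diff shows:

	/* advertise this connector's hotplug line; irq_postinstall enables it */
	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;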
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 974b3cf70618..149d360d64a3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -879,13 +879,6 @@
879#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 879#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
880#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 880#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
881#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f 881#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
882#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
883 HDMIC_HOTPLUG_INT_EN | \
884 HDMID_HOTPLUG_INT_EN | \
885 SDVOB_HOTPLUG_INT_EN | \
886 SDVOC_HOTPLUG_INT_EN | \
887 CRT_HOTPLUG_INT_EN)
888
889 882
890#define PORT_HOTPLUG_STAT 0x61114 883#define PORT_HOTPLUG_STAT 0x61114
891#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 884#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -982,6 +975,8 @@
982#define LVDS_PORT_EN (1 << 31) 975#define LVDS_PORT_EN (1 << 31)
983/* Selects pipe B for LVDS data. Must be set on pre-965. */ 976/* Selects pipe B for LVDS data. Must be set on pre-965. */
984#define LVDS_PIPEB_SELECT (1 << 30) 977#define LVDS_PIPEB_SELECT (1 << 30)
978/* LVDS dithering flag on 965/g4x platform */
979#define LVDS_ENABLE_DITHER (1 << 25)
985/* Enable border for unscaled (or aspect-scaled) display */ 980/* Enable border for unscaled (or aspect-scaled) display */
986#define LVDS_BORDER_ENABLE (1 << 15) 981#define LVDS_BORDER_ENABLE (1 << 15)
987/* 982/*
@@ -1751,6 +1746,8 @@
1751 1746
1752/* Display & cursor control */ 1747/* Display & cursor control */
1753 1748
1749/* dithering flag on Ironlake */
1750#define PIPE_ENABLE_DITHER (1 << 4)
1754/* Pipe A */ 1751/* Pipe A */
1755#define PIPEADSL 0x70000 1752#define PIPEADSL 0x70000
1756#define PIPEACONF 0x70008 1753#define PIPEACONF 0x70008
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d5ebb00a9d49..a3b90c9561dc 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)
732 732
733 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 733 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
734 734
735 /* Render Standby */
736 if (I915_HAS_RC6(dev)) {
737 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
738 dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
739 }
740
741 /* Hardware status page */ 735 /* Hardware status page */
742 dev_priv->saveHWS = I915_READ(HWS_PGA); 736 dev_priv->saveHWS = I915_READ(HWS_PGA);
743 737
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)
793 787
794 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 788 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
795 789
796 /* Render Standby */
797 if (I915_HAS_RC6(dev)) {
798 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
799 I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
800 }
801
802 /* Hardware status page */ 790 /* Hardware status page */
803 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 791 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
804 792
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9f3d3e563414..ddefc871edfe 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -548,4 +548,6 @@ void intel_crt_init(struct drm_device *dev)
548 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 548 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
549 549
550 drm_sysfs_connector_add(connector); 550 drm_sysfs_connector_add(connector);
551
552 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
551} 553}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da2..002612fae717 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -262,6 +262,14 @@ struct intel_limit {
262#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ 262#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
263#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ 263#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
264 264
265#define IRONLAKE_P_DISPLAY_PORT_MIN 10
266#define IRONLAKE_P_DISPLAY_PORT_MAX 20
267#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
268#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
269#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
270#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
271#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
272
265static bool 273static bool
266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 274intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
267 int target, int refclk, intel_clock_t *best_clock); 275 int target, int refclk, intel_clock_t *best_clock);
@@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
271static bool 279static bool
272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 280intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
273 int target, int refclk, intel_clock_t *best_clock); 281 int target, int refclk, intel_clock_t *best_clock);
274static bool
275intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
276 int target, int refclk, intel_clock_t *best_clock);
277 282
278static bool 283static bool
279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 284intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = {
496 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 501 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
497 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, 502 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
498 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, 503 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
499 .find_pll = intel_ironlake_find_best_PLL, 504 .find_pll = intel_g4x_find_best_PLL,
500}; 505};
501 506
502static const intel_limit_t intel_limits_ironlake_lvds = { 507static const intel_limit_t intel_limits_ironlake_lvds = {
@@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = {
511 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 516 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
512 .p2_slow = IRONLAKE_P2_LVDS_SLOW, 517 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
513 .p2_fast = IRONLAKE_P2_LVDS_FAST }, 518 .p2_fast = IRONLAKE_P2_LVDS_FAST },
514 .find_pll = intel_ironlake_find_best_PLL, 519 .find_pll = intel_g4x_find_best_PLL,
520};
521
522static const intel_limit_t intel_limits_ironlake_display_port = {
523 .dot = { .min = IRONLAKE_DOT_MIN,
524 .max = IRONLAKE_DOT_MAX },
525 .vco = { .min = IRONLAKE_VCO_MIN,
526 .max = IRONLAKE_VCO_MAX},
527 .n = { .min = IRONLAKE_N_MIN,
528 .max = IRONLAKE_N_MAX },
529 .m = { .min = IRONLAKE_M_MIN,
530 .max = IRONLAKE_M_MAX },
531 .m1 = { .min = IRONLAKE_M1_MIN,
532 .max = IRONLAKE_M1_MAX },
533 .m2 = { .min = IRONLAKE_M2_MIN,
534 .max = IRONLAKE_M2_MAX },
535 .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
536 .max = IRONLAKE_P_DISPLAY_PORT_MAX },
537 .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
538 .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
539 .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
540 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
541 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
542 .find_pll = intel_find_pll_ironlake_dp,
515}; 543};
516 544
517static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 545static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
@@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
519 const intel_limit_t *limit; 547 const intel_limit_t *limit;
520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 548 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
521 limit = &intel_limits_ironlake_lvds; 549 limit = &intel_limits_ironlake_lvds;
550 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
551 HAS_eDP)
552 limit = &intel_limits_ironlake_display_port;
522 else 553 else
523 limit = &intel_limits_ironlake_sdvo; 554 limit = &intel_limits_ironlake_sdvo;
524 555
@@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
791 found = false; 822 found = false;
792 823
793 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 824 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
794 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 825 int lvds_reg;
826
827 if (IS_IRONLAKE(dev))
828 lvds_reg = PCH_LVDS;
829 else
830 lvds_reg = LVDS;
831 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
795 LVDS_CLKB_POWER_UP) 832 LVDS_CLKB_POWER_UP)
796 clock.p2 = limit->p2.p2_fast; 833 clock.p2 = limit->p2.p2_fast;
797 else 834 else
@@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
839{ 876{
840 struct drm_device *dev = crtc->dev; 877 struct drm_device *dev = crtc->dev;
841 intel_clock_t clock; 878 intel_clock_t clock;
879
880 /* return directly when it is eDP */
881 if (HAS_eDP)
882 return true;
883
842 if (target < 200000) { 884 if (target < 200000) {
843 clock.n = 1; 885 clock.n = 1;
844 clock.p1 = 2; 886 clock.p1 = 2;
@@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
857 return true; 899 return true;
858} 900}
859 901
860static bool
861intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
862 int target, int refclk, intel_clock_t *best_clock)
863{
864 struct drm_device *dev = crtc->dev;
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 intel_clock_t clock;
867 int err_most = 47;
868 int err_min = 10000;
869
870 /* eDP has only 2 clock choice, no n/m/p setting */
871 if (HAS_eDP)
872 return true;
873
874 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
875 return intel_find_pll_ironlake_dp(limit, crtc, target,
876 refclk, best_clock);
877
878 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
879 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
880 LVDS_CLKB_POWER_UP)
881 clock.p2 = limit->p2.p2_fast;
882 else
883 clock.p2 = limit->p2.p2_slow;
884 } else {
885 if (target < limit->p2.dot_limit)
886 clock.p2 = limit->p2.p2_slow;
887 else
888 clock.p2 = limit->p2.p2_fast;
889 }
890
891 memset(best_clock, 0, sizeof(*best_clock));
892 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
893 /* based on hardware requirement prefer smaller n to precision */
894 for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
895 /* based on hardware requirement prefer larger m1,m2 */
896 for (clock.m1 = limit->m1.max;
897 clock.m1 >= limit->m1.min; clock.m1--) {
898 for (clock.m2 = limit->m2.max;
899 clock.m2 >= limit->m2.min; clock.m2--) {
900 int this_err;
901
902 intel_clock(dev, refclk, &clock);
903 if (!intel_PLL_is_valid(crtc, &clock))
904 continue;
905 this_err = abs((10000 - (target*10000/clock.dot)));
906 if (this_err < err_most) {
907 *best_clock = clock;
908 /* found on first matching */
909 goto out;
910 } else if (this_err < err_min) {
911 *best_clock = clock;
912 err_min = this_err;
913 }
914 }
915 }
916 }
917 }
918out:
919 return true;
920}
921
922/* DisplayPort has only two frequencies, 162MHz and 270MHz */ 902/* DisplayPort has only two frequencies, 162MHz and 270MHz */
923static bool 903static bool
924intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 904intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -1493,6 +1473,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1493 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; 1473 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
1494 u32 temp; 1474 u32 temp;
1495 int tries = 5, j, n; 1475 int tries = 5, j, n;
1476 u32 pipe_bpc;
1477
1478 temp = I915_READ(pipeconf_reg);
1479 pipe_bpc = temp & PIPE_BPC_MASK;
1496 1480
1497 /* XXX: When our outputs are all unaware of DPMS modes other than off 1481 /* XXX: When our outputs are all unaware of DPMS modes other than off
1498 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1482 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1508,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1524 1508
1525 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1509 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1526 temp = I915_READ(fdi_rx_reg); 1510 temp = I915_READ(fdi_rx_reg);
1511 /*
1512 * make the BPC in FDI Rx be consistent with that in
1513 * pipeconf reg.
1514 */
1515 temp &= ~(0x7 << 16);
1516 temp |= (pipe_bpc << 11);
1527 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1517 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
1528 FDI_SEL_PCDCLK | 1518 FDI_SEL_PCDCLK |
1529 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ 1519 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
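A note on the shift arithmetic in this hunk: pipe_bpc holds the pipeconf colour-depth field masked with PIPE_BPC_MASK, which sits at bits 7:5 of pipeconf in this generation, so pipe_bpc << 11 places the same value into bits 18:16 of the FDI RX control register, exactly the field that temp &= ~(0x7 << 16) just cleared. The later transcoder hunk writes pipe_bpc unshifted because the transcoder register shares the pipeconf field layout.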
@@ -1666,6 +1656,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1666 1656
1667 /* enable PCH transcoder */ 1657 /* enable PCH transcoder */
1668 temp = I915_READ(transconf_reg); 1658 temp = I915_READ(transconf_reg);
1659 /*
1660 * make the BPC in transcoder be consistent with
1661 * that in pipeconf reg.
1662 */
1663 temp &= ~PIPE_BPC_MASK;
1664 temp |= pipe_bpc;
1669 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 1665 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1670 I915_READ(transconf_reg); 1666 I915_READ(transconf_reg);
1671 1667
@@ -1745,6 +1741,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1745 I915_READ(fdi_tx_reg); 1741 I915_READ(fdi_tx_reg);
1746 1742
1747 temp = I915_READ(fdi_rx_reg); 1743 temp = I915_READ(fdi_rx_reg);
1744 /* BPC in FDI rx is consistent with that in pipeconf */
1745 temp &= ~(0x07 << 16);
1746 temp |= (pipe_bpc << 11);
1748 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); 1747 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1749 I915_READ(fdi_rx_reg); 1748 I915_READ(fdi_rx_reg);
1750 1749
@@ -1789,7 +1788,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1789 } 1788 }
1790 } 1789 }
1791 } 1790 }
1792 1791 temp = I915_READ(transconf_reg);
1792 /* BPC in transcoder is consistent with that in pipeconf */
1793 temp &= ~PIPE_BPC_MASK;
1794 temp |= pipe_bpc;
1795 I915_WRITE(transconf_reg, temp);
1796 I915_READ(transconf_reg);
1793 udelay(100); 1797 udelay(100);
1794 1798
1795 /* disable PCH DPLL */ 1799 /* disable PCH DPLL */
@@ -2448,7 +2452,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2448 * A value of 5us seems to be a good balance; safe for very low end 2452 * A value of 5us seems to be a good balance; safe for very low end
2449 * platforms but not overly aggressive on lower latency configs. 2453 * platforms but not overly aggressive on lower latency configs.
2450 */ 2454 */
2451const static int latency_ns = 5000; 2455static const int latency_ns = 5000;
2452 2456
2453static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 2457static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2454{ 2458{
@@ -2559,7 +2563,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2559 /* Calc sr entries for one plane configs */ 2563 /* Calc sr entries for one plane configs */
2560 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2564 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2561 /* self-refresh has much higher latency */ 2565 /* self-refresh has much higher latency */
2562 const static int sr_latency_ns = 12000; 2566 static const int sr_latency_ns = 12000;
2563 2567
2564 sr_clock = planea_clock ? planea_clock : planeb_clock; 2568 sr_clock = planea_clock ? planea_clock : planeb_clock;
2565 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2569 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2598,7 +2602,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2598 /* Calc sr entries for one plane configs */ 2602 /* Calc sr entries for one plane configs */
2599 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2603 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2600 /* self-refresh has much higher latency */ 2604 /* self-refresh has much higher latency */
2601 const static int sr_latency_ns = 12000; 2605 static const int sr_latency_ns = 12000;
2602 2606
2603 sr_clock = planea_clock ? planea_clock : planeb_clock; 2607 sr_clock = planea_clock ? planea_clock : planeb_clock;
2604 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2608 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2667,7 +2671,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2667 if (HAS_FW_BLC(dev) && sr_hdisplay && 2671 if (HAS_FW_BLC(dev) && sr_hdisplay &&
2668 (!planea_clock || !planeb_clock)) { 2672 (!planea_clock || !planeb_clock)) {
2669 /* self-refresh has much higher latency */ 2673 /* self-refresh has much higher latency */
2670 const static int sr_latency_ns = 6000; 2674 static const int sr_latency_ns = 6000;
2671 2675
2672 sr_clock = planea_clock ? planea_clock : planeb_clock; 2676 sr_clock = planea_clock ? planea_clock : planeb_clock;
2673 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 2677 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2969,6 +2973,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2969 2973
2970 /* determine panel color depth */ 2974 /* determine panel color depth */
2971 temp = I915_READ(pipeconf_reg); 2975 temp = I915_READ(pipeconf_reg);
2976 temp &= ~PIPE_BPC_MASK;
2977 if (is_lvds) {
2978 int lvds_reg = I915_READ(PCH_LVDS);
2979 /* the BPC will be 6 if it is 18-bit LVDS panel */
2980 if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
2981 temp |= PIPE_8BPC;
2982 else
2983 temp |= PIPE_6BPC;
2984 } else
2985 temp |= PIPE_8BPC;
2986 I915_WRITE(pipeconf_reg, temp);
2987 I915_READ(pipeconf_reg);
2972 2988
2973 switch (temp & PIPE_BPC_MASK) { 2989 switch (temp & PIPE_BPC_MASK) {
2974 case PIPE_8BPC: 2990 case PIPE_8BPC:
@@ -3195,7 +3211,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3195 * appropriately here, but we need to look more thoroughly into how 3211 * appropriately here, but we need to look more thoroughly into how
3196 * panels behave in the two modes. 3212 * panels behave in the two modes.
3197 */ 3213 */
3198 3214 /* set the dithering flag */
3215 if (IS_I965G(dev)) {
3216 if (dev_priv->lvds_dither) {
3217 if (IS_IRONLAKE(dev))
3218 pipeconf |= PIPE_ENABLE_DITHER;
3219 else
3220 lvds |= LVDS_ENABLE_DITHER;
3221 } else {
3222 if (IS_IRONLAKE(dev))
3223 pipeconf &= ~PIPE_ENABLE_DITHER;
3224 else
3225 lvds &= ~LVDS_ENABLE_DITHER;
3226 }
3227 }
3199 I915_WRITE(lvds_reg, lvds); 3228 I915_WRITE(lvds_reg, lvds);
3200 I915_READ(lvds_reg); 3229 I915_READ(lvds_reg);
3201 } 3230 }
@@ -3385,7 +3414,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3385 3414
3386 /* we only need to pin inside GTT if cursor is non-phy */ 3415 /* we only need to pin inside GTT if cursor is non-phy */
3387 mutex_lock(&dev->struct_mutex); 3416 mutex_lock(&dev->struct_mutex);
3388 if (!dev_priv->cursor_needs_physical) { 3417 if (!dev_priv->info->cursor_needs_physical) {
3389 ret = i915_gem_object_pin(bo, PAGE_SIZE); 3418 ret = i915_gem_object_pin(bo, PAGE_SIZE);
3390 if (ret) { 3419 if (ret) {
3391 DRM_ERROR("failed to pin cursor bo\n"); 3420 DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3449,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3420 I915_WRITE(base, addr); 3449 I915_WRITE(base, addr);
3421 3450
3422 if (intel_crtc->cursor_bo) { 3451 if (intel_crtc->cursor_bo) {
3423 if (dev_priv->cursor_needs_physical) { 3452 if (dev_priv->info->cursor_needs_physical) {
3424 if (intel_crtc->cursor_bo != bo) 3453 if (intel_crtc->cursor_bo != bo)
3425 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 3454 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
3426 } else 3455 } else
@@ -3779,125 +3808,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
3779 queue_work(dev_priv->wq, &dev_priv->idle_work); 3808 queue_work(dev_priv->wq, &dev_priv->idle_work);
3780} 3809}
3781 3810
3782void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3783{
3784 drm_i915_private_t *dev_priv = dev->dev_private;
3785
3786 if (IS_IRONLAKE(dev))
3787 return;
3788
3789 if (!dev_priv->render_reclock_avail) {
3790 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3791 return;
3792 }
3793
3794 /* Restore render clock frequency to original value */
3795 if (IS_G4X(dev) || IS_I9XX(dev))
3796 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
3797 else if (IS_I85X(dev))
3798 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
3799 DRM_DEBUG_DRIVER("increasing render clock frequency\n");
3800
3801 /* Schedule downclock */
3802 if (schedule)
3803 mod_timer(&dev_priv->idle_timer, jiffies +
3804 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
3805}
3806
3807void intel_decrease_renderclock(struct drm_device *dev)
3808{
3809 drm_i915_private_t *dev_priv = dev->dev_private;
3810
3811 if (IS_IRONLAKE(dev))
3812 return;
3813
3814 if (!dev_priv->render_reclock_avail) {
3815 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3816 return;
3817 }
3818
3819 if (IS_G4X(dev)) {
3820 u16 gcfgc;
3821
3822 /* Adjust render clock... */
3823 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3824
3825 /* Down to minimum... */
3826 gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
3827 gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
3828
3829 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3830 } else if (IS_I965G(dev)) {
3831 u16 gcfgc;
3832
3833 /* Adjust render clock... */
3834 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3835
3836 /* Down to minimum... */
3837 gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
3838 gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
3839
3840 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3841 } else if (IS_I945G(dev) || IS_I945GM(dev)) {
3842 u16 gcfgc;
3843
3844 /* Adjust render clock... */
3845 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3846
3847 /* Down to minimum... */
3848 gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
3849 gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
3850
3851 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3852 } else if (IS_I915G(dev)) {
3853 u16 gcfgc;
3854
3855 /* Adjust render clock... */
3856 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3857
3858 /* Down to minimum... */
3859 gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
3860 gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
3861
3862 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3863 } else if (IS_I85X(dev)) {
3864 u16 hpllcc;
3865
3866 /* Adjust render clock... */
3867 pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
3868
3869 /* Up to maximum... */
3870 hpllcc &= ~GC_CLOCK_CONTROL_MASK;
3871 hpllcc |= GC_CLOCK_133_200;
3872
3873 pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
3874 }
3875 DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
3876}
3877
3878/* Note that no increase function is needed for this - increase_renderclock()
3879 * will also rewrite these bits
3880 */
3881void intel_decrease_displayclock(struct drm_device *dev)
3882{
3883 if (IS_IRONLAKE(dev))
3884 return;
3885
3886 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
3887 IS_I915GM(dev)) {
3888 u16 gcfgc;
3889
3890 /* Adjust render clock... */
3891 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3892
3893 /* Down to minimum... */
3894 gcfgc &= ~0xf0;
3895 gcfgc |= 0x80;
3896
3897 pci_write_config_word(dev->pdev, GCFGC, gcfgc);
3898 }
3899}
3900
3901#define CRTC_IDLE_TIMEOUT 1000 /* ms */ 3811#define CRTC_IDLE_TIMEOUT 1000 /* ms */
3902 3812
3903static void intel_crtc_idle_timer(unsigned long arg) 3813static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +3921,6 @@ static void intel_idle_update(struct work_struct *work)
4011 3921
4012 mutex_lock(&dev->struct_mutex); 3922 mutex_lock(&dev->struct_mutex);
4013 3923
4014 /* GPU isn't processing, downclock it. */
4015 if (!dev_priv->busy) {
4016 intel_decrease_renderclock(dev);
4017 intel_decrease_displayclock(dev);
4018 }
4019
4020 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3924 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4021 /* Skip inactive CRTCs */ 3925 /* Skip inactive CRTCs */
4022 if (!crtc->fb) 3926 if (!crtc->fb)
@@ -4050,13 +3954,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4050 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3954 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4051 return; 3955 return;
4052 3956
4053 if (!dev_priv->busy) { 3957 if (!dev_priv->busy)
4054 dev_priv->busy = true; 3958 dev_priv->busy = true;
4055 intel_increase_renderclock(dev, true); 3959 else
4056 } else {
4057 mod_timer(&dev_priv->idle_timer, jiffies + 3960 mod_timer(&dev_priv->idle_timer, jiffies +
4058 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 3961 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4059 }
4060 3962
4061 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3963 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4062 if (!crtc->fb) 3964 if (!crtc->fb)
@@ -4400,29 +4302,43 @@ static void intel_setup_outputs(struct drm_device *dev)
4400 bool found = false; 4302 bool found = false;
4401 4303
4402 if (I915_READ(SDVOB) & SDVO_DETECTED) { 4304 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4305 DRM_DEBUG_KMS("probing SDVOB\n");
4403 found = intel_sdvo_init(dev, SDVOB); 4306 found = intel_sdvo_init(dev, SDVOB);
4404 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 4307 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
4308 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
4405 intel_hdmi_init(dev, SDVOB); 4309 intel_hdmi_init(dev, SDVOB);
4310 }
4406 4311
4407 if (!found && SUPPORTS_INTEGRATED_DP(dev)) 4312 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
4313 DRM_DEBUG_KMS("probing DP_B\n");
4408 intel_dp_init(dev, DP_B); 4314 intel_dp_init(dev, DP_B);
4315 }
4409 } 4316 }
4410 4317
4411 /* Before G4X SDVOC doesn't have its own detect register */ 4318 /* Before G4X SDVOC doesn't have its own detect register */
4412 4319
4413 if (I915_READ(SDVOB) & SDVO_DETECTED) 4320 if (I915_READ(SDVOB) & SDVO_DETECTED) {
4321 DRM_DEBUG_KMS("probing SDVOC\n");
4414 found = intel_sdvo_init(dev, SDVOC); 4322 found = intel_sdvo_init(dev, SDVOC);
4323 }
4415 4324
4416 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 4325 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
4417 4326
4418 if (SUPPORTS_INTEGRATED_HDMI(dev)) 4327 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
4328 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
4419 intel_hdmi_init(dev, SDVOC); 4329 intel_hdmi_init(dev, SDVOC);
4420 if (SUPPORTS_INTEGRATED_DP(dev)) 4330 }
4331 if (SUPPORTS_INTEGRATED_DP(dev)) {
4332 DRM_DEBUG_KMS("probing DP_C\n");
4421 intel_dp_init(dev, DP_C); 4333 intel_dp_init(dev, DP_C);
4334 }
4422 } 4335 }
4423 4336
4424 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) 4337 if (SUPPORTS_INTEGRATED_DP(dev) &&
4338 (I915_READ(DP_D) & DP_DETECTED)) {
4339 DRM_DEBUG_KMS("probing DP_D\n");
4425 intel_dp_init(dev, DP_D); 4340 intel_dp_init(dev, DP_D);
4341 }
4426 } else if (IS_I8XX(dev)) 4342 } else if (IS_I8XX(dev))
4427 intel_dvo_init(dev); 4343 intel_dvo_init(dev);
4428 4344
@@ -4527,6 +4443,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
4527 .fb_changed = intelfb_probe, 4443 .fb_changed = intelfb_probe,
4528}; 4444};
4529 4445
4446static struct drm_gem_object *
4447intel_alloc_power_context(struct drm_device *dev)
4448{
4449 struct drm_gem_object *pwrctx;
4450 int ret;
4451
4452 pwrctx = drm_gem_object_alloc(dev, 4096);
4453 if (!pwrctx) {
4454 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
4455 return NULL;
4456 }
4457
4458 mutex_lock(&dev->struct_mutex);
4459 ret = i915_gem_object_pin(pwrctx, 4096);
4460 if (ret) {
4461 DRM_ERROR("failed to pin power context: %d\n", ret);
4462 goto err_unref;
4463 }
4464
4465 ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4466 if (ret) {
4467 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
4468 goto err_unpin;
4469 }
4470 mutex_unlock(&dev->struct_mutex);
4471
4472 return pwrctx;
4473
4474err_unpin:
4475 i915_gem_object_unpin(pwrctx);
4476err_unref:
4477 drm_gem_object_unreference(pwrctx);
4478 mutex_unlock(&dev->struct_mutex);
4479 return NULL;
4480}
4481
4530void intel_init_clock_gating(struct drm_device *dev) 4482void intel_init_clock_gating(struct drm_device *dev)
4531{ 4483{
4532 struct drm_i915_private *dev_priv = dev->dev_private; 4484 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4531,27 @@ void intel_init_clock_gating(struct drm_device *dev)
4579 * GPU can automatically power down the render unit if given a page 4531 * GPU can automatically power down the render unit if given a page
4580 * to save state. 4532 * to save state.
4581 */ 4533 */
4582 if (I915_HAS_RC6(dev)) { 4534 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
4583 struct drm_gem_object *pwrctx; 4535 struct drm_i915_gem_object *obj_priv = NULL;
4584 struct drm_i915_gem_object *obj_priv;
4585 int ret;
4586 4536
4587 if (dev_priv->pwrctx) { 4537 if (dev_priv->pwrctx) {
4588 obj_priv = dev_priv->pwrctx->driver_private; 4538 obj_priv = dev_priv->pwrctx->driver_private;
4589 } else { 4539 } else {
4590 pwrctx = drm_gem_object_alloc(dev, 4096); 4540 struct drm_gem_object *pwrctx;
4591 if (!pwrctx) {
4592 DRM_DEBUG("failed to alloc power context, "
4593 "RC6 disabled\n");
4594 goto out;
4595 }
4596 4541
4597 ret = i915_gem_object_pin(pwrctx, 4096); 4542 pwrctx = intel_alloc_power_context(dev);
4598 if (ret) { 4543 if (pwrctx) {
4599 DRM_ERROR("failed to pin power context: %d\n", 4544 dev_priv->pwrctx = pwrctx;
4600 ret); 4545 obj_priv = pwrctx->driver_private;
4601 drm_gem_object_unreference(pwrctx);
4602 goto out;
4603 } 4546 }
4604
4605 i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4606
4607 dev_priv->pwrctx = pwrctx;
4608 obj_priv = pwrctx->driver_private;
4609 } 4547 }
4610 4548
4611 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); 4549 if (obj_priv) {
4612 I915_WRITE(MCHBAR_RENDER_STANDBY, 4550 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
4613 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); 4551 I915_WRITE(MCHBAR_RENDER_STANDBY,
4552 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
4553 }
4614 } 4554 }
4615
4616out:
4617 return;
4618} 4555}
4619 4556
4620/* Set up chip specific display functions */ 4557/* Set up chip specific display functions */
@@ -4770,7 +4707,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
4770 del_timer_sync(&intel_crtc->idle_timer); 4707 del_timer_sync(&intel_crtc->idle_timer);
4771 } 4708 }
4772 4709
4773 intel_increase_renderclock(dev, false);
4774 del_timer_sync(&dev_priv->idle_timer); 4710 del_timer_sync(&dev_priv->idle_timer);
4775 4711
4776 if (dev_priv->display.disable_fbc) 4712 if (dev_priv->display.disable_fbc)
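
Editorial aside: the intel_mark_busy() hunk above re-arms an idle timer on every busy notification so downclocking only happens GPU_IDLE_TIMEOUT ms after the last activity. The following is a minimal, generic sketch of that one-shot re-arm idiom using the era's timer API; all example_* names and EXAMPLE_IDLE_TIMEOUT_MS are invented for illustration and are not the driver's code.

#include <linux/timer.h>
#include <linux/jiffies.h>

#define EXAMPLE_IDLE_TIMEOUT_MS 500

static void example_idle_timer_fn(unsigned long data)
{
	/* Runs EXAMPLE_IDLE_TIMEOUT_MS after the last example_mark_busy(). */
}

static struct timer_list example_idle_timer;

static void example_timer_init(void)
{
	setup_timer(&example_idle_timer, example_idle_timer_fn, 0);
}

static void example_mark_busy(void)
{
	/* mod_timer() (re)arms the one-shot timer, pushing the deadline out. */
	mod_timer(&example_idle_timer,
		  jiffies + msecs_to_jiffies(EXAMPLE_IDLE_TIMEOUT_MS));
}

Each call to example_mark_busy() postpones the idle callback, which is exactly why the hunk only needs mod_timer() once dev_priv->busy is already set.
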
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4e7aa8b7b938..1349d9fd01c4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1402,14 +1402,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1402 break; 1402 break;
1403 case DP_B: 1403 case DP_B:
1404 case PCH_DP_B: 1404 case PCH_DP_B:
1405 dev_priv->hotplug_supported_mask |=
1406 HDMIB_HOTPLUG_INT_STATUS;
1405 name = "DPDDC-B"; 1407 name = "DPDDC-B";
1406 break; 1408 break;
1407 case DP_C: 1409 case DP_C:
1408 case PCH_DP_C: 1410 case PCH_DP_C:
1411 dev_priv->hotplug_supported_mask |=
1412 HDMIC_HOTPLUG_INT_STATUS;
1409 name = "DPDDC-C"; 1413 name = "DPDDC-C";
1410 break; 1414 break;
1411 case DP_D: 1415 case DP_D:
1412 case PCH_DP_D: 1416 case PCH_DP_D:
1417 dev_priv->hotplug_supported_mask |=
1418 HDMID_HOTPLUG_INT_STATUS;
1413 name = "DPDDC-D"; 1419 name = "DPDDC-D";
1414 break; 1420 break;
1415 } 1421 }
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f04dbbe7d400..06431941b233 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -303,21 +303,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
303 if (sdvox_reg == SDVOB) { 303 if (sdvox_reg == SDVOB) {
304 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 304 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
305 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 305 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
306 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
306 } else if (sdvox_reg == SDVOC) { 307 } else if (sdvox_reg == SDVOC) {
307 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 308 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
308 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 309 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
310 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
309 } else if (sdvox_reg == HDMIB) { 311 } else if (sdvox_reg == HDMIB) {
310 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 312 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
311 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 313 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
312 "HDMIB"); 314 "HDMIB");
315 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
313 } else if (sdvox_reg == HDMIC) { 316 } else if (sdvox_reg == HDMIC) {
314 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 317 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
315 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 318 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
316 "HDMIC"); 319 "HDMIC");
320 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
317 } else if (sdvox_reg == HDMID) { 321 } else if (sdvox_reg == HDMID) {
318 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 322 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
319 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 323 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
320 "HDMID"); 324 "HDMID");
325 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
321 } 326 }
322 if (!intel_output->ddc_bus) 327 if (!intel_output->ddc_bus)
323 goto err_connector; 328 goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3118ce274e67..f4b4aa242df1 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -608,6 +608,13 @@ static const struct dmi_system_id bad_lid_status[] = {
608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
609 }, 609 },
610 }, 610 },
611 {
612 .ident = "PC-81005",
613 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
615 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
616 },
617 },
611 { } 618 { }
612}; 619};
613 620
@@ -679,7 +686,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
679 struct drm_i915_private *dev_priv = 686 struct drm_i915_private *dev_priv =
680 container_of(nb, struct drm_i915_private, lid_notifier); 687 container_of(nb, struct drm_i915_private, lid_notifier);
681 struct drm_device *dev = dev_priv->dev; 688 struct drm_device *dev = dev_priv->dev;
689 struct drm_connector *connector = dev_priv->int_lvds_connector;
682 690
691 /*
 692 * check and update the status of the LVDS connector after receiving
 693 * the LID notification event.
694 */
695 if (connector)
696 connector->status = connector->funcs->detect(connector);
683 if (!acpi_lid_open()) { 697 if (!acpi_lid_open()) {
684 dev_priv->modeset_on_lid = 1; 698 dev_priv->modeset_on_lid = 1;
685 return NOTIFY_OK; 699 return NOTIFY_OK;
@@ -854,65 +868,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
854 { } /* terminating entry */ 868 { } /* terminating entry */
855}; 869};
856 870
857#ifdef CONFIG_ACPI
858/*
859 * check_lid_device -- check whether @handle is an ACPI LID device.
860 * @handle: ACPI device handle
861 * @level : depth in the ACPI namespace tree
862 * @context: the number of LID device when we find the device
863 * @rv: a return value to fill if desired (Not use)
864 */
865static acpi_status
866check_lid_device(acpi_handle handle, u32 level, void *context,
867 void **return_value)
868{
869 struct acpi_device *acpi_dev;
870 int *lid_present = context;
871
872 acpi_dev = NULL;
873 /* Get the acpi device for device handle */
874 if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
875 /* If there is no ACPI device for handle, return */
876 return AE_OK;
877 }
878
879 if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
880 *lid_present = 1;
881
882 return AE_OK;
883}
884
885/**
886 * check whether there exists the ACPI LID device by enumerating the ACPI
887 * device tree.
888 */
889static int intel_lid_present(void)
890{
891 int lid_present = 0;
892
893 if (acpi_disabled) {
894 /* If ACPI is disabled, there is no ACPI device tree to
895 * check, so assume the LID device would have been present.
896 */
897 return 1;
898 }
899
900 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
901 ACPI_UINT32_MAX,
902 check_lid_device, NULL, &lid_present, NULL);
903
904 return lid_present;
905}
906#else
907static int intel_lid_present(void)
908{
909 /* In the absence of ACPI built in, assume that the LID device would
910 * have been present.
911 */
912 return 1;
913}
914#endif
915
916/** 871/**
917 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID 872 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
918 * @dev: drm device 873 * @dev: drm device
@@ -1031,12 +986,8 @@ void intel_lvds_init(struct drm_device *dev)
1031 if (dmi_check_system(intel_no_lvds)) 986 if (dmi_check_system(intel_no_lvds))
1032 return; 987 return;
1033 988
1034 /* 989 if (!lvds_is_present_in_vbt(dev)) {
1035 * Assume LVDS is present if there's an ACPI lid device or if the 990 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
1036 * device is present in the VBT.
1037 */
1038 if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
1039 DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
1040 return; 991 return;
1041 } 992 }
1042 993
@@ -1180,6 +1131,8 @@ out:
1180 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1131 DRM_DEBUG_KMS("lid notifier registration failed\n");
1181 dev_priv->lid_notifier.notifier_call = NULL; 1132 dev_priv->lid_notifier.notifier_call = NULL;
1182 } 1133 }
1134 /* keep the LVDS connector */
1135 dev_priv->int_lvds_connector = connector;
1183 drm_sysfs_connector_add(connector); 1136 drm_sysfs_connector_add(connector);
1184 return; 1137 return;
1185 1138
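
Editorial aside: the intel_lvds.c change above adds a MALATA PC-81005 entry to the bad_lid_status DMI table. As a hedged sketch (not the driver's actual table), this is how such a quirk list is declared and consulted; the example_bad_lid table and example_lid_is_unreliable() helper are hypothetical.

#include <linux/dmi.h>

static const struct dmi_system_id example_bad_lid[] = {
	{
		.ident = "PC-81005",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
		},
	},
	{ }	/* terminating entry */
};

static bool example_lid_is_unreliable(void)
{
	/* dmi_check_system() returns the number of matching entries. */
	return dmi_check_system(example_bad_lid) != 0;
}
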
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 24a3dc99716c..de5144c8c153 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2662,6 +2662,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2662 2662
2663bool intel_sdvo_init(struct drm_device *dev, int output_device) 2663bool intel_sdvo_init(struct drm_device *dev, int output_device)
2664{ 2664{
2665 struct drm_i915_private *dev_priv = dev->dev_private;
2665 struct drm_connector *connector; 2666 struct drm_connector *connector;
2666 struct intel_output *intel_output; 2667 struct intel_output *intel_output;
2667 struct intel_sdvo_priv *sdvo_priv; 2668 struct intel_sdvo_priv *sdvo_priv;
@@ -2708,10 +2709,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2708 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2709 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2709 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2710 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2710 "SDVOB/VGA DDC BUS"); 2711 "SDVOB/VGA DDC BUS");
2712 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2711 } else { 2713 } else {
2712 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2714 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2713 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2715 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2714 "SDVOC/VGA DDC BUS"); 2716 "SDVOC/VGA DDC BUS");
2717 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2715 } 2718 }
2716 2719
2717 if (intel_output->ddc_bus == NULL) 2720 if (intel_output->ddc_bus == NULL)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec110b741..1d5b9b7b033f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1840,6 +1840,8 @@ intel_tv_init(struct drm_device *dev)
1840 drm_connector_attach_property(connector, 1840 drm_connector_attach_property(connector,
1841 dev->mode_config.tv_bottom_margin_property, 1841 dev->mode_config.tv_bottom_margin_property,
1842 tv_priv->margin[TV_MARGIN_BOTTOM]); 1842 tv_priv->margin[TV_MARGIN_BOTTOM]);
1843
1844 dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
1843out: 1845out:
1844 drm_sysfs_connector_add(connector); 1846 drm_sysfs_connector_add(connector);
1845} 1847}
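
Editorial aside: the intel_dp.c, intel_hdmi.c, intel_sdvo.c and intel_tv.c hunks above each OR a per-port bit into dev_priv->hotplug_supported_mask as the output is registered. A minimal, generic illustration of that accumulate-then-test idiom follows; the bit names and example_* types are invented and do not correspond to i915 registers.

#include <stdint.h>
#include <stdbool.h>

#define EXAMPLE_PORT_B_HOTPLUG	(1u << 0)
#define EXAMPLE_PORT_C_HOTPLUG	(1u << 1)
#define EXAMPLE_PORT_D_HOTPLUG	(1u << 2)

struct example_priv {
	uint32_t hotplug_supported_mask;
};

/* Called once per output as it is registered. */
static void example_register_output(struct example_priv *priv, uint32_t bit)
{
	priv->hotplug_supported_mask |= bit;
}

/* Later, e.g. when programming interrupt enables, only touch what exists. */
static bool example_port_has_hotplug(const struct example_priv *priv, uint32_t bit)
{
	return (priv->hotplug_supported_mask & bit) != 0;
}
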
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 321044bef71c..41dd8ebff219 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
114 i2c.i2c_id = gpio->sucI2cId.ucAccess; 114 i2c.i2c_id = gpio->sucI2cId.ucAccess;
115 115
116 i2c.valid = true; 116 i2c.valid = true;
117 break;
117 } 118 }
118 } 119 }
119 120
@@ -1026,6 +1027,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1026 ss->delay = ss_info->asSS_Info[i].ucSS_Delay; 1027 ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
1027 ss->range = ss_info->asSS_Info[i].ucSS_Range; 1028 ss->range = ss_info->asSS_Info[i].ucSS_Range;
1028 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; 1029 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
1030 break;
1029 } 1031 }
1030 } 1032 }
1031 } 1033 }
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index fd94dbca33ac..58f342659cc7 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -595,6 +595,34 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
595 return false; 595 return false;
596} 596}
597 597
598static const uint32_t default_primarydac_adj[CHIP_LAST] = {
599 0x00000808, /* r100 */
600 0x00000808, /* rv100 */
601 0x00000808, /* rs100 */
602 0x00000808, /* rv200 */
603 0x00000808, /* rs200 */
604 0x00000808, /* r200 */
605 0x00000808, /* rv250 */
606 0x00000000, /* rs300 */
607 0x00000808, /* rv280 */
608 0x00000808, /* r300 */
609 0x00000808, /* r350 */
610 0x00000808, /* rv350 */
611 0x00000808, /* rv380 */
612 0x00000808, /* r420 */
613 0x00000808, /* r423 */
614 0x00000808, /* rv410 */
615 0x00000000, /* rs400 */
616 0x00000000, /* rs480 */
617};
618
619static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
620 struct radeon_encoder_primary_dac *p_dac)
621{
622 p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
623 return;
624}
625
598struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct 626struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
599 radeon_encoder 627 radeon_encoder
600 *encoder) 628 *encoder)
@@ -604,20 +632,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
604 uint16_t dac_info; 632 uint16_t dac_info;
605 uint8_t rev, bg, dac; 633 uint8_t rev, bg, dac;
606 struct radeon_encoder_primary_dac *p_dac = NULL; 634 struct radeon_encoder_primary_dac *p_dac = NULL;
635 int found = 0;
607 636
608 if (rdev->bios == NULL) 637 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
638 GFP_KERNEL);
639
640 if (!p_dac)
609 return NULL; 641 return NULL;
610 642
643 if (rdev->bios == NULL)
644 goto out;
645
611 /* check CRT table */ 646 /* check CRT table */
612 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 647 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
613 if (dac_info) { 648 if (dac_info) {
614 p_dac =
615 kzalloc(sizeof(struct radeon_encoder_primary_dac),
616 GFP_KERNEL);
617
618 if (!p_dac)
619 return NULL;
620
621 rev = RBIOS8(dac_info) & 0x3; 649 rev = RBIOS8(dac_info) & 0x3;
622 if (rev < 2) { 650 if (rev < 2) {
623 bg = RBIOS8(dac_info + 0x2) & 0xf; 651 bg = RBIOS8(dac_info + 0x2) & 0xf;
@@ -628,9 +656,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
628 dac = RBIOS8(dac_info + 0x3) & 0xf; 656 dac = RBIOS8(dac_info + 0x3) & 0xf;
629 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 657 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
630 } 658 }
631 659 found = 1;
632 } 660 }
633 661
662out:
663 if (!found) /* fallback to defaults */
664 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
665
634 return p_dac; 666 return p_dac;
635} 667}
636 668
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 20161567dbff..b82ae61d4d17 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -615,7 +615,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
615 ret = connector_status_connected; 615 ret = connector_status_connected;
616 } 616 }
617 } else { 617 } else {
618 if (radeon_connector->dac_load_detect) { 618 if (radeon_connector->dac_load_detect && encoder) {
619 encoder_funcs = encoder->helper_private; 619 encoder_funcs = encoder->helper_private;
620 ret = encoder_funcs->detect(encoder, connector); 620 ret = encoder_funcs->detect(encoder, connector);
621 } 621 }
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0b2f9c2ad2c1..06123ba31d31 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2145 &master_priv->sarea); 2145 &master_priv->sarea);
2146 if (ret) { 2146 if (ret) {
2147 DRM_ERROR("SAREA setup failed\n"); 2147 DRM_ERROR("SAREA setup failed\n");
2148 kfree(master_priv);
2148 return ret; 2149 return ret;
2149 } 2150 }
2150 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); 2151 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7c6848096bcd..0c51f8e46613 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -733,16 +733,18 @@ void radeon_device_fini(struct radeon_device *rdev)
733 */ 733 */
734int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) 734int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
735{ 735{
736 struct radeon_device *rdev = dev->dev_private; 736 struct radeon_device *rdev;
737 struct drm_crtc *crtc; 737 struct drm_crtc *crtc;
738 int r; 738 int r;
739 739
740 if (dev == NULL || rdev == NULL) { 740 if (dev == NULL || dev->dev_private == NULL) {
741 return -ENODEV; 741 return -ENODEV;
742 } 742 }
743 if (state.event == PM_EVENT_PRETHAW) { 743 if (state.event == PM_EVENT_PRETHAW) {
744 return 0; 744 return 0;
745 } 745 }
746 rdev = dev->dev_private;
747
746 /* unpin the front buffers */ 748 /* unpin the front buffers */
747 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 749 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
748 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 750 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
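
Editorial aside: the radeon_device.c hunk above moves the dev->dev_private load after the NULL check, since the old initializer dereferenced dev before dev == NULL was tested. A tiny sketch of the corrected pattern, with generic placeholder types (example_dev, example_priv):

struct example_dev  { void *dev_private; };
struct example_priv { int dummy; };

static int example_suspend(struct example_dev *dev)
{
	struct example_priv *priv;

	if (dev == NULL || dev->dev_private == NULL)
		return -1;		/* -ENODEV in the driver */

	priv = dev->dev_private;	/* safe: both pointers checked above */
	(void)priv;
	return 0;
}
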
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 91d72b70abc9..1fb2f029d7e8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -329,8 +329,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
329 ret = radeon_get_atom_connector_info_from_object_table(dev); 329 ret = radeon_get_atom_connector_info_from_object_table(dev);
330 else 330 else
331 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); 331 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
332 } else 332 } else {
333 ret = radeon_get_legacy_connector_info_from_bios(dev); 333 ret = radeon_get_legacy_connector_info_from_bios(dev);
334 if (ret == false)
335 ret = radeon_get_legacy_connector_info_from_table(dev);
336 }
334 } else { 337 } else {
335 if (!ASIC_IS_AVIVO(rdev)) 338 if (!ASIC_IS_AVIVO(rdev))
336 ret = radeon_get_legacy_connector_info_from_table(dev); 339 ret = radeon_get_legacy_connector_info_from_table(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4cdd8b4f7549..8495d4e32e18 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
140 140
141bool radeon_fence_signaled(struct radeon_fence *fence) 141bool radeon_fence_signaled(struct radeon_fence *fence)
142{ 142{
143 struct radeon_device *rdev = fence->rdev;
144 unsigned long irq_flags; 143 unsigned long irq_flags;
145 bool signaled = false; 144 bool signaled = false;
146 145
147 if (rdev->gpu_lockup) { 146 if (!fence)
148 return true; 147 return true;
149 } 148
150 if (fence == NULL) { 149 if (fence->rdev->gpu_lockup)
151 return true; 150 return true;
152 } 151
153 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags); 152 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
154 signaled = fence->signaled; 153 signaled = fence->signaled;
 155 /* if we are shutting down report all fences as signaled */ 154 /* if we are shutting down report all fences as signaled */
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index b79ecc4a7cc4..2f349a300195 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
289 drm_radeon_irq_emit_t *emit = data; 289 drm_radeon_irq_emit_t *emit = data;
290 int result; 290 int result;
291 291
292 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
293 return -EINVAL;
294
295 LOCK_TEST_WITH_RETURN(dev, file_priv);
296
297 if (!dev_priv) { 292 if (!dev_priv) {
298 DRM_ERROR("called with no initialization\n"); 293 DRM_ERROR("called with no initialization\n");
299 return -EINVAL; 294 return -EINVAL;
300 } 295 }
301 296
297 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
298 return -EINVAL;
299
300 LOCK_TEST_WITH_RETURN(dev, file_priv);
301
302 result = radeon_emit_irq(dev); 302 result = radeon_emit_irq(dev);
303 303
304 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 304 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4f8ea4260572..4245218e954f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -396,7 +396,7 @@ int rs600_irq_process(struct radeon_device *rdev)
396 } 396 }
397 while (status || r500_disp_int) { 397 while (status || r500_disp_int) {
398 /* SW interrupt */ 398 /* SW interrupt */
399 if (G_000040_SW_INT_EN(status)) 399 if (G_000044_SW_INT(status))
400 radeon_fence_process(rdev); 400 radeon_fence_process(rdev);
401 /* Vertical blank interrupts */ 401 /* Vertical blank interrupts */
402 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) 402 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
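
Editorial aside: the rs600.c fix above dispatches on the latched SW interrupt status bit instead of the enable bit. A generic illustration of that enable-vs-status distinction; the register bit name below is invented, not the rs600 layout.

#include <stdint.h>

#define EXAMPLE_SW_INT_STATUS	(1u << 25)	/* read: the IRQ has fired */

static int example_should_handle_sw_irq(uint32_t status_reg)
{
	/*
	 * Dispatch on the latched status, not on whether the source is
	 * merely enabled -- an enabled-but-idle source must be ignored.
	 */
	return (status_reg & EXAMPLE_SW_INT_STATUS) != 0;
}
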
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index c37ee9e6b67b..39e1c0d39476 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -68,6 +68,7 @@ config W90P910_ETH
68 tristate "Nuvoton w90p910 Ethernet support" 68 tristate "Nuvoton w90p910 Ethernet support"
69 depends on ARM && ARCH_W90X900 69 depends on ARM && ARCH_W90X900
70 select PHYLIB 70 select PHYLIB
71 select MII
71 help 72 help
72 Say Y here if you want to use built-in Ethernet ports 73 Say Y here if you want to use built-in Ethernet ports
73 on w90p910 processor. 74 on w90p910 processor.
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 1c01b96c9611..2d28d58200d0 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4684,6 +4684,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4684 INIT_WORK(&hw->restart_work, sky2_restart); 4684 INIT_WORK(&hw->restart_work, sky2_restart);
4685 4685
4686 pci_set_drvdata(pdev, hw); 4686 pci_set_drvdata(pdev, hw);
4687 pdev->d3_delay = 150;
4687 4688
4688 return 0; 4689 return 0;
4689 4690
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c5df94e86678..807224ec8351 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -75,7 +75,8 @@ static ssize_t local_cpus_show(struct device *dev,
75 int len; 75 int len;
76 76
77#ifdef CONFIG_NUMA 77#ifdef CONFIG_NUMA
78 mask = cpumask_of_node(dev_to_node(dev)); 78 mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
79 cpumask_of_node(dev_to_node(dev));
79#else 80#else
80 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); 81 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
81#endif 82#endif
@@ -93,7 +94,8 @@ static ssize_t local_cpulist_show(struct device *dev,
93 int len; 94 int len;
94 95
95#ifdef CONFIG_NUMA 96#ifdef CONFIG_NUMA
96 mask = cpumask_of_node(dev_to_node(dev)); 97 mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
98 cpumask_of_node(dev_to_node(dev));
97#else 99#else
98 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); 100 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
99#endif 101#endif
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0906599ebfde..315fea47e784 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -29,7 +29,17 @@ const char *pci_power_names[] = {
29}; 29};
30EXPORT_SYMBOL_GPL(pci_power_names); 30EXPORT_SYMBOL_GPL(pci_power_names);
31 31
32unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 32unsigned int pci_pm_d3_delay;
33
34static void pci_dev_d3_sleep(struct pci_dev *dev)
35{
36 unsigned int delay = dev->d3_delay;
37
38 if (delay < pci_pm_d3_delay)
39 delay = pci_pm_d3_delay;
40
41 msleep(delay);
42}
33 43
34#ifdef CONFIG_PCI_DOMAINS 44#ifdef CONFIG_PCI_DOMAINS
35int pci_domains_supported = 1; 45int pci_domains_supported = 1;
@@ -522,7 +532,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
522 /* Mandatory power management transition delays */ 532 /* Mandatory power management transition delays */
523 /* see PCI PM 1.1 5.6.1 table 18 */ 533 /* see PCI PM 1.1 5.6.1 table 18 */
524 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 534 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
525 msleep(pci_pm_d3_delay); 535 pci_dev_d3_sleep(dev);
526 else if (state == PCI_D2 || dev->current_state == PCI_D2) 536 else if (state == PCI_D2 || dev->current_state == PCI_D2)
527 udelay(PCI_PM_D2_DELAY); 537 udelay(PCI_PM_D2_DELAY);
528 538
@@ -1409,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev)
1409 } 1419 }
1410 1420
1411 dev->pm_cap = pm; 1421 dev->pm_cap = pm;
1422 dev->d3_delay = PCI_PM_D3_WAIT;
1412 1423
1413 dev->d1_support = false; 1424 dev->d1_support = false;
1414 dev->d2_support = false; 1425 dev->d2_support = false;
@@ -2247,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
2247 csr &= ~PCI_PM_CTRL_STATE_MASK; 2258 csr &= ~PCI_PM_CTRL_STATE_MASK;
2248 csr |= PCI_D3hot; 2259 csr |= PCI_D3hot;
2249 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 2260 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2250 msleep(pci_pm_d3_delay); 2261 pci_dev_d3_sleep(dev);
2251 2262
2252 csr &= ~PCI_PM_CTRL_STATE_MASK; 2263 csr &= ~PCI_PM_CTRL_STATE_MASK;
2253 csr |= PCI_D0; 2264 csr |= PCI_D0;
2254 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); 2265 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2255 msleep(pci_pm_d3_delay); 2266 pci_dev_d3_sleep(dev);
2256 2267
2257 return 0; 2268 return 0;
2258} 2269}
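
Editorial aside: with pci_dev_d3_sleep() above, the D3 transition delay becomes per-device (dev->d3_delay, defaulted to PCI_PM_D3_WAIT in pci_pm_init()), with the global pci_pm_d3_delay acting as a floor. A hedged sketch of how a driver raises its own delay, mirroring the sky2 hunk earlier in this series; the probe body is a placeholder.

#include <linux/pci.h>

static int __devinit example_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* This hardware needs 150 ms after D3hot instead of the default. */
	pdev->d3_delay = 150;

	return 0;
}
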
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 797d47809f7a..8c30a9544d61 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj)
321 unsigned long flags; 321 unsigned long flags;
322 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn); 322 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
323 int pos_cap_err, rp_pos_cap_err; 323 int pos_cap_err, rp_pos_cap_err;
324 u32 sever; 324 u32 sever, mask;
325 int ret = 0; 325 int ret = 0;
326 326
327 dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); 327 dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
@@ -374,6 +374,24 @@ static int aer_inject(struct aer_error_inj *einj)
374 err->header_log2 = einj->header_log2; 374 err->header_log2 = einj->header_log2;
375 err->header_log3 = einj->header_log3; 375 err->header_log3 = einj->header_log3;
376 376
377 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &mask);
378 if (einj->cor_status && !(einj->cor_status & ~mask)) {
379 ret = -EINVAL;
380 printk(KERN_WARNING "The correctable error(s) is masked "
381 "by device\n");
382 spin_unlock_irqrestore(&inject_lock, flags);
383 goto out_put;
384 }
385
386 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &mask);
387 if (einj->uncor_status && !(einj->uncor_status & ~mask)) {
388 ret = -EINVAL;
389 printk(KERN_WARNING "The uncorrectable error(s) is masked "
390 "by device\n");
391 spin_unlock_irqrestore(&inject_lock, flags);
392 goto out_put;
393 }
394
377 rperr = __find_aer_error_by_dev(rpdev); 395 rperr = __find_aer_error_by_dev(rpdev);
378 if (!rperr) { 396 if (!rperr) {
379 rperr = rperr_alloc; 397 rperr = rperr_alloc;
@@ -413,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj)
413 if (ret) 431 if (ret)
414 goto out_put; 432 goto out_put;
415 433
416 if (find_aer_device(rpdev, &edev)) 434 if (find_aer_device(rpdev, &edev)) {
435 if (!get_service_data(edev)) {
436 printk(KERN_WARNING "AER service is not initialized\n");
437 ret = -EINVAL;
438 goto out_put;
439 }
417 aer_irq(-1, edev); 440 aer_irq(-1, edev);
441 }
418 else 442 else
419 ret = -EINVAL; 443 ret = -EINVAL;
420out_put: 444out_put:
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 413262eb95b7..b174188ac121 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -27,7 +27,7 @@
27 */ 27 */
28static void release_pcie_device(struct device *dev) 28static void release_pcie_device(struct device *dev)
29{ 29{
30 kfree(to_pcie_device(dev)); 30 kfree(to_pcie_device(dev));
31} 31}
32 32
33/** 33/**
@@ -346,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data)
346{ 346{
347 struct pcie_port_service_driver *service_driver; 347 struct pcie_port_service_driver *service_driver;
348 348
349 if ((dev->bus == &pcie_port_bus_type) && 349 if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
350 (dev->driver)) { 350 service_driver = to_service_driver(dev->driver);
351 service_driver = to_service_driver(dev->driver); 351 if (service_driver->suspend)
352 if (service_driver->suspend) 352 service_driver->suspend(to_pcie_device(dev));
353 service_driver->suspend(to_pcie_device(dev)); 353 }
354 }
355 return 0; 354 return 0;
356} 355}
357 356
@@ -494,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
494 493
495 return driver_register(&new->driver); 494 return driver_register(&new->driver);
496} 495}
496EXPORT_SYMBOL(pcie_port_service_register);
497 497
498/** 498/**
499 * pcie_port_service_unregister - unregister PCI Express port service driver 499 * pcie_port_service_unregister - unregister PCI Express port service driver
@@ -503,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
503{ 503{
504 driver_unregister(&drv->driver); 504 driver_unregister(&drv->driver);
505} 505}
506
507EXPORT_SYMBOL(pcie_port_service_register);
508EXPORT_SYMBOL(pcie_port_service_unregister); 506EXPORT_SYMBOL(pcie_port_service_unregister);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 34d65172a4d7..13c8972886e6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -63,7 +63,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
63 * pcie_portdrv_probe - Probe PCI-Express port devices 63 * pcie_portdrv_probe - Probe PCI-Express port devices
64 * @dev: PCI-Express port device being probed 64 * @dev: PCI-Express port device being probed
65 * 65 *
66 * If detected invokes the pcie_port_device_register() method for 66 * If detected invokes the pcie_port_device_register() method for
67 * this port device. 67 * this port device.
68 * 68 *
69 */ 69 */
@@ -78,7 +78,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
78 (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) 78 (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
79 return -ENODEV; 79 return -ENODEV;
80 80
81 if (!dev->irq && dev->pin) { 81 if (!dev->irq && dev->pin) {
82 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " 82 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
83 "check vendor BIOS\n", dev->vendor, dev->device); 83 "check vendor BIOS\n", dev->vendor, dev->device);
84 } 84 }
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
91 return 0; 91 return 0;
92} 92}
93 93
94static void pcie_portdrv_remove (struct pci_dev *dev) 94static void pcie_portdrv_remove(struct pci_dev *dev)
95{ 95{
96 pcie_port_device_remove(dev); 96 pcie_port_device_remove(dev);
97 pci_disable_device(dev); 97 pci_disable_device(dev);
@@ -129,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data)
129static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, 129static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
130 enum pci_channel_state error) 130 enum pci_channel_state error)
131{ 131{
132 struct aer_broadcast_data result_data = 132 struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
133 {error, PCI_ERS_RESULT_CAN_RECOVER}; 133 int ret;
134 int retval;
135 134
136 /* can not fail */ 135 /* can not fail */
137 retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter); 136 ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
138 137
139 return result_data.result; 138 return data.result;
140} 139}
141 140
142static int mmio_enabled_iter(struct device *device, void *data) 141static int mmio_enabled_iter(struct device *device, void *data)
@@ -290,7 +289,7 @@ static int __init pcie_portdrv_init(void)
290 return retval; 289 return retval;
291} 290}
292 291
293static void __exit pcie_portdrv_exit(void) 292static void __exit pcie_portdrv_exit(void)
294{ 293{
295 pci_unregister_driver(&pcie_portdriver); 294 pci_unregister_driver(&pcie_portdriver);
296 pcie_port_bus_unregister(); 295 pcie_port_bus_unregister();
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 5b648f0c6075..ad4c414dbfbc 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -393,8 +393,6 @@ static void hp_wmi_notify(u32 value, void *context)
393 } else 393 } else
394 printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", 394 printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
395 eventcode); 395 eventcode);
396
397 kfree(obj);
398} 396}
399 397
400static int __init hp_wmi_input_setup(void) 398static int __init hp_wmi_input_setup(void)
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 7c815d3327f7..28d86f9df83c 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -522,6 +522,40 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct of_device *op,
522 set_fan_speeds(fp); 522 set_fan_speeds(fp);
523} 523}
524 524
525static void destroy_one_temp(struct bbc_cpu_temperature *tp)
526{
527 bbc_i2c_detach(tp->client);
528 kfree(tp);
529}
530
531static void destroy_all_temps(struct bbc_i2c_bus *bp)
532{
533 struct bbc_cpu_temperature *tp, *tpos;
534
535 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
536 list_del(&tp->bp_list);
537 list_del(&tp->glob_list);
538 destroy_one_temp(tp);
539 }
540}
541
542static void destroy_one_fan(struct bbc_fan_control *fp)
543{
544 bbc_i2c_detach(fp->client);
545 kfree(fp);
546}
547
548static void destroy_all_fans(struct bbc_i2c_bus *bp)
549{
550 struct bbc_fan_control *fp, *fpos;
551
552 list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
553 list_del(&fp->bp_list);
554 list_del(&fp->glob_list);
555 destroy_one_fan(fp);
556 }
557}
558
525int bbc_envctrl_init(struct bbc_i2c_bus *bp) 559int bbc_envctrl_init(struct bbc_i2c_bus *bp)
526{ 560{
527 struct of_device *op; 561 struct of_device *op;
@@ -541,6 +575,8 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
541 int err = PTR_ERR(kenvctrld_task); 575 int err = PTR_ERR(kenvctrld_task);
542 576
543 kenvctrld_task = NULL; 577 kenvctrld_task = NULL;
578 destroy_all_temps(bp);
579 destroy_all_fans(bp);
544 return err; 580 return err;
545 } 581 }
546 } 582 }
@@ -548,35 +584,11 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
548 return 0; 584 return 0;
549} 585}
550 586
551static void destroy_one_temp(struct bbc_cpu_temperature *tp)
552{
553 bbc_i2c_detach(tp->client);
554 kfree(tp);
555}
556
557static void destroy_one_fan(struct bbc_fan_control *fp)
558{
559 bbc_i2c_detach(fp->client);
560 kfree(fp);
561}
562
563void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) 587void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp)
564{ 588{
565 struct bbc_cpu_temperature *tp, *tpos;
566 struct bbc_fan_control *fp, *fpos;
567
568 if (kenvctrld_task) 589 if (kenvctrld_task)
569 kthread_stop(kenvctrld_task); 590 kthread_stop(kenvctrld_task);
570 591
571 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { 592 destroy_all_temps(bp);
572 list_del(&tp->bp_list); 593 destroy_all_fans(bp);
573 list_del(&tp->glob_list);
574 destroy_one_temp(tp);
575 }
576
577 list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
578 list_del(&fp->bp_list);
579 list_del(&fp->glob_list);
580 destroy_one_fan(fp);
581 }
582} 594}
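
Editorial aside: the bbc_envctrl.c change above factors the teardown loops into destroy_all_temps()/destroy_all_fans() so the kthread_run() error path in bbc_envctrl_init() can unwind what was attached. A minimal sketch of the safe-iteration teardown idiom it relies on; the item type and helper name are hypothetical.

#include <linux/list.h>
#include <linux/slab.h>

struct example_item {
	struct list_head node;
};

static void example_destroy_all(struct list_head *head)
{
	struct example_item *item, *tmp;

	/* The _safe variant lets 'item' be freed while iteration continues. */
	list_for_each_entry_safe(item, tmp, head, node) {
		list_del(&item->node);
		kfree(item);
	}
}
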
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 26ffdcd5a437..15a00e8b7122 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn)
1440static int is_cxgb3_dev(struct net_device *dev) 1440static int is_cxgb3_dev(struct net_device *dev)
1441{ 1441{
1442 struct cxgb3i_sdev_data *cdata; 1442 struct cxgb3i_sdev_data *cdata;
1443 struct net_device *ndev = dev;
1444
1445 if (dev->priv_flags & IFF_802_1Q_VLAN)
1446 ndev = vlan_dev_real_dev(dev);
1443 1447
1444 write_lock(&cdata_rwlock); 1448 write_lock(&cdata_rwlock);
1445 list_for_each_entry(cdata, &cdata_list, list) { 1449 list_for_each_entry(cdata, &cdata_list, list) {
@@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev)
1447 int i; 1451 int i;
1448 1452
1449 for (i = 0; i < ports->nports; i++) 1453 for (i = 0; i < ports->nports; i++)
1450 if (dev == ports->lldevs[i]) { 1454 if (ndev == ports->lldevs[i]) {
1451 write_unlock(&cdata_rwlock); 1455 write_unlock(&cdata_rwlock);
1452 return 1; 1456 return 1;
1453 } 1457 }
@@ -1566,6 +1570,26 @@ out_err:
1566 return -EINVAL; 1570 return -EINVAL;
1567} 1571}
1568 1572
1573/**
1574 * cxgb3i_find_dev - find the interface associated with the given address
1575 * @ipaddr: ip address
1576 */
1577static struct net_device *
1578cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
1579{
1580 struct flowi fl;
1581 int err;
1582 struct rtable *rt;
1583
1584 memset(&fl, 0, sizeof(fl));
1585 fl.nl_u.ip4_u.daddr = ipaddr;
1586
1587 err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
1588 if (!err)
1589 return (&rt->u.dst)->dev;
1590
1591 return NULL;
1592}
1569 1593
1570/** 1594/**
1571 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address 1595 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
@@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1581 struct cxgb3i_sdev_data *cdata; 1605 struct cxgb3i_sdev_data *cdata;
1582 struct t3cdev *cdev; 1606 struct t3cdev *cdev;
1583 __be32 sipv4; 1607 __be32 sipv4;
1608 struct net_device *dstdev;
1584 int err; 1609 int err;
1585 1610
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); 1611 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
@@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1591 c3cn->daddr.sin_port = usin->sin_port; 1616 c3cn->daddr.sin_port = usin->sin_port;
1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1617 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1593 1618
1619 dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
1620 if (!dstdev || !is_cxgb3_dev(dstdev))
1621 return -ENETUNREACH;
1622
1623 if (dstdev->priv_flags & IFF_802_1Q_VLAN)
1624 dev = dstdev;
1625
1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, 1626 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1595 c3cn->daddr.sin_addr.s_addr, 1627 c3cn->daddr.sin_addr.s_addr,
1596 c3cn->saddr.sin_port, 1628 c3cn->saddr.sin_port,
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ce522702a6c1..2cc39684ce97 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4142 spin_lock_irq(shost->host_lock); 4142 spin_lock_irq(shost->host_lock);
4143 if (vport->fc_rscn_flush) { 4143 if (vport->fc_rscn_flush) {
4144 /* Another thread is walking fc_rscn_id_list on this vport */ 4144 /* Another thread is walking fc_rscn_id_list on this vport */
4145 spin_unlock_irq(shost->host_lock);
4146 vport->fc_flag |= FC_RSCN_DISCOVERY; 4145 vport->fc_flag |= FC_RSCN_DISCOVERY;
4146 spin_unlock_irq(shost->host_lock);
4147 /* Send back ACC */ 4147 /* Send back ACC */
4148 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 4148 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4149 return 0; 4149 return 0;
@@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5948 lpfc_initial_fdisc(vport); 5948 lpfc_initial_fdisc(vport);
5949 break; 5949 break;
5950 } 5950 }
5951
5952 } else { 5951 } else {
5952 vport->vpi_state |= LPFC_VPI_REGISTERED;
5953 if (vport == phba->pport) 5953 if (vport == phba->pport)
5954 if (phba->sli_rev < LPFC_SLI_REV4) 5954 if (phba->sli_rev < LPFC_SLI_REV4)
5955 lpfc_issue_fabric_reglogin(vport); 5955 lpfc_issue_fabric_reglogin(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3b9424427652..2445e399fd60 100755
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba)
747 747
748 if (phba->link_state == LPFC_LINK_DOWN) 748 if (phba->link_state == LPFC_LINK_DOWN)
749 return 0; 749 return 0;
750
751 /* Block all SCSI stack I/Os */
752 lpfc_scsi_dev_block(phba);
753
750 spin_lock_irq(&phba->hbalock); 754 spin_lock_irq(&phba->hbalock);
751 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); 755 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
752 if (phba->link_state > LPFC_LINK_DOWN) { 756 if (phba->link_state > LPFC_LINK_DOWN) {
@@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1555 * to book keeping the FCFIs can be used. 1559 * to book keeping the FCFIs can be used.
1556 */ 1560 */
1557 if (shdr_status || shdr_add_status) { 1561 if (shdr_status || shdr_add_status) {
1558 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1562 if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
1559 "2521 READ_FCF_RECORD mailbox failed " 1563 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1560 "with status x%x add_status x%x, mbx\n", 1564 "2726 READ_FCF_RECORD Indicates empty "
1561 shdr_status, shdr_add_status); 1565 "FCF table.\n");
1566 } else {
1567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1568 "2521 READ_FCF_RECORD mailbox failed "
1569 "with status x%x add_status x%x, mbx\n",
1570 shdr_status, shdr_add_status);
1571 }
1562 goto out; 1572 goto out;
1563 } 1573 }
1564 /* Interpreting the returned information of FCF records */ 1574 /* Interpreting the returned information of FCF records */
@@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1698 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1708 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1699 return; 1709 return;
1700 } 1710 }
1711 spin_lock_irq(&phba->hbalock);
1701 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 1712 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
1713 spin_unlock_irq(&phba->hbalock);
1702 1714
1703 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 1715 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1704 lpfc_initial_fdisc(vport); 1716 lpfc_initial_fdisc(vport);
@@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2259 mb->mbxStatus); 2271 mb->mbxStatus);
2260 break; 2272 break;
2261 } 2273 }
2274 spin_lock_irq(&phba->hbalock);
2262 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 2275 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
2276 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2277 spin_unlock_irq(&phba->hbalock);
2263 vport->unreg_vpi_cmpl = VPORT_OK; 2278 vport->unreg_vpi_cmpl = VPORT_OK;
2264 mempool_free(pmb, phba->mbox_mem_pool); 2279 mempool_free(pmb, phba->mbox_mem_pool);
2265 /* 2280 /*
@@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4475 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) 4490 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4476 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4491 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4477 lpfc_mbx_unreg_vpi(vports[i]); 4492 lpfc_mbx_unreg_vpi(vports[i]);
4493 spin_lock_irq(&phba->hbalock);
4478 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 4494 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4479 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 4495 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
4496 spin_unlock_irq(&phba->hbalock);
4480 } 4497 }
4481 lpfc_destroy_vport_work_array(phba, vports); 4498 lpfc_destroy_vport_work_array(phba, vports);
4482 4499
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1585148a17e5..8a2a1c5935c6 100644..100755
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy {
1013}; 1013};
1014 1014
1015#define LPFC_HDR_BUF_SIZE 128 1015#define LPFC_HDR_BUF_SIZE 128
1016#define LPFC_DATA_BUF_SIZE 4096 1016#define LPFC_DATA_BUF_SIZE 2048
1017struct rq_context { 1017struct rq_context {
1018 uint32_t word0; 1018 uint32_t word0;
1019#define lpfc_rq_context_rq_size_SHIFT 16 1019#define lpfc_rq_context_rq_size_SHIFT 16
@@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg {
1371#define STATUS_ERROR_ACITMAIN 0x2a 1371#define STATUS_ERROR_ACITMAIN 0x2a
1372#define STATUS_REBOOT_REQUIRED 0x2c 1372#define STATUS_REBOOT_REQUIRED 0x2c
1373#define STATUS_FCF_IN_USE 0x3a 1373#define STATUS_FCF_IN_USE 0x3a
1374#define STATUS_FCF_TABLE_EMPTY 0x43
1374 1375
1375struct lpfc_mbx_sli4_config { 1376struct lpfc_mbx_sli4_config {
1376 struct mbox_header header; 1377 struct mbox_header header;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d4da6bdd0e73..b8eb1b6e5e77 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3006 struct lpfc_vport *vport; 3006 struct lpfc_vport *vport;
3007 struct lpfc_nodelist *ndlp; 3007 struct lpfc_nodelist *ndlp;
3008 struct Scsi_Host *shost; 3008 struct Scsi_Host *shost;
3009 uint32_t link_state;
3009 3010
3010 phba->fc_eventTag = acqe_fcoe->event_tag; 3011 phba->fc_eventTag = acqe_fcoe->event_tag;
3011 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3012 phba->fcoe_eventtag = acqe_fcoe->event_tag;
@@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3052 break; 3053 break;
3053 /* 3054 /*
 3054 * Currently, the driver supports only one FCF - so treat this as 3055 * Currently, the driver supports only one FCF - so treat this as
3055 * a link down. 3056 * a link down, but save the link state because we don't want
3057 * it to be changed to Link Down unless it is already down.
3056 */ 3058 */
3059 link_state = phba->link_state;
3057 lpfc_linkdown(phba); 3060 lpfc_linkdown(phba);
3061 phba->link_state = link_state;
3058 /* Unregister FCF if no devices connected to it */ 3062 /* Unregister FCF if no devices connected to it */
3059 lpfc_unregister_unused_fcf(phba); 3063 lpfc_unregister_unused_fcf(phba);
3060 break; 3064 break;
@@ -7226,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7226{ 7230{
7227 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7228 "2711 PCI channel permanent disable for failure\n"); 7232 "2711 PCI channel permanent disable for failure\n");
7229 /* Block all SCSI devices' I/Os on the host */
7230 lpfc_scsi_dev_block(phba);
7231 /* Clean up all driver's outstanding SCSI I/Os */ 7233 /* Clean up all driver's outstanding SCSI I/Os */
7232 lpfc_sli_flush_fcp_rings(phba); 7234 lpfc_sli_flush_fcp_rings(phba);
7233} 7235}
@@ -7256,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7256 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7258 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7257 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7259 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7258 7260
7261 /* Block all SCSI devices' I/Os on the host */
7262 lpfc_scsi_dev_block(phba);
7263
7259 switch (state) { 7264 switch (state) {
7260 case pci_channel_io_normal: 7265 case pci_channel_io_normal:
7261 /* Non-fatal error, prepare for recovery */ 7266 /* Non-fatal error, prepare for recovery */
@@ -7507,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7507 error = -ENODEV; 7512 error = -ENODEV;
7508 goto out_free_sysfs_attr; 7513 goto out_free_sysfs_attr;
7509 } 7514 }
7515 /* Default to single FCP EQ for non-MSI-X */
7516 if (phba->intr_type != MSIX)
7517 phba->cfg_fcp_eq_count = 1;
7510 /* Set up SLI-4 HBA */ 7518 /* Set up SLI-4 HBA */
7511 if (lpfc_sli4_hba_setup(phba)) { 7519 if (lpfc_sli4_hba_setup(phba)) {
7512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7935667b81a5..589549b2bf0e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1383/* HBQ for ELS and CT traffic. */ 1383/* HBQ for ELS and CT traffic. */
1384static struct lpfc_hbq_init lpfc_els_hbq = { 1384static struct lpfc_hbq_init lpfc_els_hbq = {
1385 .rn = 1, 1385 .rn = 1,
1386 .entry_count = 200, 1386 .entry_count = 256,
1387 .mask_count = 0, 1387 .mask_count = 0,
1388 .profile = 0, 1388 .profile = 0,
1389 .ring_mask = (1 << LPFC_ELS_RING), 1389 .ring_mask = (1 << LPFC_ELS_RING),
@@ -1482,8 +1482,11 @@ err:
1482int 1482int
1483lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1483lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1484{ 1484{
1485 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1485 if (phba->sli_rev == LPFC_SLI_REV4)
1486 lpfc_hbq_defs[qno]->add_count)); 1486 return 0;
1487 else
1488 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1489 lpfc_hbq_defs[qno]->add_count);
1487} 1490}
1488 1491
1489/** 1492/**
@@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1498static int 1501static int
1499lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1502lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1500{ 1503{
1501 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1504 if (phba->sli_rev == LPFC_SLI_REV4)
1502 lpfc_hbq_defs[qno]->init_count)); 1505 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1506 lpfc_hbq_defs[qno]->entry_count);
1507 else
1508 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1509 lpfc_hbq_defs[qno]->init_count);
1503} 1510}
1504 1511
1505/** 1512/**
@@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4110 if (rc) { 4117 if (rc) {
4111 dma_free_coherent(&phba->pcidev->dev, dma_size, 4118 dma_free_coherent(&phba->pcidev->dev, dma_size,
4112 dmabuf->virt, dmabuf->phys); 4119 dmabuf->virt, dmabuf->phys);
4120 kfree(dmabuf);
4113 return -EIO; 4121 return -EIO;
4114 } 4122 }
4115 4123
@@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5848 iocbq->iocb.un.ulpWord[3]); 5856 iocbq->iocb.un.ulpWord[3]);
5849 wqe->generic.word3 = 0; 5857 wqe->generic.word3 = 0;
5850 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); 5858 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5851 bf_set(wqe_xc, &wqe->generic, 1);
5852 /* The entire sequence is transmitted for this IOCB */ 5859 /* The entire sequence is transmitted for this IOCB */
5853 xmit_len = total_len; 5860 xmit_len = total_len;
5854 cmnd = CMD_XMIT_SEQUENCE64_CR; 5861 cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10944 return dmabuf; 10951 return dmabuf;
10945 } 10952 }
10946 temp_hdr = seq_dmabuf->hbuf.virt; 10953 temp_hdr = seq_dmabuf->hbuf.virt;
10947 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { 10954 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
10955 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
10948 list_del_init(&seq_dmabuf->hbuf.list); 10956 list_del_init(&seq_dmabuf->hbuf.list);
10949 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 10957 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10950 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 10958 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
@@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10955 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 10963 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
10956 seq_dmabuf->time_stamp = jiffies; 10964 seq_dmabuf->time_stamp = jiffies;
10957 lpfc_update_rcv_time_stamp(vport); 10965 lpfc_update_rcv_time_stamp(vport);
10966 if (list_empty(&seq_dmabuf->dbuf.list)) {
10967 temp_hdr = dmabuf->hbuf.virt;
10968 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
10969 return seq_dmabuf;
10970 }
10958 /* find the correct place in the sequence to insert this frame */ 10971 /* find the correct place in the sequence to insert this frame */
10959 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 10972 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10960 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 10973 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10963 * If the frame's sequence count is greater than the frame on 10976 * If the frame's sequence count is greater than the frame on
10964 * the list then insert the frame right after this frame 10977 * the list then insert the frame right after this frame
10965 */ 10978 */
10966 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { 10979 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
10980 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
10967 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 10981 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10968 return seq_dmabuf; 10982 return seq_dmabuf;
10969 } 10983 }
@@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
11210 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11224 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
11211 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11225 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11212 /* If there is a hole in the sequence count then fail. */ 11226 /* If there is a hole in the sequence count then fail. */
11213 if (++seq_count != hdr->fh_seq_cnt) 11227 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
11214 return 0; 11228 return 0;
11215 fctl = (hdr->fh_f_ctl[0] << 16 | 11229 fctl = (hdr->fh_f_ctl[0] << 16 |
11216 hdr->fh_f_ctl[1] << 8 | 11230 hdr->fh_f_ctl[1] << 8 |
@@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11242 struct lpfc_iocbq *first_iocbq, *iocbq; 11256 struct lpfc_iocbq *first_iocbq, *iocbq;
11243 struct fc_frame_header *fc_hdr; 11257 struct fc_frame_header *fc_hdr;
11244 uint32_t sid; 11258 uint32_t sid;
11259 struct ulp_bde64 *pbde;
11245 11260
11246 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11261 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11247 /* remove from receive buffer list */ 11262 /* remove from receive buffer list */
@@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11283 if (!iocbq->context3) { 11298 if (!iocbq->context3) {
11284 iocbq->context3 = d_buf; 11299 iocbq->context3 = d_buf;
11285 iocbq->iocb.ulpBdeCount++; 11300 iocbq->iocb.ulpBdeCount++;
11286 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
11287 LPFC_DATA_BUF_SIZE;
11301 pbde = (struct ulp_bde64 *)
11302 &iocbq->iocb.unsli3.sli3Words[4];
11303 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
11288 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11304 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
11289 bf_get(lpfc_rcqe_length, 11305 bf_get(lpfc_rcqe_length,
11290 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 11306 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
@@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11401 return; 11417 return;
11402 } 11418 }
11403 /* If not last frame in sequence continue processing frames. */ 11419 /* If not last frame in sequence continue processing frames. */
11404 if (!lpfc_seq_complete(seq_dmabuf)) {
11420 if (!lpfc_seq_complete(seq_dmabuf))
11405 /*
11406 * When saving off frames post a new one and mark this
11407 * frame to be freed when it is finished.
11408 **/
11409 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
11410 dmabuf->tag = -1;
11411 return; 11421 return;
11412 } 11422
11413 /* Send the complete sequence to the upper layer protocol */ 11423 /* Send the complete sequence to the upper layer protocol */
11414 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 11424 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
11415} 11425}
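Note on the fh_seq_cnt hunks above: the FC frame header carries its sequence count in network (big-endian) byte order, so raw comparisons of the field only happen to work on big-endian CPUs. A minimal kernel-side sketch of the corrected comparison, assuming the usual __be16 declaration of the field:

#include <linux/types.h>
#include <asm/byteorder.h>	/* be16_to_cpu() */

struct fc_hdr_sketch {
	__be16 fh_seq_cnt;	/* on-the-wire, big-endian */
};

static bool seq_cnt_before(const struct fc_hdr_sketch *a,
			   const struct fc_hdr_sketch *b)
{
	/* Convert to host order before comparing; comparing the raw
	 * __be16 values gives the wrong ordering on little-endian CPUs. */
	return be16_to_cpu(a->fh_seq_cnt) < be16_to_cpu(b->fh_seq_cnt);
}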
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 25d66d070cf8..44e5f574236b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -28,7 +28,7 @@
28/* Multi-queue arrangement for fast-path FCP work queues */ 28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8 29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1 30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1 31#define LPFC_FP_EQN_DEF 4
32#define LPFC_FP_EQN_MIN 1 32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) 33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34 34
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c7f3aed2aab8..792f72263f1a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.6" 21#define LPFC_DRIVER_VERSION "8.3.7"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 7d6dd83d3592..e3c7fa642306 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport)
512 return VPORT_OK; 512 return VPORT_OK;
513 } 513 }
514 514
515 spin_lock_irq(&phba->hbalock);
515 vport->load_flag |= FC_LOADING; 516 vport->load_flag |= FC_LOADING;
516 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 517 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
518 spin_unlock_irq(&phba->hbalock);
517 519
518 /* Use the Physical nodes Fabric NDLP to determine if the link is 520 /* Use the Physical nodes Fabric NDLP to determine if the link is
519 * up and ready to FDISC. 521 * up and ready to FDISC.
@@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
700 } 702 }
701 spin_unlock_irq(&phba->ndlp_lock); 703 spin_unlock_irq(&phba->ndlp_lock);
702 } 704 }
703 if (vport->vpi_state != LPFC_VPI_REGISTERED) 705 if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
704 goto skip_logo; 706 goto skip_logo;
705 vport->unreg_vpi_cmpl = VPORT_INVAL; 707 vport->unreg_vpi_cmpl = VPORT_INVAL;
706 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 708 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
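Two small patterns show up in the lpfc_vport.c hunks above: the load_flag/fc_flag updates are now serialized under the HBA lock, and vpi_state is treated as a bit mask, so registration is tested with & rather than ==. A hedged sketch of both (names and bit values here are illustrative, not the driver's own):

#include <linux/spinlock.h>
#include <linux/types.h>

#define PORT_REGISTERED	0x1	/* illustrative bit values */
#define PORT_LOADING	0x2

struct port_sketch {
	spinlock_t lock;
	unsigned long state;	/* bit mask; several bits may be set */
};

static void port_mark_loading(struct port_sketch *p)
{
	/* Read-modify-write of the flag word must be serialized. */
	spin_lock_irq(&p->lock);
	p->state |= PORT_LOADING;
	spin_unlock_irq(&p->lock);
}

static bool port_registered(const struct port_sketch *p)
{
	/* Other bits may be set, so test the bit instead of comparing
	 * the whole word for equality. */
	return (p->state & PORT_REGISTERED) != 0;
}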
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e7d2688fbeba..b6f1ef954af1 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2483,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2483 sense_copied = 1; 2483 sense_copied = 1;
2484 } 2484 }
2485 2485
2486 if (RES_IS_GSCSI(res->cfg_entry)) { 2486 if (RES_IS_GSCSI(res->cfg_entry))
2487 pmcraid_cancel_all(cmd, sense_copied); 2487 pmcraid_cancel_all(cmd, sense_copied);
2488 } else if (sense_copied) { 2488 else if (sense_copied)
2489 pmcraid_erp_done(cmd); 2489 pmcraid_erp_done(cmd);
2490 return 0; 2490 else
2491 } else {
2492 pmcraid_request_sense(cmd); 2491 pmcraid_request_sense(cmd);
2493 }
2494 2492
2495 return 1; 2493 return 1;
2496 2494
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 21e2bc4d7401..3a9f5b288aee 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
232 if (off) 232 if (off)
233 return 0; 233 return 0;
234 234
235 if (unlikely(pci_channel_offline(ha->pdev)))
236 return 0;
237
235 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) 238 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
236 return -EINVAL; 239 return -EINVAL;
237 if (start > ha->optrom_size) 240 if (start > ha->optrom_size)
@@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
379 struct device, kobj))); 382 struct device, kobj)));
380 struct qla_hw_data *ha = vha->hw; 383 struct qla_hw_data *ha = vha->hw;
381 384
385 if (unlikely(pci_channel_offline(ha->pdev)))
386 return 0;
387
382 if (!capable(CAP_SYS_ADMIN)) 388 if (!capable(CAP_SYS_ADMIN))
383 return 0; 389 return 0;
384 390
@@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
398 struct qla_hw_data *ha = vha->hw; 404 struct qla_hw_data *ha = vha->hw;
399 uint8_t *tmp_data; 405 uint8_t *tmp_data;
400 406
407 if (unlikely(pci_channel_offline(ha->pdev)))
408 return 0;
409
401 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || 410 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
402 !ha->isp_ops->write_nvram) 411 !ha->isp_ops->write_nvram)
403 return 0; 412 return 0;
@@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf) 1247 char *buf)
1239{ 1248{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1249 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval; 1250 int rval = QLA_FUNCTION_FAILED;
1242 uint16_t state[5]; 1251 uint16_t state[5];
1243 1252
1244 rval = qla2x00_get_firmware_state(vha, state);
1253 if (!vha->hw->flags.eeh_busy)
1254 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS) 1255 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state)); 1256 memset(state, -1, sizeof(state));
1247 1257
@@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1452 if (!fcport) 1462 if (!fcport)
1453 return; 1463 return;
1454 1464
1455 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) 1465 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1466 return;
1467
1468 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1456 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1469 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1457 else 1470 return;
1458 qla2x00_abort_fcport_cmds(fcport); 1471 }
1459 1472
1460 /* 1473 /*
1461 * Transport has effectively 'deleted' the rport, clear 1474 * Transport has effectively 'deleted' the rport, clear
@@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1475 if (!fcport) 1488 if (!fcport)
1476 return; 1489 return;
1477 1490
1491 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1492 return;
1493
1478 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { 1494 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1479 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); 1495 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1480 return; 1496 return;
@@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1515 pfc_host_stat = &ha->fc_host_stat; 1531 pfc_host_stat = &ha->fc_host_stat;
1516 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 1532 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1517 1533
1534 if (test_bit(UNLOADING, &vha->dpc_flags))
1535 goto done;
1536
1537 if (unlikely(pci_channel_offline(ha->pdev)))
1538 goto done;
1539
1518 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1540 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1519 if (stats == NULL) { 1541 if (stats == NULL) {
1520 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1542 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
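Pattern used throughout the qla_attr.c hunks above: the sysfs/flash accessors bail out early while the PCI channel is offline, i.e. while EEH/AER recovery is in progress. A minimal sketch of the guard, assuming a handler that has reached the device's pci_dev:

#include <linux/pci.h>

static ssize_t guarded_read_sketch(struct pci_dev *pdev, char *buf,
				   size_t count)
{
	/* While recovery is in progress, MMIO and config reads may fault
	 * or return all-ones; report 0 bytes instead of touching the
	 * channel. */
	if (unlikely(pci_channel_offline(pdev)))
		return 0;

	/* ... normal register/flash access would go here ... */
	return count;
}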
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f660dd70b72e..d6d9c86cb058 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -26,7 +26,7 @@
26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ 26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ 27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ 28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */ 29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30 30
31/* 31/*
32* Macros use for debugging the driver. 32* Macros use for debugging the driver.
@@ -132,6 +132,13 @@
132#else 132#else
133#define DEBUG16(x) do {} while (0) 133#define DEBUG16(x) do {} while (0)
134#endif 134#endif
135
136#if defined(QL_DEBUG_LEVEL_17)
137#define DEBUG17(x) do {x;} while (0)
138#else
139#define DEBUG17(x) do {} while (0)
140#endif
141
135/* 142/*
136 * Firmware Dump structure definition 143 * Firmware Dump structure definition
137 */ 144 */
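DEBUG17 follows the same shape as the existing DEBUGn macros: the statement is wrapped in do { } while (0) so the macro behaves as a single statement after if/else, and it compiles away completely unless QL_DEBUG_LEVEL_17 is defined. A small usage sketch (the message is illustrative):

#include <linux/kernel.h>

#if defined(QL_DEBUG_LEVEL_17)
#define DEBUG17(x)	do { x; } while (0)
#else
#define DEBUG17(x)	do { } while (0)
#endif

static void eeh_trace_sketch(void)
{
	/* The argument is only evaluated (or even compiled in) when
	 * QL_DEBUG_LEVEL_17 is defined. */
	DEBUG17(printk(KERN_WARNING "qla2xxx: entering EEH path\n"));
}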
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 384afda7dbe9..608e675f68c8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2256,11 +2256,13 @@ struct qla_hw_data {
2256 uint32_t disable_serdes :1; 2256 uint32_t disable_serdes :1;
2257 uint32_t gpsc_supported :1; 2257 uint32_t gpsc_supported :1;
2258 uint32_t npiv_supported :1; 2258 uint32_t npiv_supported :1;
2259 uint32_t pci_channel_io_perm_failure :1;
2259 uint32_t fce_enabled :1; 2260 uint32_t fce_enabled :1;
2260 uint32_t fac_supported :1; 2261 uint32_t fac_supported :1;
2261 uint32_t chip_reset_done :1; 2262 uint32_t chip_reset_done :1;
2262 uint32_t port0 :1; 2263 uint32_t port0 :1;
2263 uint32_t running_gold_fw :1; 2264 uint32_t running_gold_fw :1;
2265 uint32_t eeh_busy :1;
2264 uint32_t cpu_affinity_enabled :1; 2266 uint32_t cpu_affinity_enabled :1;
2265 uint32_t disable_msix_handshake :1; 2267 uint32_t disable_msix_handshake :1;
2266 } flags; 2268 } flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b6801fc6389..f61fb8d01330 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -324,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
324extern int 324extern int
325qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); 325qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
326 326
327extern int qla2x00_get_data_rate(scsi_qla_host_t *);
327/* 328/*
328 * Global Function Prototypes in qla_isr.c source file. 329 * Global Function Prototypes in qla_isr.c source file.
329 */ 330 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 73a793539d45..b4a0eac8f96d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
269 vha->flags.online = 0; 269 vha->flags.online = 0;
270 ha->flags.chip_reset_done = 0; 270 ha->flags.chip_reset_done = 0;
271 vha->flags.reset_active = 0; 271 vha->flags.reset_active = 0;
272 ha->flags.pci_channel_io_perm_failure = 0;
273 ha->flags.eeh_busy = 0;
272 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 274 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
273 atomic_set(&vha->loop_state, LOOP_DOWN); 275 atomic_set(&vha->loop_state, LOOP_DOWN);
274 vha->device_flags = DFLG_NO_CABLE; 276 vha->device_flags = DFLG_NO_CABLE;
@@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
581 uint32_t cnt; 583 uint32_t cnt;
582 uint16_t cmd; 584 uint16_t cmd;
583 585
586 if (unlikely(pci_channel_offline(ha->pdev)))
587 return;
588
584 ha->isp_ops->disable_intrs(ha); 589 ha->isp_ops->disable_intrs(ha);
585 590
586 spin_lock_irqsave(&ha->hardware_lock, flags); 591 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -786,6 +791,12 @@ void
786qla24xx_reset_chip(scsi_qla_host_t *vha) 791qla24xx_reset_chip(scsi_qla_host_t *vha)
787{ 792{
788 struct qla_hw_data *ha = vha->hw; 793 struct qla_hw_data *ha = vha->hw;
794
795 if (pci_channel_offline(ha->pdev) &&
796 ha->flags.pci_channel_io_perm_failure) {
797 return;
798 }
799
789 ha->isp_ops->disable_intrs(ha); 800 ha->isp_ops->disable_intrs(ha);
790 801
791 /* Perform RISC reset. */ 802 /* Perform RISC reset. */
@@ -2266,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2266 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2277 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2267 clear_bit(RSCN_UPDATE, &vha->dpc_flags); 2278 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
2268 2279
2280 qla2x00_get_data_rate(vha);
2281
2269 /* Determine what we need to do */ 2282 /* Determine what we need to do */
2270 if (ha->current_topology == ISP_CFG_FL && 2283 if (ha->current_topology == ISP_CFG_FL &&
2271 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2284 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
@@ -3560,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3560 /* Requeue all commands in outstanding command list. */ 3573 /* Requeue all commands in outstanding command list. */
3561 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 3574 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3562 3575
3576 if (unlikely(pci_channel_offline(ha->pdev) &&
3577 ha->flags.pci_channel_io_perm_failure)) {
3578 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3579 status = 0;
3580 return status;
3581 }
3582
3563 ha->isp_ops->get_flash_version(vha, req->ring); 3583 ha->isp_ops->get_flash_version(vha, req->ring);
3564 3584
3565 ha->isp_ops->nvram_config(vha); 3585 ha->isp_ops->nvram_config(vha);
@@ -4458,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4458 int ret, retries; 4478 int ret, retries;
4459 struct qla_hw_data *ha = vha->hw; 4479 struct qla_hw_data *ha = vha->hw;
4460 4480
4481 if (ha->flags.pci_channel_io_perm_failure)
4482 return;
4461 if (!IS_FWI2_CAPABLE(ha)) 4483 if (!IS_FWI2_CAPABLE(ha))
4462 return; 4484 return;
4463 if (!ha->fw_major_version) 4485 if (!ha->fw_major_version)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1692a883f4de..ffd0efdff40e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id)
152 for (iter = 50; iter--; ) { 152 for (iter = 50; iter--; ) {
153 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 153 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
154 if (stat & HSR_RISC_PAUSED) { 154 if (stat & HSR_RISC_PAUSED) {
155 if (pci_channel_offline(ha->pdev)) 155 if (unlikely(pci_channel_offline(ha->pdev)))
156 break; 156 break;
157 157
158 hccr = RD_REG_WORD(&reg->hccr); 158 hccr = RD_REG_WORD(&reg->hccr);
@@ -1846,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
1846 reg = &ha->iobase->isp24; 1846 reg = &ha->iobase->isp24;
1847 status = 0; 1847 status = 0;
1848 1848
1849 if (unlikely(pci_channel_offline(ha->pdev)))
1850 return IRQ_HANDLED;
1851
1849 spin_lock_irqsave(&ha->hardware_lock, flags); 1852 spin_lock_irqsave(&ha->hardware_lock, flags);
1850 vha = pci_get_drvdata(ha->pdev); 1853 vha = pci_get_drvdata(ha->pdev);
1851 for (iter = 50; iter--; ) { 1854 for (iter = 50; iter--; ) {
1852 stat = RD_REG_DWORD(&reg->host_status); 1855 stat = RD_REG_DWORD(&reg->host_status);
1853 if (stat & HSRX_RISC_PAUSED) { 1856 if (stat & HSRX_RISC_PAUSED) {
1854 if (pci_channel_offline(ha->pdev)) 1857 if (unlikely(pci_channel_offline(ha->pdev)))
1855 break; 1858 break;
1856 1859
1857 hccr = RD_REG_DWORD(&reg->hccr); 1860 hccr = RD_REG_DWORD(&reg->hccr);
@@ -1992,7 +1995,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1992 do { 1995 do {
1993 stat = RD_REG_DWORD(&reg->host_status); 1996 stat = RD_REG_DWORD(&reg->host_status);
1994 if (stat & HSRX_RISC_PAUSED) { 1997 if (stat & HSRX_RISC_PAUSED) {
1995 if (pci_channel_offline(ha->pdev)) 1998 if (unlikely(pci_channel_offline(ha->pdev)))
1996 break; 1999 break;
1997 2000
1998 hccr = RD_REG_DWORD(&reg->hccr); 2001 hccr = RD_REG_DWORD(&reg->hccr);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 05d595d9a7ef..056e4d4505f3 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
56 56
57 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no)); 57 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
58 58
59 if (ha->flags.pci_channel_io_perm_failure) {
60 DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
61 "Exiting.\n", __func__, vha->host_no));
62 return QLA_FUNCTION_TIMEOUT;
63 }
64
59 /* 65 /*
60 * Wait for active mailbox commands to finish by waiting at most tov 66 * Wait for active mailbox commands to finish by waiting at most tov
61 * seconds. This is to serialize actual issuing of mailbox cmds during 67 * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
154 /* Check for pending interrupts. */ 160 /* Check for pending interrupts. */
155 qla2x00_poll(ha->rsp_q_map[0]); 161 qla2x00_poll(ha->rsp_q_map[0]);
156 162
157 if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
158 !ha->flags.mbox_int)
163 if (!ha->flags.mbox_int &&
164 !(IS_QLA2200(ha) &&
165 command == MBC_LOAD_RISC_RAM_EXTENDED))
159 msleep(10); 166 msleep(10);
160 } /* while */ 167 } /* while */
168 DEBUG17(qla_printk(KERN_WARNING, ha,
169 "Waited %d sec\n",
170 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
161 } 171 }
162 172
163 /* Check whether we timed out */ 173 /* Check whether we timed out */
@@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
227 237
228 if (rval == QLA_FUNCTION_TIMEOUT && 238 if (rval == QLA_FUNCTION_TIMEOUT &&
229 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { 239 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
230 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
240 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
241 ha->flags.eeh_busy) {
231 /* not in dpc. schedule it for dpc to take over. */ 242 /* not in dpc. schedule it for dpc to take over. */
232 DEBUG(printk("%s(%ld): timeout schedule " 243 DEBUG(printk("%s(%ld): timeout schedule "
233 "isp_abort_needed.\n", __func__, 244 "isp_abort_needed.\n", __func__,
@@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
237 base_vha->host_no)); 248 base_vha->host_no));
238 qla_printk(KERN_WARNING, ha, 249 qla_printk(KERN_WARNING, ha,
239 "Mailbox command timeout occurred. Scheduling ISP " 250 "Mailbox command timeout occurred. Scheduling ISP "
240 "abort.\n"); 251 "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy);
241 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 252 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
242 qla2xxx_wake_dpc(vha); 253 qla2xxx_wake_dpc(vha);
243 } else if (!abort_active) { 254 } else if (!abort_active) {
@@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2530 if (!IS_FWI2_CAPABLE(vha->hw)) 2541 if (!IS_FWI2_CAPABLE(vha->hw))
2531 return QLA_FUNCTION_FAILED; 2542 return QLA_FUNCTION_FAILED;
2532 2543
2544 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2545 return QLA_FUNCTION_FAILED;
2546
2533 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2547 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2534 2548
2535 mcp->mb[0] = MBC_TRACE_CONTROL; 2549 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2565 if (!IS_FWI2_CAPABLE(vha->hw)) 2579 if (!IS_FWI2_CAPABLE(vha->hw))
2566 return QLA_FUNCTION_FAILED; 2580 return QLA_FUNCTION_FAILED;
2567 2581
2582 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2583 return QLA_FUNCTION_FAILED;
2584
2568 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2585 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2569 2586
2570 mcp->mb[0] = MBC_TRACE_CONTROL; 2587 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2595 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2612 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2596 return QLA_FUNCTION_FAILED; 2613 return QLA_FUNCTION_FAILED;
2597 2614
2615 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2616 return QLA_FUNCTION_FAILED;
2617
2598 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2618 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2599 2619
2600 mcp->mb[0] = MBC_TRACE_CONTROL; 2620 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2639 if (!IS_FWI2_CAPABLE(vha->hw)) 2659 if (!IS_FWI2_CAPABLE(vha->hw))
2640 return QLA_FUNCTION_FAILED; 2660 return QLA_FUNCTION_FAILED;
2641 2661
2662 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2663 return QLA_FUNCTION_FAILED;
2664
2642 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2665 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2643 2666
2644 mcp->mb[0] = MBC_TRACE_CONTROL; 2667 mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3643 3666
3644 return rval; 3667 return rval;
3645} 3668}
3669
3670int
3671qla2x00_get_data_rate(scsi_qla_host_t *vha)
3672{
3673 int rval;
3674 mbx_cmd_t mc;
3675 mbx_cmd_t *mcp = &mc;
3676 struct qla_hw_data *ha = vha->hw;
3677
3678 if (!IS_FWI2_CAPABLE(ha))
3679 return QLA_FUNCTION_FAILED;
3680
3681 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
3682
3683 mcp->mb[0] = MBC_DATA_RATE;
3684 mcp->mb[1] = 0;
3685 mcp->out_mb = MBX_1|MBX_0;
3686 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3687 mcp->tov = MBX_TOV_SECONDS;
3688 mcp->flags = 0;
3689 rval = qla2x00_mailbox_command(vha, mcp);
3690 if (rval != QLA_SUCCESS) {
3691 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
3692 __func__, vha->host_no, rval, mcp->mb[0]));
3693 } else {
3694 DEBUG11(printk(KERN_INFO
3695 "%s(%ld): done.\n", __func__, vha->host_no));
3696 if (mcp->mb[1] != 0x7)
3697 ha->link_data_rate = mcp->mb[1];
3698 }
3699
3700 return rval;
3701}
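qla2x00_get_data_rate() above is a standard mailbox wrapper: fill mcp->mb[], declare which mailbox registers are written (out_mb) and read back (in_mb), then go through qla2x00_mailbox_command(). Callers such as qla2x00_configure_loop() only look at the QLA_SUCCESS return; a hedged sketch of a call site (not the actual driver code):

/* Assumes the qla2xxx driver headers (scsi_qla_host_t, QLA_SUCCESS,
 * DEBUG2) are in scope; refresh the cached link rate and ignore
 * failures, the previously cached ha->link_data_rate stays in use. */
static void refresh_link_rate_sketch(scsi_qla_host_t *vha)
{
	if (qla2x00_get_data_rate(vha) != QLA_SUCCESS)
		DEBUG2(printk(KERN_INFO "scsi(%ld): link rate query failed\n",
		    vha->host_no));
}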
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 2a4c7f4e7b69..b901aa267e7d 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -639,8 +639,10 @@ static void qla_do_work(struct work_struct *work)
639 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); 639 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
640 struct scsi_qla_host *vha; 640 struct scsi_qla_host *vha;
641 641
642 spin_lock_irq(&rsp->hw->hardware_lock);
642 vha = qla25xx_get_host(rsp); 643 vha = qla25xx_get_host(rsp);
643 qla24xx_process_response_queue(vha, rsp); 644 qla24xx_process_response_queue(vha, rsp);
645 spin_unlock_irq(&rsp->hw->hardware_lock);
644} 646}
645 647
646/* create response queue */ 648/* create response queue */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2f873d237325..209f50e788a1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -475,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
475 srb_t *sp; 475 srb_t *sp;
476 int rval; 476 int rval;
477 477
478 if (unlikely(pci_channel_offline(ha->pdev))) {
479 if (ha->pdev->error_state == pci_channel_io_frozen)
480 cmd->result = DID_REQUEUE << 16;
481 else
482 cmd->result = DID_NO_CONNECT << 16;
478 if (ha->flags.eeh_busy) {
479 if (ha->flags.pci_channel_io_perm_failure)
480 cmd->result = DID_NO_CONNECT << 16;
481 else
482 cmd->result = DID_REQUEUE << 16;
483 goto qc24_fail_command; 483 goto qc24_fail_command;
484 } 484 }
485 485
@@ -552,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
552#define ABORT_POLLING_PERIOD 1000 552#define ABORT_POLLING_PERIOD 1000
553#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 553#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
554 unsigned long wait_iter = ABORT_WAIT_ITER; 554 unsigned long wait_iter = ABORT_WAIT_ITER;
555 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
556 struct qla_hw_data *ha = vha->hw;
555 int ret = QLA_SUCCESS; 557 int ret = QLA_SUCCESS;
556 558
559 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
560 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
561 return ret;
562 }
563
557 while (CMD_SP(cmd) && wait_iter--) { 564 while (CMD_SP(cmd) && wait_iter--) {
558 msleep(ABORT_POLLING_PERIOD); 565 msleep(ABORT_POLLING_PERIOD);
559 } 566 }
@@ -1810,6 +1817,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1810 1817
1811 /* Set ISP-type information. */ 1818 /* Set ISP-type information. */
1812 qla2x00_set_isp_flags(ha); 1819 qla2x00_set_isp_flags(ha);
1820
1821 /* Set EEH reset type to fundamental if required by hba */
1822 if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1823 pdev->needs_freset = 1;
1824 pci_save_state(pdev);
1825 }
1826
1813 /* Configure PCI I/O space */ 1827 /* Configure PCI I/O space */
1814 ret = qla2x00_iospace_config(ha); 1828 ret = qla2x00_iospace_config(ha);
1815 if (ret) 1829 if (ret)
@@ -2174,6 +2188,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2174{ 2188{
2175 struct qla_hw_data *ha = vha->hw; 2189 struct qla_hw_data *ha = vha->hw;
2176 2190
2191 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2192
2193 /* Disable timer */
2194 if (vha->timer_active)
2195 qla2x00_stop_timer(vha);
2196
2197 /* Kill the kernel thread for this host */
2198 if (ha->dpc_thread) {
2199 struct task_struct *t = ha->dpc_thread;
2200
2201 /*
2202 * qla2xxx_wake_dpc checks for ->dpc_thread
2203 * so we need to zero it out.
2204 */
2205 ha->dpc_thread = NULL;
2206 kthread_stop(t);
2207 }
2208
2177 qla25xx_delete_queues(vha); 2209 qla25xx_delete_queues(vha);
2178 2210
2179 if (ha->flags.fce_enabled) 2211 if (ha->flags.fce_enabled)
@@ -2185,6 +2217,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2185 /* Stop currently executing firmware. */ 2217 /* Stop currently executing firmware. */
2186 qla2x00_try_to_stop_firmware(vha); 2218 qla2x00_try_to_stop_firmware(vha);
2187 2219
2220 vha->flags.online = 0;
2221
2188 /* turn-off interrupts on the card */ 2222 /* turn-off interrupts on the card */
2189 if (ha->interrupts_on) 2223 if (ha->interrupts_on)
2190 ha->isp_ops->disable_intrs(ha); 2224 ha->isp_ops->disable_intrs(ha);
@@ -2859,6 +2893,13 @@ qla2x00_do_dpc(void *data)
2859 if (!base_vha->flags.init_done) 2893 if (!base_vha->flags.init_done)
2860 continue; 2894 continue;
2861 2895
2896 if (ha->flags.eeh_busy) {
2897 DEBUG17(qla_printk(KERN_WARNING, ha,
2898 "qla2x00_do_dpc: dpc_flags: %lx\n",
2899 base_vha->dpc_flags));
2900 continue;
2901 }
2902
2862 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no)); 2903 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2863 2904
2864 ha->dpc_active = 1; 2905 ha->dpc_active = 1;
@@ -3049,8 +3090,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
3049 int index; 3090 int index;
3050 srb_t *sp; 3091 srb_t *sp;
3051 int t; 3092 int t;
3093 uint16_t w;
3052 struct qla_hw_data *ha = vha->hw; 3094 struct qla_hw_data *ha = vha->hw;
3053 struct req_que *req; 3095 struct req_que *req;
3096
3097 /* Hardware read to raise pending EEH errors during mailbox waits. */
3098 if (!pci_channel_offline(ha->pdev))
3099 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3054 /* 3100 /*
3055 * Ports - Port down timer. 3101 * Ports - Port down timer.
3056 * 3102 *
@@ -3252,16 +3298,23 @@ qla2x00_release_firmware(void)
3252static pci_ers_result_t 3298static pci_ers_result_t
3253qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 3299qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3254{ 3300{
3255 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); 3301 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3302 struct qla_hw_data *ha = vha->hw;
3303
3304 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3305 state));
3256 3306
3257 switch (state) { 3307 switch (state) {
3258 case pci_channel_io_normal: 3308 case pci_channel_io_normal:
3309 ha->flags.eeh_busy = 0;
3259 return PCI_ERS_RESULT_CAN_RECOVER; 3310 return PCI_ERS_RESULT_CAN_RECOVER;
3260 case pci_channel_io_frozen: 3311 case pci_channel_io_frozen:
3312 ha->flags.eeh_busy = 1;
3261 pci_disable_device(pdev); 3313 pci_disable_device(pdev);
3262 return PCI_ERS_RESULT_NEED_RESET; 3314 return PCI_ERS_RESULT_NEED_RESET;
3263 case pci_channel_io_perm_failure: 3315 case pci_channel_io_perm_failure:
3264 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
3316 ha->flags.pci_channel_io_perm_failure = 1;
3317 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3265 return PCI_ERS_RESULT_DISCONNECT; 3318 return PCI_ERS_RESULT_DISCONNECT;
3266 } 3319 }
3267 return PCI_ERS_RESULT_NEED_RESET; 3320 return PCI_ERS_RESULT_NEED_RESET;
@@ -3312,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3312 struct qla_hw_data *ha = base_vha->hw; 3365 struct qla_hw_data *ha = base_vha->hw;
3313 int rc; 3366 int rc;
3314 3367
3368 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3369
3315 if (ha->mem_only) 3370 if (ha->mem_only)
3316 rc = pci_enable_device_mem(pdev); 3371 rc = pci_enable_device_mem(pdev);
3317 else 3372 else
@@ -3320,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3320 if (rc) { 3375 if (rc) {
3321 qla_printk(KERN_WARNING, ha, 3376 qla_printk(KERN_WARNING, ha,
3322 "Can't re-enable PCI device after reset.\n"); 3377 "Can't re-enable PCI device after reset.\n");
3323
3324 return ret; 3378 return ret;
3325 } 3379 }
3326 pci_set_master(pdev);
3327 3380
3328 if (ha->isp_ops->pci_config(base_vha)) 3381 if (ha->isp_ops->pci_config(base_vha))
3329 return ret; 3382 return ret;
3330 3383
3384#ifdef QL_DEBUG_LEVEL_17
3385 {
3386 uint8_t b;
3387 uint32_t i;
3388
3389 printk("slot_reset_1: ");
3390 for (i = 0; i < 256; i++) {
3391 pci_read_config_byte(ha->pdev, i, &b);
3392 printk("%s%02x", (i%16) ? " " : "\n", b);
3393 }
3394 printk("\n");
3395 }
3396#endif
3331 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3397 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3332 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) 3398 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
3333 ret = PCI_ERS_RESULT_RECOVERED; 3399 ret = PCI_ERS_RESULT_RECOVERED;
3334 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3400 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3335 3401
3402 DEBUG17(qla_printk(KERN_WARNING, ha,
3403 "slot_reset-return:ret=%x\n", ret));
3404
3336 return ret; 3405 return ret;
3337} 3406}
3338 3407
@@ -3343,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
3343 struct qla_hw_data *ha = base_vha->hw; 3412 struct qla_hw_data *ha = base_vha->hw;
3344 int ret; 3413 int ret;
3345 3414
3415 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
3416
3346 ret = qla2x00_wait_for_hba_online(base_vha); 3417 ret = qla2x00_wait_for_hba_online(base_vha);
3347 if (ret != QLA_SUCCESS) { 3418 if (ret != QLA_SUCCESS) {
3348 qla_printk(KERN_ERR, ha, 3419 qla_printk(KERN_ERR, ha,
3349 "the device failed to resume I/O " 3420 "the device failed to resume I/O "
3350 "from slot/link_reset"); 3421 "from slot/link_reset");
3351 } 3422 }
3423
3424 ha->flags.eeh_busy = 0;
3425
3352 pci_cleanup_aer_uncorrect_error_status(pdev); 3426 pci_cleanup_aer_uncorrect_error_status(pdev);
3353} 3427}
3354 3428
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c482220f7eed..a65dd95507c6 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k8" 10#define QLA2XXX_VERSION "8.03.01-k9"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3058bb1aff95..fd7b15be7640 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
623 } 623 }
624 break; 624 break;
625 case INQUIRY: 625 case INQUIRY:
626 if (lun >= host->max_lun) {
627 cmd->result = DID_NO_CONNECT << 16;
628 done(cmd);
629 return 0;
630 }
626 if (id != host->max_id - 1) 631 if (id != host->max_id - 1)
627 break; 632 break;
628 if (!lun && !cmd->device->channel && 633 if (!lun && !cmd->device->channel &&
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 415858b421b3..825b665245bb 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1221,9 +1221,9 @@ static void setup_smart_timing(struct pxafb_info *fbi,
1221static int pxafb_smart_thread(void *arg) 1221static int pxafb_smart_thread(void *arg)
1222{ 1222{
1223 struct pxafb_info *fbi = arg; 1223 struct pxafb_info *fbi = arg;
1224 struct pxafb_mach_info *inf; 1224 struct pxafb_mach_info *inf = fbi->dev->platform_data;
1225 1225
1226 if (!fbi || !fbi->dev->platform_data->smart_update) { 1226 if (!inf->smart_update) {
1227 pr_err("%s: not properly initialized, thread terminated\n", 1227 pr_err("%s: not properly initialized, thread terminated\n",
1228 __func__); 1228 __func__);
1229 return -EINVAL; 1229 return -EINVAL;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 7dc85997e96c..c57d9ce5ff7e 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -171,6 +171,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
171#ifdef ELF_FDPIC_PLAT_INIT 171#ifdef ELF_FDPIC_PLAT_INIT
172 unsigned long dynaddr; 172 unsigned long dynaddr;
173#endif 173#endif
174#ifndef CONFIG_MMU
175 unsigned long stack_prot;
176#endif
174 struct file *interpreter = NULL; /* to shut gcc up */ 177 struct file *interpreter = NULL; /* to shut gcc up */
175 char *interpreter_name = NULL; 178 char *interpreter_name = NULL;
176 int executable_stack; 179 int executable_stack;
@@ -316,6 +319,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
316 * defunct, deceased, etc. after this point we have to exit via 319 * defunct, deceased, etc. after this point we have to exit via
317 * error_kill */ 320 * error_kill */
318 set_personality(PER_LINUX_FDPIC); 321 set_personality(PER_LINUX_FDPIC);
322 if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
323 current->personality |= READ_IMPLIES_EXEC;
319 set_binfmt(&elf_fdpic_format); 324 set_binfmt(&elf_fdpic_format);
320 325
321 current->mm->start_code = 0; 326 current->mm->start_code = 0;
@@ -377,9 +382,13 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
377 if (stack_size < PAGE_SIZE * 2) 382 if (stack_size < PAGE_SIZE * 2)
378 stack_size = PAGE_SIZE * 2; 383 stack_size = PAGE_SIZE * 2;
379 384
385 stack_prot = PROT_READ | PROT_WRITE;
386 if (executable_stack == EXSTACK_ENABLE_X ||
387 (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
388 stack_prot |= PROT_EXEC;
389
380 down_write(&current->mm->mmap_sem); 390 down_write(&current->mm->mmap_sem);
381 current->mm->start_brk = do_mmap(NULL, 0, stack_size, 391 current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
382 PROT_READ | PROT_WRITE | PROT_EXEC,
383 MAP_PRIVATE | MAP_ANONYMOUS | 392 MAP_PRIVATE | MAP_ANONYMOUS |
384 MAP_UNINITIALIZED | MAP_GROWSDOWN, 393 MAP_UNINITIALIZED | MAP_GROWSDOWN,
385 0); 394 0);
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 698a8636d39c..2afbcebeda71 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -738,13 +738,28 @@ static int exofs_write_begin_export(struct file *file,
738 fsdata); 738 fsdata);
739} 739}
740 740
741static int exofs_write_end(struct file *file, struct address_space *mapping,
742 loff_t pos, unsigned len, unsigned copied,
743 struct page *page, void *fsdata)
744{
745 struct inode *inode = mapping->host;
746 /* According to comment in simple_write_end i_mutex is held */
747 loff_t i_size = inode->i_size;
748 int ret;
749
750 ret = simple_write_end(file, mapping,pos, len, copied, page, fsdata);
751 if (i_size != inode->i_size)
752 mark_inode_dirty(inode);
753 return ret;
754}
755
741const struct address_space_operations exofs_aops = { 756const struct address_space_operations exofs_aops = {
742 .readpage = exofs_readpage, 757 .readpage = exofs_readpage,
743 .readpages = exofs_readpages, 758 .readpages = exofs_readpages,
744 .writepage = exofs_writepage, 759 .writepage = exofs_writepage,
745 .writepages = exofs_writepages, 760 .writepages = exofs_writepages,
746 .write_begin = exofs_write_begin_export, 761 .write_begin = exofs_write_begin_export,
747 .write_end = simple_write_end, 762 .write_end = exofs_write_end,
748}; 763};
749 764
750/****************************************************************************** 765/******************************************************************************
diff --git a/fs/exofs/pnfs.h b/fs/exofs/pnfs.h
index 423033addd1f..c52e9888b8ab 100644
--- a/fs/exofs/pnfs.h
+++ b/fs/exofs/pnfs.h
@@ -15,13 +15,7 @@
15#ifndef __EXOFS_PNFS_H__ 15#ifndef __EXOFS_PNFS_H__
16#define __EXOFS_PNFS_H__ 16#define __EXOFS_PNFS_H__
17 17
18#if defined(CONFIG_PNFS) 18#if ! defined(__PNFS_OSD_XDR_H__)
19
20
21/* FIXME: move this file to: linux/exportfs/pnfs_osd_xdr.h */
22#include "../nfs/objlayout/pnfs_osd_xdr.h"
23
24#else /* defined(CONFIG_PNFS) */
25 19
26enum pnfs_iomode { 20enum pnfs_iomode {
27 IOMODE_READ = 1, 21 IOMODE_READ = 1,
@@ -46,6 +40,6 @@ struct pnfs_osd_data_map {
46 u32 odm_raid_algorithm; 40 u32 odm_raid_algorithm;
47}; 41};
48 42
49#endif /* else defined(CONFIG_PNFS) */ 43#endif /* ! defined(__PNFS_OSD_XDR_H__) */
50 44
51#endif /* __EXOFS_PNFS_H__ */ 45#endif /* __EXOFS_PNFS_H__ */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2c5ace4f00a7..3c7f03b669fb 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1615,6 +1615,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1615 goto out; 1615 goto out;
1616 1616
1617 new_dentry = dentry; 1617 new_dentry = dentry;
1618 rehash = NULL;
1618 new_inode = NULL; 1619 new_inode = NULL;
1619 } 1620 }
1620 } 1621 }
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 7c2e337d05af..c194793b642b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -780,12 +780,9 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
780 int (*fsync) (struct file *, struct dentry *, int); 780 int (*fsync) (struct file *, struct dentry *, int);
781 int err; 781 int err;
782 782
783 err = filemap_fdatawrite(inode->i_mapping); 783 err = filemap_write_and_wait(inode->i_mapping);
784 if (err == 0 && fop && (fsync = fop->fsync)) 784 if (err == 0 && fop && (fsync = fop->fsync))
785 err = fsync(filp, dp, 0); 785 err = fsync(filp, dp, 0);
786 if (err == 0)
787 err = filemap_fdatawait(inode->i_mapping);
788
789 return err; 786 return err;
790} 787}
791 788
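The nfsd_dosync change replaces the write-then-wait pair with filemap_write_and_wait(), which starts writeback on the mapping and waits for it in a single call. Roughly the following equivalence (sketch):

#include <linux/fs.h>

static int sync_mapping_old(struct inode *inode)
{
	int err = filemap_fdatawrite(inode->i_mapping);
	if (err == 0)
		err = filemap_fdatawait(inode->i_mapping);
	return err;
}

static int sync_mapping_new(struct inode *inode)
{
	/* Starts writeback and waits for it in one call. */
	return filemap_write_and_wait(inode->i_mapping);
}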
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 3d30a1c974a8..06ccf6a86d35 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1772,7 +1772,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1772 loff_t *ppos, 1772 loff_t *ppos,
1773 size_t count, 1773 size_t count,
1774 int appending, 1774 int appending,
1775 int *direct_io) 1775 int *direct_io,
1776 int *has_refcount)
1776{ 1777{
1777 int ret = 0, meta_level = 0; 1778 int ret = 0, meta_level = 0;
1778 struct inode *inode = dentry->d_inode; 1779 struct inode *inode = dentry->d_inode;
@@ -1833,6 +1834,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1833 saved_pos, 1834 saved_pos,
1834 count, 1835 count,
1835 &meta_level); 1836 &meta_level);
1837 if (has_refcount)
1838 *has_refcount = 1;
1836 } 1839 }
1837 1840
1838 if (ret < 0) { 1841 if (ret < 0) {
@@ -1856,6 +1859,10 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1856 break; 1859 break;
1857 } 1860 }
1858 1861
1862 if (has_refcount && *has_refcount == 1) {
1863 *direct_io = 0;
1864 break;
1865 }
1859 /* 1866 /*
1860 * Allowing concurrent direct writes means 1867 * Allowing concurrent direct writes means
1861 * i_size changes wouldn't be synchronized, so 1868 * i_size changes wouldn't be synchronized, so
@@ -1899,7 +1906,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1899 loff_t pos) 1906 loff_t pos)
1900{ 1907{
1901 int ret, direct_io, appending, rw_level, have_alloc_sem = 0; 1908 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
1902 int can_do_direct; 1909 int can_do_direct, has_refcount = 0;
1903 ssize_t written = 0; 1910 ssize_t written = 0;
1904 size_t ocount; /* original count */ 1911 size_t ocount; /* original count */
1905 size_t count; /* after file limit checks */ 1912 size_t count; /* after file limit checks */
@@ -1942,7 +1949,7 @@ relock:
1942 can_do_direct = direct_io; 1949 can_do_direct = direct_io;
1943 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos, 1950 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
1944 iocb->ki_left, appending, 1951 iocb->ki_left, appending,
1945 &can_do_direct); 1952 &can_do_direct, &has_refcount);
1946 if (ret < 0) { 1953 if (ret < 0) {
1947 mlog_errno(ret); 1954 mlog_errno(ret);
1948 goto out; 1955 goto out;
@@ -2006,14 +2013,16 @@ out_dio:
2006 /* buffered aio wouldn't have proper lock coverage today */ 2013 /* buffered aio wouldn't have proper lock coverage today */
2007 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); 2014 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2008 2015
2009 if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode)) {
2016 if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
2017 (file->f_flags & O_DIRECT && has_refcount)) {
2010 ret = filemap_fdatawrite_range(file->f_mapping, pos, 2018 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2011 pos + count - 1); 2019 pos + count - 1);
2012 if (ret < 0) 2020 if (ret < 0)
2013 written = ret; 2021 written = ret;
2014 2022
2015 if (!ret && (old_size != i_size_read(inode) || 2023 if (!ret && (old_size != i_size_read(inode) ||
2016 old_clusters != OCFS2_I(inode)->ip_clusters)) {
2024 old_clusters != OCFS2_I(inode)->ip_clusters ||
2025 has_refcount)) {
2017 ret = jbd2_journal_force_commit(osb->journal->j_journal); 2026 ret = jbd2_journal_force_commit(osb->journal->j_journal);
2018 if (ret < 0) 2027 if (ret < 0)
2019 written = ret; 2028 written = ret;
@@ -2062,7 +2071,7 @@ static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
2062 int ret; 2071 int ret;
2063 2072
2064 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos, 2073 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos,
2065 sd->total_len, 0, NULL); 2074 sd->total_len, 0, NULL, NULL);
2066 if (ret < 0) { 2075 if (ret < 0) {
2067 mlog_errno(ret); 2076 mlog_errno(ret);
2068 return ret; 2077 return ret;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 71dafb69cfeb..ffac157fb5b2 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1408,7 +1408,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
1408 struct drm_ati_pcigart_info * gart_info); 1408 struct drm_ati_pcigart_info * gart_info);
1409 1409
1410extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, 1410extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
1411 size_t align, dma_addr_t maxaddr); 1411 size_t align);
1412extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1412extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1413extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1413extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
1414 1414
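With the maxaddr argument gone, drm_pci_alloc() takes only the size and alignment; callers updated elsewhere in this series pass just those two. A hedged sketch of a call site against the new three-argument prototype:

#include "drmP.h"	/* in-tree DRM private header */

static int alloc_scratch_page_sketch(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	/* New prototype: size and alignment only, no maxaddr. */
	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (!dmah)
		return -ENOMEM;

	/* ... dmah->vaddr is the kernel mapping, dmah->busaddr the
	 * bus address seen by the device ... */

	drm_pci_free(dev, dmah);
	return 0;
}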
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index ec3f5e80a5df..b64a8d7cdf6d 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -188,6 +188,7 @@ typedef struct _drm_i915_sarea {
188#define DRM_I915_GEM_MADVISE 0x26 188#define DRM_I915_GEM_MADVISE 0x26
189#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 189#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
190#define DRM_I915_OVERLAY_ATTRS 0x28 190#define DRM_I915_OVERLAY_ATTRS 0x28
191#define DRM_I915_GEM_EXECBUFFER2 0x29
191 192
192#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 193#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
193#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 194#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -207,6 +208,7 @@ typedef struct _drm_i915_sarea {
207#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 208#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
208#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 209#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
209#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 210#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
211#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
210#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 212#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
211#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 213#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
212#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 214#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -272,6 +274,7 @@ typedef struct drm_i915_irq_wait {
272#define I915_PARAM_NUM_FENCES_AVAIL 6 274#define I915_PARAM_NUM_FENCES_AVAIL 6
273#define I915_PARAM_HAS_OVERLAY 7 275#define I915_PARAM_HAS_OVERLAY 7
274#define I915_PARAM_HAS_PAGEFLIPPING 8 276#define I915_PARAM_HAS_PAGEFLIPPING 8
277#define I915_PARAM_HAS_EXECBUF2 9
275 278
276typedef struct drm_i915_getparam { 279typedef struct drm_i915_getparam {
277 int param; 280 int param;
@@ -567,6 +570,57 @@ struct drm_i915_gem_execbuffer {
567 __u64 cliprects_ptr; 570 __u64 cliprects_ptr;
568}; 571};
569 572
573struct drm_i915_gem_exec_object2 {
574 /**
575 * User's handle for a buffer to be bound into the GTT for this
576 * operation.
577 */
578 __u32 handle;
579
580 /** Number of relocations to be performed on this buffer */
581 __u32 relocation_count;
582 /**
583 * Pointer to array of struct drm_i915_gem_relocation_entry containing
584 * the relocations to be performed in this buffer.
585 */
586 __u64 relocs_ptr;
587
588 /** Required alignment in graphics aperture */
589 __u64 alignment;
590
591 /**
592 * Returned value of the updated offset of the object, for future
593 * presumed_offset writes.
594 */
595 __u64 offset;
596
597#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
598 __u64 flags;
599 __u64 rsvd1;
600 __u64 rsvd2;
601};
602
603struct drm_i915_gem_execbuffer2 {
604 /**
605 * List of gem_exec_object2 structs
606 */
607 __u64 buffers_ptr;
608 __u32 buffer_count;
609
610 /** Offset in the batchbuffer to start execution from. */
611 __u32 batch_start_offset;
612 /** Bytes used in batchbuffer from batch_start_offset */
613 __u32 batch_len;
614 __u32 DR1;
615 __u32 DR4;
616 __u32 num_cliprects;
617 /** This is a struct drm_clip_rect *cliprects */
618 __u64 cliprects_ptr;
619 __u64 flags; /* currently unused */
620 __u64 rsvd1;
621 __u64 rsvd2;
622};
623
570struct drm_i915_gem_pin { 624struct drm_i915_gem_pin {
571 /** Handle of the buffer to be pinned. */ 625 /** Handle of the buffer to be pinned. */
572 __u32 handle; 626 __u32 handle;
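The new execbuffer2 path takes an array of drm_i915_gem_exec_object2 entries; EXEC_OBJECT_NEEDS_FENCE lets userspace request a fence register per buffer, and availability is advertised through I915_PARAM_HAS_EXECBUF2. A hedged userspace-side sketch of issuing the ioctl (relocation setup and error handling omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* or libdrm's copy of this header */

/* 'fd' is an open DRM device node; 'handles' are GEM buffer handles,
 * the last of which is assumed to be the batch buffer. */
static int exec2_submit_sketch(int fd, const unsigned int *handles,
			       int count, unsigned int batch_len)
{
	struct drm_i915_gem_exec_object2 objs[16];
	struct drm_i915_gem_execbuffer2 execbuf;
	int i;

	if (count <= 0 || count > 16)
		return -1;

	memset(objs, 0, sizeof(objs));
	for (i = 0; i < count; i++)
		objs[i].handle = handles[i];	/* relocs_ptr left empty */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (unsigned long)objs;
	execbuf.buffer_count = count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}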
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 84a524afb3dc..84d020bed083 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -123,6 +123,8 @@ struct vm_region {
123 struct file *vm_file; /* the backing file or NULL */ 123 struct file *vm_file; /* the backing file or NULL */
124 124
125 atomic_t vm_usage; /* region usage count */ 125 atomic_t vm_usage; /* region usage count */
126 bool vm_icache_flushed : 1; /* true if the icache has been flushed for
127 * this region */
126}; 128};
127 129
128/* 130/*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5da0690d9cee..174e5392e51e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -243,6 +243,7 @@ struct pci_dev {
243 unsigned int d2_support:1; /* Low power state D2 is supported */ 243 unsigned int d2_support:1; /* Low power state D2 is supported */
244 unsigned int no_d1d2:1; /* Only allow D0 and D3 */ 244 unsigned int no_d1d2:1; /* Only allow D0 and D3 */
245 unsigned int wakeup_prepared:1; 245 unsigned int wakeup_prepared:1;
246 unsigned int d3_delay; /* D3->D0 transition time in ms */
246 247
247#ifdef CONFIG_PCIEASPM 248#ifdef CONFIG_PCIEASPM
248 struct pcie_link_state *link_state; /* ASPM link state. */ 249 struct pcie_link_state *link_state; /* ASPM link state. */
diff --git a/kernel/module.c b/kernel/module.c
index e96b8ed1cb6a..f82386bd9ee9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1010,6 +1010,12 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
1010 * J. Corbet <corbet@lwn.net> 1010 * J. Corbet <corbet@lwn.net>
1011 */ 1011 */
1012#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) 1012#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
1013
1014static inline bool sect_empty(const Elf_Shdr *sect)
1015{
1016 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1017}
1018
1013struct module_sect_attr 1019struct module_sect_attr
1014{ 1020{
1015 struct module_attribute mattr; 1021 struct module_attribute mattr;
@@ -1051,8 +1057,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1051 1057
1052 /* Count loaded sections and allocate structures */ 1058 /* Count loaded sections and allocate structures */
1053 for (i = 0; i < nsect; i++) 1059 for (i = 0; i < nsect; i++)
1054 if (sechdrs[i].sh_flags & SHF_ALLOC 1060 if (!sect_empty(&sechdrs[i]))
1055 && sechdrs[i].sh_size)
1056 nloaded++; 1061 nloaded++;
1057 size[0] = ALIGN(sizeof(*sect_attrs) 1062 size[0] = ALIGN(sizeof(*sect_attrs)
1058 + nloaded * sizeof(sect_attrs->attrs[0]), 1063 + nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1070,9 +1075,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1070 sattr = &sect_attrs->attrs[0]; 1075 sattr = &sect_attrs->attrs[0];
1071 gattr = &sect_attrs->grp.attrs[0]; 1076 gattr = &sect_attrs->grp.attrs[0];
1072 for (i = 0; i < nsect; i++) { 1077 for (i = 0; i < nsect; i++) {
1073 if (! (sechdrs[i].sh_flags & SHF_ALLOC)) 1078 if (sect_empty(&sechdrs[i]))
1074 continue;
1075 if (!sechdrs[i].sh_size)
1076 continue; 1079 continue;
1077 sattr->address = sechdrs[i].sh_addr; 1080 sattr->address = sechdrs[i].sh_addr;
1078 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, 1081 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
@@ -1156,7 +1159,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
1156 /* Count notes sections and allocate structures. */ 1159 /* Count notes sections and allocate structures. */
1157 notes = 0; 1160 notes = 0;
1158 for (i = 0; i < nsect; i++) 1161 for (i = 0; i < nsect; i++)
1159 if ((sechdrs[i].sh_flags & SHF_ALLOC) && 1162 if (!sect_empty(&sechdrs[i]) &&
1160 (sechdrs[i].sh_type == SHT_NOTE)) 1163 (sechdrs[i].sh_type == SHT_NOTE))
1161 ++notes; 1164 ++notes;
1162 1165
@@ -1172,7 +1175,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
1172 notes_attrs->notes = notes; 1175 notes_attrs->notes = notes;
1173 nattr = &notes_attrs->attrs[0]; 1176 nattr = &notes_attrs->attrs[0];
1174 for (loaded = i = 0; i < nsect; ++i) { 1177 for (loaded = i = 0; i < nsect; ++i) {
1175 if (!(sechdrs[i].sh_flags & SHF_ALLOC)) 1178 if (sect_empty(&sechdrs[i]))
1176 continue; 1179 continue;
1177 if (sechdrs[i].sh_type == SHT_NOTE) { 1180 if (sechdrs[i].sh_type == SHT_NOTE) {
1178 nattr->attr.name = mod->sect_attrs->attrs[loaded].name; 1181 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
diff --git a/mm/nommu.c b/mm/nommu.c
index 6f9248f89bde..17773862619b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -432,6 +432,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
432 /* 432 /*
433 * Ok, looks good - let it rip. 433 * Ok, looks good - let it rip.
434 */ 434 */
435 flush_icache_range(mm->brk, brk);
435 return mm->brk = brk; 436 return mm->brk = brk;
436} 437}
437 438
@@ -1353,10 +1354,14 @@ unsigned long do_mmap_pgoff(struct file *file,
1353share: 1354share:
1354 add_vma_to_mm(current->mm, vma); 1355 add_vma_to_mm(current->mm, vma);
1355 1356
1356 up_write(&nommu_region_sem);
1357
1358 if (prot & PROT_EXEC)
1359 flush_icache_range(result, result + len);
1357 /* we flush the region from the icache only when the first executable
1358 * mapping of it is made */
1359 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1360 flush_icache_range(region->vm_start, region->vm_end);
1361 region->vm_icache_flushed = true;
1362 }
1363
1364 up_write(&nommu_region_sem);
1360 1365
1361 kleave(" = %lx", result); 1366 kleave(" = %lx", result);
1362 return result; 1367 return result;
@@ -1916,9 +1921,11 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
1916 1921
1917 /* only read or write mappings where it is permitted */ 1922 /* only read or write mappings where it is permitted */
1918 if (write && vma->vm_flags & VM_MAYWRITE) 1923 if (write && vma->vm_flags & VM_MAYWRITE)
1919 len -= copy_to_user((void *) addr, buf, len); 1924 copy_to_user_page(vma, NULL, addr,
1925 (void *) addr, buf, len);
1920 else if (!write && vma->vm_flags & VM_MAYREAD) 1926 else if (!write && vma->vm_flags & VM_MAYREAD)
1921 len -= copy_from_user(buf, (void *) addr, len); 1927 copy_from_user_page(vma, NULL, addr,
1928 buf, (void *) addr, len);
1922 else 1929 else
1923 len = 0; 1930 len = 0;
1924 } else { 1931 } else {
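On no-MMU targets, access_process_vm() now goes through the per-architecture copy_to_user_page()/copy_from_user_page() helpers so the target mapping's caches are kept coherent after a ptrace-style write. As a hedged illustration only (each architecture defines its own variant in asm/cacheflush.h), the generic fallback is roughly a memcpy() plus an icache flush for writes:

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		flush_icache_user_range(vma, page, vaddr, len);		\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)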
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3c3c50f38a1c..f7a7f8380e38 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -644,7 +644,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
 	if (IS_ERR(p)) {
 		err = PTR_ERR(p);
-		gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
+		switch (err) {
+		case -EACCES:
+			gss_msg->msg.errno = err;
+			err = mlen;
+			break;
+		case -EFAULT:
+		case -ENOMEM:
+		case -EINVAL:
+		case -ENOSYS:
+			gss_msg->msg.errno = -EAGAIN;
+			break;
+		default:
+			printk(KERN_CRIT "%s: bad return from "
+				"gss_fill_context: %zd\n", __func__, err);
+			BUG();
+		}
 		goto err_release_msg;
 	}
 	gss_msg->ctx = gss_get_ctx(ctx);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index ef45eba22485..2deb0ed72ff4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -131,8 +131,10 @@ gss_import_sec_context_kerberos(const void *p,
 	struct krb5_ctx *ctx;
 	int tmp;
 
-	if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
+	if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
+		p = ERR_PTR(-ENOMEM);
 		goto out_err;
+	}
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 6efbb0cd3c7c..76e4c6f4ac3c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
 		  struct gss_ctx **ctx_id)
 {
 	if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
-		return GSS_S_FAILURE;
+		return -ENOMEM;
 	(*ctx_id)->mech_type = gss_mech_get(mech);
 
 	return mech->gm_ops
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 1c924ee0a1ef..7d1f9e928f69 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -699,7 +699,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	spin_unlock_bh(&pool->sp_lock);
 
 	len = 0;
-	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
+	    !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index 69bd0acc81c8..a1bbe16b7f96 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -102,6 +102,12 @@ static int ac97_soc_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&codec->dapm_widgets);
 	INIT_LIST_HEAD(&codec->dapm_paths);
 
+	ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
+	if (ret < 0) {
+		printk(KERN_ERR "ASoC: failed to init gen ac97 glue\n");
+		goto err;
+	}
+
 	/* register pcms */
 	ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
 	if (ret < 0)
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 265e68c75df8..af8cb6995a1f 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -424,23 +424,23 @@ static int wm8510_pcm_hw_params(struct snd_pcm_substream *substream,
 
 	/* filter coefficient */
 	switch (params_rate(params)) {
-	case SNDRV_PCM_RATE_8000:
+	case 8000:
 		adn |= 0x5 << 1;
 		break;
-	case SNDRV_PCM_RATE_11025:
+	case 11025:
 		adn |= 0x4 << 1;
 		break;
-	case SNDRV_PCM_RATE_16000:
+	case 16000:
 		adn |= 0x3 << 1;
 		break;
-	case SNDRV_PCM_RATE_22050:
+	case 22050:
 		adn |= 0x2 << 1;
 		break;
-	case SNDRV_PCM_RATE_32000:
+	case 32000:
 		adn |= 0x1 << 1;
 		break;
-	case SNDRV_PCM_RATE_44100:
-	case SNDRV_PCM_RATE_48000:
+	case 44100:
+	case 48000:
 		break;
 	}
 
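The wm8510 change above, and the matching wm8940 and wm8974 changes below, fix the case labels: params_rate() returns the negotiated sample rate in Hz, while the SNDRV_PCM_RATE_* identifiers are capability bit flags intended for a driver's .rates mask, so the old labels could never match. An illustrative sketch of the two distinct uses (hypothetical driver code, not taken from the patch):

#include <sound/pcm.h>
#include <sound/pcm_params.h>

/* capability mask: bit flags such as SNDRV_PCM_RATE_8000 belong here */
static const struct snd_pcm_hardware example_hw = {
	.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_44100 |
		 SNDRV_PCM_RATE_48000,
};

static void example_pick_filter(struct snd_pcm_hw_params *params)
{
	/* runtime value: params_rate() yields 8000, 44100, 48000, ... in Hz */
	switch (params_rate(params)) {
	case 8000:
		/* select the matching filter coefficient here */
		break;
	case 44100:
	case 48000:
		break;
	}
}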
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 3d850b97037a..31e39ffd1d8e 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -378,23 +378,23 @@ static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
 	iface |= (1 << 9);
 
 	switch (params_rate(params)) {
-	case SNDRV_PCM_RATE_8000:
+	case 8000:
 		addcntrl |= (0x5 << 1);
 		break;
-	case SNDRV_PCM_RATE_11025:
+	case 11025:
 		addcntrl |= (0x4 << 1);
 		break;
-	case SNDRV_PCM_RATE_16000:
+	case 16000:
 		addcntrl |= (0x3 << 1);
 		break;
-	case SNDRV_PCM_RATE_22050:
+	case 22050:
 		addcntrl |= (0x2 << 1);
 		break;
-	case SNDRV_PCM_RATE_32000:
+	case 32000:
 		addcntrl |= (0x1 << 1);
 		break;
-	case SNDRV_PCM_RATE_44100:
-	case SNDRV_PCM_RATE_48000:
+	case 44100:
+	case 48000:
 		break;
 	}
 	ret = snd_soc_write(codec, WM8940_ADDCNTRL, addcntrl);
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index a808675388fc..8812751da8c9 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -482,23 +482,23 @@ static int wm8974_pcm_hw_params(struct snd_pcm_substream *substream,
 
 	/* filter coefficient */
 	switch (params_rate(params)) {
-	case SNDRV_PCM_RATE_8000:
+	case 8000:
 		adn |= 0x5 << 1;
 		break;
-	case SNDRV_PCM_RATE_11025:
+	case 11025:
 		adn |= 0x4 << 1;
 		break;
-	case SNDRV_PCM_RATE_16000:
+	case 16000:
 		adn |= 0x3 << 1;
 		break;
-	case SNDRV_PCM_RATE_22050:
+	case 22050:
 		adn |= 0x2 << 1;
 		break;
-	case SNDRV_PCM_RATE_32000:
+	case 32000:
 		adn |= 0x1 << 1;
 		break;
-	case SNDRV_PCM_RATE_44100:
-	case SNDRV_PCM_RATE_48000:
+	case 44100:
+	case 48000:
 		break;
 	}
 
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
index c7af09729c6e..5263ab18f827 100644
--- a/sound/soc/sh/fsi-ak4642.c
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -42,42 +42,12 @@ static struct snd_soc_device fsi_snd_devdata = {
 	.codec_dev = &soc_codec_dev_ak4642,
 };
 
-#define AK4642_BUS 0
-#define AK4642_ADR 0x12
-static int ak4642_add_i2c_device(void)
-{
-	struct i2c_board_info info;
-	struct i2c_adapter *adapter;
-	struct i2c_client *client;
-
-	memset(&info, 0, sizeof(struct i2c_board_info));
-	info.addr = AK4642_ADR;
-	strlcpy(info.type, "ak4642", I2C_NAME_SIZE);
-
-	adapter = i2c_get_adapter(AK4642_BUS);
-	if (!adapter) {
-		printk(KERN_DEBUG "can't get i2c adapter\n");
-		return -ENODEV;
-	}
-
-	client = i2c_new_device(adapter, &info);
-	i2c_put_adapter(adapter);
-	if (!client) {
-		printk(KERN_DEBUG "can't add i2c device\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
 static struct platform_device *fsi_snd_device;
 
 static int __init fsi_ak4642_init(void)
 {
 	int ret = -ENOMEM;
 
-	ak4642_add_i2c_device();
-
 	fsi_snd_device = platform_device_alloc("soc-audio", -1);
 	if (!fsi_snd_device)
 		goto out;