-rw-r--r-- MAINTAINERS | 8
-rw-r--r-- arch/arm/mach-omap1/clock.c | 6
-rw-r--r-- arch/arm/mach-omap2/clock34xx_data.c | 4
-rw-r--r-- arch/arm/mach-omap2/clock44xx_data.c | 62
-rw-r--r-- arch/arm/mach-omap2/cpuidle34xx.c | 2
-rw-r--r-- arch/arm/mach-omap2/gpmc.c | 2
-rw-r--r-- arch/arm/mach-omap2/id.c | 41
-rw-r--r-- arch/arm/mach-omap2/irq.c | 18
-rw-r--r-- arch/arm/mach-omap2/mux.c | 27
-rw-r--r-- arch/arm/mach-omap2/mux.h | 24
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 3
-rw-r--r-- arch/arm/mach-omap2/pm-debug.c | 12
-rw-r--r-- arch/arm/mach-omap2/pm.h | 8
-rw-r--r-- arch/arm/mach-omap2/pm34xx.c | 47
-rw-r--r-- arch/arm/mach-omap2/prcm.c | 11
-rw-r--r-- arch/arm/mach-omap2/prm.h | 2
-rw-r--r-- arch/arm/mach-omap2/prm44xx.h | 32
-rw-r--r-- arch/arm/mach-omap2/sleep34xx.S | 13
-rw-r--r-- arch/arm/plat-omap/common.c | 26
-rw-r--r-- arch/arm/plat-omap/dma.c | 2
-rw-r--r-- arch/arm/plat-omap/dmtimer.c | 13
-rw-r--r-- arch/arm/plat-omap/include/plat/cpu.h | 1
-rw-r--r-- arch/arm/plat-omap/include/plat/irqs.h | 3
-rw-r--r-- arch/arm/plat-omap/include/plat/omap_hwmod.h | 1
-rw-r--r-- arch/sparc/Kconfig | 5
-rw-r--r-- arch/sparc/configs/sparc32_defconfig | 87
-rw-r--r-- arch/sparc/configs/sparc64_defconfig | 131
-rw-r--r-- arch/sparc/include/asm/io_32.h | 2
-rw-r--r-- arch/sparc/include/asm/page_32.h | 2
-rw-r--r-- arch/sparc/include/asm/param.h | 19
-rw-r--r-- arch/sparc/include/asm/timex_32.h | 1
-rw-r--r-- arch/sparc/include/asm/topology_64.h | 4
-rw-r--r-- arch/sparc/include/asm/uaccess_32.h | 2
-rw-r--r-- arch/sparc/include/asm/uaccess_64.h | 2
-rw-r--r-- arch/sparc/kernel/central.c | 4
-rw-r--r-- arch/sparc/kernel/irq_64.c | 37
-rw-r--r-- arch/sparc/kernel/pcic.c | 103
-rw-r--r-- arch/sparc/kernel/perf_event.c | 627
-rw-r--r-- arch/sparc/kernel/sys_sparc_64.c | 6
-rw-r--r-- arch/sparc/kernel/time_32.c | 116
-rw-r--r-- arch/sparc/mm/fault_32.c | 12
-rw-r--r-- arch/sparc/mm/fault_64.c | 13
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 3
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 187
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 31
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.c | 76
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dp.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_irq.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_state.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_instmem.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_crtc.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fifo.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_graph.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_sor.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 102
-rw-r--r-- drivers/gpu/drm/radeon/atom.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 259
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 82
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 83
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 25
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_agp.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_clocks.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 45
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 77
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 28
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/r200 | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 33
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 69
-rw-r--r-- drivers/gpu/drm/ttm/ttm_lock.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 63
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 19
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 9
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 64
-rw-r--r-- drivers/net/benet/be_cmds.c | 2
-rw-r--r-- drivers/net/benet/be_main.c | 8
-rw-r--r-- drivers/net/bfin_mac.c | 5
-rw-r--r-- drivers/net/e1000/e1000.h | 2
-rw-r--r-- drivers/net/e1000/e1000_main.c | 43
-rw-r--r-- drivers/net/e1000e/e1000.h | 1
-rw-r--r-- drivers/net/e1000e/netdev.c | 57
-rw-r--r-- drivers/net/igb/igb_main.c | 4
-rw-r--r-- drivers/net/igbvf/netdev.c | 14
-rw-r--r-- drivers/net/ixgb/ixgb_main.c | 10
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 12
-rw-r--r-- drivers/net/pcmcia/fmvj18x_cs.c | 1
-rw-r--r-- drivers/net/phy/phy.c | 4
-rw-r--r-- drivers/net/phy/phy_device.c | 1
-rw-r--r-- drivers/net/qlge/qlge_main.c | 15
-rw-r--r-- drivers/net/s2io.c | 2
-rw-r--r-- drivers/net/sfc/mcdi.c | 7
-rw-r--r-- drivers/net/sfc/mcdi.h | 1
-rw-r--r-- drivers/net/sfc/mcdi_pcol.h | 4
-rw-r--r-- drivers/net/sfc/mtd.c | 5
-rw-r--r-- drivers/net/sfc/qt202x_phy.c | 6
-rw-r--r-- drivers/net/sky2.c | 42
-rw-r--r-- drivers/net/tulip/tulip_core.c | 1
-rw-r--r-- drivers/net/ucc_geth.c | 5
-rw-r--r-- drivers/net/virtio_net.c | 3
-rw-r--r-- drivers/net/wimax/i2400m/i2400m-usb.h | 2
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-5000.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-devtrace.c | 26
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-devtrace.h | 26
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/commands.c | 4
-rw-r--r-- drivers/net/wireless/iwmc3200wifi/commands.h | 1
-rw-r--r-- drivers/net/wireless/p54/p54pci.c | 8
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_usb.c | 1
-rw-r--r-- drivers/serial/serial_cs.c | 1
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 5
-rw-r--r-- include/linux/phy.h | 1
-rw-r--r-- include/net/netns/xfrm.h | 6
-rw-r--r-- include/net/netrom.h | 2
-rw-r--r-- include/net/xfrm.h | 4
-rw-r--r-- net/8021q/vlan_dev.c | 2
-rw-r--r-- net/appletalk/aarp.c | 2
-rw-r--r-- net/ax25/ax25_out.c | 6
-rw-r--r-- net/dccp/ccid.c | 18
-rw-r--r-- net/dccp/ccid.h | 2
-rw-r--r-- net/dccp/probe.c | 3
-rw-r--r-- net/ipv4/inet_diag.c | 2
-rw-r--r-- net/ipv4/route.c | 2
-rw-r--r-- net/ipv4/tcp_probe.c | 19
-rw-r--r-- net/ipv4/xfrm4_policy.c | 14
-rw-r--r-- net/ipv6/xfrm6_policy.c | 25
-rw-r--r-- net/mac80211/cfg.c | 3
-rw-r--r-- net/mac80211/rc80211_pid_algo.c | 2
-rw-r--r-- net/netrom/nr_route.c | 11
-rw-r--r-- net/rose/rose_link.c | 8
-rw-r--r-- net/rose/rose_route.c | 5
-rw-r--r-- net/wireless/sme.c | 1
-rw-r--r-- net/xfrm/xfrm_policy.c | 75
-rw-r--r-- net/xfrm/xfrm_state.c | 6
-rw-r--r-- net/xfrm/xfrm_user.c | 14
152 files changed, 2325 insertions(+), 1268 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 1858646b52e3..03f38c18f323 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -987,7 +987,6 @@ F: drivers/platform/x86/asus-laptop.c
 
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M:	Dan Williams <dan.j.williams@intel.com>
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
 S:	Supported
 F:	Documentation/crypto/async-tx-api.txt
@@ -1823,7 +1822,6 @@ S: Supported
 F:	fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
@@ -2786,7 +2784,7 @@ F: arch/x86/kernel/microcode_core.c
 F:	arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/ioat*
 
@@ -2824,10 +2822,11 @@ L: netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ixp2000/
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 M:	Bruce Allan <bruce.w.allan@intel.com>
+M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
 L:	e1000-devel@lists.sourceforge.net
@@ -2837,6 +2836,7 @@ F: drivers/net/e100.c
 F:	drivers/net/e1000/
 F:	drivers/net/e1000e/
 F:	drivers/net/igb/
+F:	drivers/net/igbvf/
 F:	drivers/net/ixgb/
 F:	drivers/net/ixgbe/
 
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 2ba9ab953731..04f1d29cba2c 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -214,8 +214,8 @@ int omap1_select_table_rate(struct clk *clk, unsigned long rate)
 	struct mpu_rate * ptr;
 	unsigned long dpll1_rate, ref_rate;
 
-	dpll1_rate = clk_get_rate(ck_dpll1_p);
-	ref_rate = clk_get_rate(ck_ref_p);
+	dpll1_rate = ck_dpll1_p->rate;
+	ref_rate = ck_ref_p->rate;
 
 	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
 		if (ptr->xtal != ref_rate)
@@ -306,7 +306,7 @@ long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
 	long highest_rate;
 	unsigned long ref_rate;
 
-	ref_rate = clk_get_rate(ck_ref_p);
+	ref_rate = ck_ref_p->rate;
 
 	highest_rate = -EINVAL;
 
diff --git a/arch/arm/mach-omap2/clock34xx_data.c b/arch/arm/mach-omap2/clock34xx_data.c
index c6031d74d6f6..74930e3158e3 100644
--- a/arch/arm/mach-omap2/clock34xx_data.c
+++ b/arch/arm/mach-omap2/clock34xx_data.c
@@ -671,7 +671,6 @@ static struct clk dpll4_m3x2_ck = {
 	.name		= "dpll4_m3x2_ck",
 	.ops		= &clkops_omap2_dflt_wait,
 	.parent		= &dpll4_m3_ck,
-	.init		= &omap2_init_clksel_parent,
 	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
 	.enable_bit	= OMAP3430_PWRDN_TV_SHIFT,
 	.flags		= INVERT_ENABLE,
@@ -811,7 +810,6 @@ static struct clk dpll4_m6x2_ck = {
 	.name		= "dpll4_m6x2_ck",
 	.ops		= &clkops_omap2_dflt_wait,
 	.parent		= &dpll4_m6_ck,
-	.init		= &omap2_init_clksel_parent,
 	.enable_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
 	.enable_bit	= OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
 	.flags		= INVERT_ENABLE,
@@ -1047,7 +1045,6 @@ static struct clk iva2_ck = {
 	.name		= "iva2_ck",
 	.ops		= &clkops_omap2_dflt_wait,
 	.parent		= &dpll2_m2_ck,
-	.init		= &omap2_init_clksel_parent,
 	.enable_reg	= OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
 	.clkdm_name	= "iva2_clkdm",
@@ -1121,7 +1118,6 @@ static struct clk gfx_l3_ck = {
 	.name		= "gfx_l3_ck",
 	.ops		= &clkops_omap2_dflt_wait,
 	.parent		= &l3_ick,
-	.init		= &omap2_init_clksel_parent,
 	.enable_reg	= OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
 	.enable_bit	= OMAP_EN_GFX_SHIFT,
 	.recalc		= &followparent_recalc,
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 2210e227d78a..9d882bcb56e3 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -346,37 +346,37 @@ static struct clk aess_fclk = {
 };
 
 static const struct clksel_rate div31_1to31_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_4430 },
-	{ .div = 2, .val = 1, .flags = RATE_IN_4430 },
-	{ .div = 3, .val = 2, .flags = RATE_IN_4430 },
-	{ .div = 4, .val = 3, .flags = RATE_IN_4430 },
-	{ .div = 5, .val = 4, .flags = RATE_IN_4430 },
-	{ .div = 6, .val = 5, .flags = RATE_IN_4430 },
-	{ .div = 7, .val = 6, .flags = RATE_IN_4430 },
-	{ .div = 8, .val = 7, .flags = RATE_IN_4430 },
-	{ .div = 9, .val = 8, .flags = RATE_IN_4430 },
-	{ .div = 10, .val = 9, .flags = RATE_IN_4430 },
-	{ .div = 11, .val = 10, .flags = RATE_IN_4430 },
-	{ .div = 12, .val = 11, .flags = RATE_IN_4430 },
-	{ .div = 13, .val = 12, .flags = RATE_IN_4430 },
-	{ .div = 14, .val = 13, .flags = RATE_IN_4430 },
-	{ .div = 15, .val = 14, .flags = RATE_IN_4430 },
-	{ .div = 16, .val = 15, .flags = RATE_IN_4430 },
-	{ .div = 17, .val = 16, .flags = RATE_IN_4430 },
-	{ .div = 18, .val = 17, .flags = RATE_IN_4430 },
-	{ .div = 19, .val = 18, .flags = RATE_IN_4430 },
-	{ .div = 20, .val = 19, .flags = RATE_IN_4430 },
-	{ .div = 21, .val = 20, .flags = RATE_IN_4430 },
-	{ .div = 22, .val = 21, .flags = RATE_IN_4430 },
-	{ .div = 23, .val = 22, .flags = RATE_IN_4430 },
-	{ .div = 24, .val = 23, .flags = RATE_IN_4430 },
-	{ .div = 25, .val = 24, .flags = RATE_IN_4430 },
-	{ .div = 26, .val = 25, .flags = RATE_IN_4430 },
-	{ .div = 27, .val = 26, .flags = RATE_IN_4430 },
-	{ .div = 28, .val = 27, .flags = RATE_IN_4430 },
-	{ .div = 29, .val = 28, .flags = RATE_IN_4430 },
-	{ .div = 30, .val = 29, .flags = RATE_IN_4430 },
-	{ .div = 31, .val = 30, .flags = RATE_IN_4430 },
+	{ .div = 1, .val = 1, .flags = RATE_IN_4430 },
+	{ .div = 2, .val = 2, .flags = RATE_IN_4430 },
+	{ .div = 3, .val = 3, .flags = RATE_IN_4430 },
+	{ .div = 4, .val = 4, .flags = RATE_IN_4430 },
+	{ .div = 5, .val = 5, .flags = RATE_IN_4430 },
+	{ .div = 6, .val = 6, .flags = RATE_IN_4430 },
+	{ .div = 7, .val = 7, .flags = RATE_IN_4430 },
+	{ .div = 8, .val = 8, .flags = RATE_IN_4430 },
+	{ .div = 9, .val = 9, .flags = RATE_IN_4430 },
+	{ .div = 10, .val = 10, .flags = RATE_IN_4430 },
+	{ .div = 11, .val = 11, .flags = RATE_IN_4430 },
+	{ .div = 12, .val = 12, .flags = RATE_IN_4430 },
+	{ .div = 13, .val = 13, .flags = RATE_IN_4430 },
+	{ .div = 14, .val = 14, .flags = RATE_IN_4430 },
+	{ .div = 15, .val = 15, .flags = RATE_IN_4430 },
+	{ .div = 16, .val = 16, .flags = RATE_IN_4430 },
+	{ .div = 17, .val = 17, .flags = RATE_IN_4430 },
+	{ .div = 18, .val = 18, .flags = RATE_IN_4430 },
+	{ .div = 19, .val = 19, .flags = RATE_IN_4430 },
+	{ .div = 20, .val = 20, .flags = RATE_IN_4430 },
+	{ .div = 21, .val = 21, .flags = RATE_IN_4430 },
+	{ .div = 22, .val = 22, .flags = RATE_IN_4430 },
+	{ .div = 23, .val = 23, .flags = RATE_IN_4430 },
+	{ .div = 24, .val = 24, .flags = RATE_IN_4430 },
+	{ .div = 25, .val = 25, .flags = RATE_IN_4430 },
+	{ .div = 26, .val = 26, .flags = RATE_IN_4430 },
+	{ .div = 27, .val = 27, .flags = RATE_IN_4430 },
+	{ .div = 28, .val = 28, .flags = RATE_IN_4430 },
+	{ .div = 29, .val = 29, .flags = RATE_IN_4430 },
+	{ .div = 30, .val = 30, .flags = RATE_IN_4430 },
+	{ .div = 31, .val = 31, .flags = RATE_IN_4430 },
 	{ .div = 0 },
 };
 
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index a26d6a08ae3f..12f0cbfc2894 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -137,7 +137,7 @@ return_sleep_time:
 	local_irq_enable();
 	local_fiq_enable();
 
-	return (u32)timespec_to_ns(&ts_idle)/1000;
+	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
 }
 
 /**
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index bd8cb5974726..3f1334f62e7a 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -534,6 +534,8 @@ void __init gpmc_init(void)
 		BUG();
 	}
 
+	clk_enable(gpmc_l3_clk);
+
 	l = gpmc_read_reg(GPMC_REVISION);
 	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
 	/* Set smart idle mode and automatic L3 clock gating */
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index a091b53657b9..3d65c50bd017 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -188,6 +188,8 @@ void __init omap3_check_revision(void)
 	u16 hawkeye;
 	u8 rev;
 
+	omap_chip.oc = CHIP_IS_OMAP3430;
+
 	/*
 	 * We cannot access revision registers on ES1.0.
 	 * If the processor type is Cortex-A8 and the revision is 0x0
@@ -196,6 +198,7 @@ void __init omap3_check_revision(void)
 	cpuid = read_cpuid(CPUID_ID);
 	if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
 		omap_revision = OMAP3430_REV_ES1_0;
+		omap_chip.oc |= CHIP_IS_OMAP3430ES1;
 		return;
 	}
 
@@ -216,18 +219,28 @@ void __init omap3_check_revision(void)
 		case 0: /* Take care of early samples */
 		case 1:
 			omap_revision = OMAP3430_REV_ES2_0;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES2;
 			break;
 		case 2:
 			omap_revision = OMAP3430_REV_ES2_1;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES2;
 			break;
 		case 3:
 			omap_revision = OMAP3430_REV_ES3_0;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES3_0;
 			break;
 		case 4:
+			omap_revision = OMAP3430_REV_ES3_1;
+			omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
+			break;
+		case 7:
 		/* FALLTHROUGH */
 		default:
 			/* Use the latest known revision as default */
-			omap_revision = OMAP3430_REV_ES3_1;
+			omap_revision = OMAP3430_REV_ES3_1_2;
+
+			/* REVISIT: Add CHIP_IS_OMAP3430ES3_1_2? */
+			omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
 		}
 		break;
 	case 0xb868:
@@ -235,14 +248,18 @@ void __init omap3_check_revision(void)
 		 *
 		 * Set the device to be OMAP3505 here. Actual device
 		 * is identified later based on the features.
+		 *
+		 * REVISIT: AM3505/AM3517 should have their own CHIP_IS
 		 */
 		omap_revision = OMAP3505_REV(rev);
+		omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
 		break;
 	case 0xb891:
 	/* FALLTHROUGH */
 	default:
 		/* Unknown default to latest silicon rev as default*/
 		omap_revision = OMAP3630_REV_ES1_0;
+		omap_chip.oc |= CHIP_IS_OMAP3630ES1;
 	}
 }
 
@@ -360,6 +377,7 @@ void __init omap2_check_revision(void)
 		omap3_check_revision();
 		omap3_check_features();
 		omap3_cpuinfo();
+		return;
 	} else if (cpu_is_omap44xx()) {
 		omap4_check_revision();
 		return;
@@ -374,27 +392,14 @@ void __init omap2_check_revision(void)
 	if (cpu_is_omap243x()) {
 		/* Currently only supports 2430ES2.1 and 2430-all */
 		omap_chip.oc |= CHIP_IS_OMAP2430;
+		return;
 	} else if (cpu_is_omap242x()) {
 		/* Currently only supports 2420ES2.1.1 and 2420-all */
 		omap_chip.oc |= CHIP_IS_OMAP2420;
-	} else if (cpu_is_omap3505() || cpu_is_omap3517()) {
-		omap_chip.oc = CHIP_IS_OMAP3430 | CHIP_IS_OMAP3430ES3_1;
-	} else if (cpu_is_omap343x()) {
-		omap_chip.oc = CHIP_IS_OMAP3430;
-		if (omap_rev() == OMAP3430_REV_ES1_0)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES1;
-		else if (omap_rev() >= OMAP3430_REV_ES2_0 &&
-			 omap_rev() <= OMAP3430_REV_ES2_1)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES2;
-		else if (omap_rev() == OMAP3430_REV_ES3_0)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES3_0;
-		else if (omap_rev() == OMAP3430_REV_ES3_1)
-			omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
-		else if (omap_rev() == OMAP3630_REV_ES1_0)
-			omap_chip.oc |= CHIP_IS_OMAP3630ES1;
-	} else {
-		pr_err("Uninitialized omap_chip, please fix!\n");
+		return;
 	}
+
+	pr_err("Uninitialized omap_chip, please fix!\n");
 }
 
 /*
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index e9bc782fa414..27054025da2b 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -274,4 +274,22 @@ void omap_intc_restore_context(void)
 	}
 	/* MIRs are saved and restore with other PRCM registers */
 }
+
+void omap3_intc_suspend(void)
+{
+	/* A pending interrupt would prevent OMAP from entering suspend */
+	omap_ack_irq(0);
+}
+
+void omap3_intc_prepare_idle(void)
+{
+	/* Disable autoidle as it can stall interrupt controller */
+	intc_bank_write_reg(0, &irq_banks[0], INTC_SYSCONFIG);
+}
+
+void omap3_intc_resume_idle(void)
+{
+	/* Re-enable autoidle */
+	intc_bank_write_reg(1, &irq_banks[0], INTC_SYSCONFIG);
+}
 #endif /* CONFIG_ARCH_OMAP3 */
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 459ef23ab8a8..3f59bd12cbbf 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -51,7 +51,7 @@ struct omap_mux_entry {
 static unsigned long mux_phys;
 static void __iomem *mux_base;
 
-static inline u16 omap_mux_read(u16 reg)
+u16 omap_mux_read(u16 reg)
 {
 	if (cpu_is_omap24xx())
 		return __raw_readb(mux_base + reg);
@@ -59,7 +59,7 @@ static inline u16 omap_mux_read(u16 reg)
 	return __raw_readw(mux_base + reg);
 }
 
-static inline void omap_mux_write(u16 val, u16 reg)
+void omap_mux_write(u16 val, u16 reg)
 {
 	if (cpu_is_omap24xx())
 		__raw_writeb(val, mux_base + reg);
@@ -67,6 +67,14 @@ static inline void omap_mux_write(u16 val, u16 reg)
 	__raw_writew(val, mux_base + reg);
 }
 
+void omap_mux_write_array(struct omap_board_mux *board_mux)
+{
+	while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
+		omap_mux_write(board_mux->value, board_mux->reg_offset);
+		board_mux++;
+	}
+}
+
 #if defined(CONFIG_ARCH_OMAP24XX) && defined(CONFIG_OMAP_MUX)
 
 static struct omap_mux_cfg arch_mux_cfg;
@@ -833,14 +841,6 @@ static void __init omap_mux_set_cmdline_signals(void)
 	kfree(options);
 }
 
-static void __init omap_mux_set_board_signals(struct omap_board_mux *board_mux)
-{
-	while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
-		omap_mux_write(board_mux->value, board_mux->reg_offset);
-		board_mux++;
-	}
-}
-
 static int __init omap_mux_copy_names(struct omap_mux *src,
 				      struct omap_mux *dst)
 {
@@ -998,12 +998,15 @@ int __init omap_mux_init(u32 mux_pbase, u32 mux_size,
 		omap_mux_package_fixup(package_subset, superset);
 	if (package_balls)
 		omap_mux_package_init_balls(package_balls, superset);
-	omap_mux_set_cmdline_signals();
-	omap_mux_set_board_signals(board_mux);
 #endif
 
 	omap_mux_init_list(superset);
 
+#ifdef CONFIG_OMAP_MUX
+	omap_mux_set_cmdline_signals();
+	omap_mux_write_array(board_mux);
+#endif
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index d8b4d5ad2278..f8c2e7a8f063 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -147,6 +147,30 @@ u16 omap_mux_get_gpio(int gpio);
 void omap_mux_set_gpio(u16 val, int gpio);
 
 /**
+ * omap_mux_read() - read mux register
+ * @mux_offset:	Offset of the mux register
+ *
+ */
+u16 omap_mux_read(u16 mux_offset);
+
+/**
+ * omap_mux_write() - write mux register
+ * @val:		New mux register value
+ * @mux_offset:	Offset of the mux register
+ *
+ * This should be only needed for dynamic remuxing of non-gpio signals.
+ */
+void omap_mux_write(u16 val, u16 mux_offset);
+
+/**
+ * omap_mux_write_array() - write an array of mux registers
+ * @board_mux:	Array of mux registers terminated by MAP_MUX_TERMINATOR
+ *
+ * This should be only needed for dynamic remuxing of non-gpio signals.
+ */
+void omap_mux_write_array(struct omap_board_mux *board_mux);
+
+/**
 * omap3_mux_init() - initialize mux system with board specific set
 * @board_mux:	Board specific mux table
 * @flags:	OMAP package type used for the board
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d8c8545875b1..478ae585ca39 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -94,7 +94,8 @@ static int _update_sysc_cache(struct omap_hwmod *oh)
 
 	oh->_sysc_cache = omap_hwmod_readl(oh, oh->sysconfig->sysc_offs);
 
-	oh->_int_flags |= _HWMOD_SYSCONFIG_LOADED;
+	if (!(oh->sysconfig->sysc_flags & SYSC_NO_CACHE))
+		oh->_int_flags |= _HWMOD_SYSCONFIG_LOADED;
 
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 860b755d2220..a0866268aa41 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -54,8 +54,6 @@ int omap2_pm_debug;
 	regs[reg_count++].val = \
 		 __raw_readl(OMAP2_L4_IO_ADDRESS(0x480fe000 + (off)))
 
-static int __init pm_dbg_init(void);
-
 void omap2_pm_dump(int mode, int resume, unsigned int us)
 {
 	struct reg {
@@ -167,6 +165,8 @@ struct dentry *pm_dbg_dir;
 
 static int pm_dbg_init_done;
 
+static int __init pm_dbg_init(void);
+
 enum {
 	DEBUG_FILE_COUNTERS = 0,
 	DEBUG_FILE_TIMERS,
@@ -488,9 +488,11 @@ int pm_dbg_regset_init(int reg_set)
 
 static int pwrdm_suspend_get(void *data, u64 *val)
 {
-	*val = omap3_pm_get_suspend_state((struct powerdomain *)data);
+	int ret;
+	ret = omap3_pm_get_suspend_state((struct powerdomain *)data);
+	*val = ret;
 
-	if (*val >= 0)
+	if (ret >= 0)
 		return 0;
 	return *val;
 }
@@ -604,6 +606,4 @@ static int __init pm_dbg_init(void)
 }
 arch_initcall(pm_dbg_init);
 
-#else
-void pm_dbg_update_time(struct powerdomain *pwrdm, int prev) {}
 #endif
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 0bf345db7147..7a9c2d004511 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -32,12 +32,16 @@ extern struct omap_dm_timer *gptimer_wakeup;
 #ifdef CONFIG_PM_DEBUG
 extern void omap2_pm_dump(int mode, int resume, unsigned int us);
 extern int omap2_pm_debug;
+#else
+#define omap2_pm_dump(mode, resume, us)		do {} while (0);
+#define omap2_pm_debug				0
+#endif
+
+#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
 extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
 extern int pm_dbg_regset_save(int reg_set);
 extern int pm_dbg_regset_init(int reg_set);
 #else
-#define omap2_pm_dump(mode, resume, us)		do {} while (0);
-#define omap2_pm_debug				0
 #define pm_dbg_update_time(pwrdm, prev) do {} while (0);
 #define pm_dbg_regset_save(reg_set) do {} while (0);
 #define pm_dbg_regset_init(reg_set) do {} while (0);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c6cc809afb79..910a7acf542d 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -26,6 +26,7 @@
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 
 #include <plat/sram.h>
 #include <plat/clockdomain.h>
@@ -126,7 +127,15 @@ static void omap3_core_save_context(void)
 	/* wait for the save to complete */
 	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
 			& PADCONF_SAVE_DONE))
-		;
+		udelay(1);
+
+	/*
+	 * Force write last pad into memory, as this can fail in some
+	 * cases according to erratas 1.157, 1.185
+	 */
+	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
+		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
+
 	/* Save the Interrupt controller context */
 	omap_intc_save_context();
 	/* Save the GPMC context */
@@ -392,6 +401,7 @@ void omap_sram_idle(void)
 		prm_set_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN);
 		omap3_enable_io_chain();
 	}
+	omap3_intc_prepare_idle();
 
 	/*
 	* On EMU/HS devices ROM code restores a SRDC value
@@ -438,6 +448,7 @@ void omap_sram_idle(void)
 				OMAP3430_GR_MOD,
 				OMAP3_PRM_VOLTCTRL_OFFSET);
 	}
+	omap3_intc_resume_idle();
 
 	/* PER */
 	if (per_next_state < PWRDM_POWER_ON) {
@@ -578,6 +589,8 @@ static int omap3_pm_suspend(void)
 	}
 
 	omap_uart_prepare_suspend();
+	omap3_intc_suspend();
+
 	omap_sram_idle();
 
 restore:
@@ -835,6 +848,8 @@ static void __init prcm_setup_regs(void)
 			CM_AUTOIDLE);
 	}
 
+	omap_ctrl_writel(OMAP3430_AUTOIDLE, OMAP2_CONTROL_SYSCONFIG);
+
 	/*
 	 * Set all plls to autoidle. This is needed until autoidle is
 	 * enabled by clockfw
@@ -875,15 +890,23 @@ static void __init prcm_setup_regs(void)
 	prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN,
 			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
 
+	/* Enable PM_WKEN to support DSS LPR */
+	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS,
+				OMAP3430_DSS_MOD, PM_WKEN);
+
 	/* Enable wakeups in PER */
 	prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 |
 			  OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 |
-			  OMAP3430_EN_GPIO6 | OMAP3430_EN_UART3,
+			  OMAP3430_EN_GPIO6 | OMAP3430_EN_UART3 |
+			  OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
+			  OMAP3430_EN_MCBSP4,
 			  OMAP3430_PER_MOD, PM_WKEN);
 	/* and allow them to wake up MPU */
 	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 |
 			  OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 |
-			  OMAP3430_GRPSEL_GPIO6 | OMAP3430_EN_UART3,
+			  OMAP3430_GRPSEL_GPIO6 | OMAP3430_EN_UART3 |
+			  OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
+			  OMAP3430_EN_MCBSP4,
 			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
 
 	/* Don't attach IVA interrupts */
@@ -904,24 +927,6 @@ static void __init prcm_setup_regs(void)
 	/* Clear any pending PRCM interrupts */
 	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-	/* Don't attach IVA interrupts */
-	prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
-	prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
-	prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
-	prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
-
-	/* Clear any pending 'reset' flags */
-	prm_write_mod_reg(0xffffffff, MPU_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, CORE_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, RM_RSTST);
-
-	/* Clear any pending PRCM interrupts */
-	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
-
 	omap3_iva_idle();
 	omap3_d2d_idle();
 }
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 3ea8177ffb25..cf466ea1dffc 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -44,7 +44,6 @@ struct omap3_prcm_regs {
 	u32 iva2_cm_clksel2;
 	u32 cm_sysconfig;
 	u32 sgx_cm_clksel;
-	u32 wkup_cm_clksel;
 	u32 dss_cm_clksel;
 	u32 cam_cm_clksel;
 	u32 per_cm_clksel;
@@ -53,7 +52,6 @@ struct omap3_prcm_regs {
 	u32 pll_cm_autoidle2;
 	u32 pll_cm_clksel4;
 	u32 pll_cm_clksel5;
-	u32 pll_cm_clken;
 	u32 pll_cm_clken2;
 	u32 cm_polctrl;
 	u32 iva2_cm_fclken;
@@ -77,7 +75,6 @@ struct omap3_prcm_regs {
 	u32 usbhost_cm_iclken;
 	u32 iva2_cm_autiidle2;
 	u32 mpu_cm_autoidle2;
-	u32 pll_cm_autoidle;
 	u32 iva2_cm_clkstctrl;
 	u32 mpu_cm_clkstctrl;
 	u32 core_cm_clkstctrl;
@@ -274,7 +271,6 @@ void omap3_prcm_save_context(void)
 	prcm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG);
 	prcm_context.sgx_cm_clksel =
			 cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL);
-	prcm_context.wkup_cm_clksel = cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
 	prcm_context.dss_cm_clksel =
			 cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL);
 	prcm_context.cam_cm_clksel =
@@ -291,8 +287,6 @@ void omap3_prcm_save_context(void)
			 cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4);
 	prcm_context.pll_cm_clksel5 =
			 cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5);
-	prcm_context.pll_cm_clken =
-			cm_read_mod_reg(PLL_MOD, CM_CLKEN);
 	prcm_context.pll_cm_clken2 =
			 cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2);
 	prcm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL);
@@ -338,8 +332,6 @@ void omap3_prcm_save_context(void)
			 cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
 	prcm_context.mpu_cm_autoidle2 =
			 cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2);
-	prcm_context.pll_cm_autoidle =
-			cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE);
 	prcm_context.iva2_cm_clkstctrl =
			 cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSTCTRL);
 	prcm_context.mpu_cm_clkstctrl =
@@ -431,7 +423,6 @@ void omap3_prcm_restore_context(void)
 	__raw_writel(prcm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG);
 	cm_write_mod_reg(prcm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
			 CM_CLKSEL);
-	cm_write_mod_reg(prcm_context.wkup_cm_clksel, WKUP_MOD, CM_CLKSEL);
 	cm_write_mod_reg(prcm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
			 CM_CLKSEL);
 	cm_write_mod_reg(prcm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
@@ -448,7 +439,6 @@ void omap3_prcm_restore_context(void)
			 OMAP3430ES2_CM_CLKSEL4);
 	cm_write_mod_reg(prcm_context.pll_cm_clksel5, PLL_MOD,
			 OMAP3430ES2_CM_CLKSEL5);
-	cm_write_mod_reg(prcm_context.pll_cm_clken, PLL_MOD, CM_CLKEN);
 	cm_write_mod_reg(prcm_context.pll_cm_clken2, PLL_MOD,
			 OMAP3430ES2_CM_CLKEN2);
 	__raw_writel(prcm_context.cm_polctrl, OMAP3430_CM_POLCTRL);
@@ -487,7 +477,6 @@ void omap3_prcm_restore_context(void)
 	cm_write_mod_reg(prcm_context.iva2_cm_autiidle2, OMAP3430_IVA2_MOD,
			 CM_AUTOIDLE2);
 	cm_write_mod_reg(prcm_context.mpu_cm_autoidle2, MPU_MOD, CM_AUTOIDLE2);
-	cm_write_mod_reg(prcm_context.pll_cm_autoidle, PLL_MOD, CM_AUTOIDLE);
 	cm_write_mod_reg(prcm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
			 CM_CLKSTCTRL);
 	cm_write_mod_reg(prcm_context.mpu_cm_clkstctrl, MPU_MOD, CM_CLKSTCTRL);
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index ea050ce188a7..40f006285163 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -24,6 +24,8 @@
 		OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE + (module) + (reg))
 #define OMAP44XX_PRM_REGADDR(module, reg)				\
 		OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (module) + (reg))
+#define OMAP44XX_CHIRONSS_REGADDR(module, reg)				\
+		OMAP2_L4_IO_ADDRESS(OMAP4430_CHIRONSS_BASE + (module) + (reg))
 
 #include "prm44xx.h"
 
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 89be97f0589d..adb2558bb121 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -386,26 +386,26 @@
 
 
 /* CHIRON_PRCM.CHIRONSS_OCP_SOCKET_PRCM register offsets */
-#define OMAP4430_REVISION_PRCM	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD, 0x0000)
+#define OMAP4430_REVISION_PRCM	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD, 0x0000)
 
 /* CHIRON_PRCM.CHIRONSS_DEVICE_PRM register offsets */
-#define OMAP4430_CHIRON_PRCM_PRM_RSTST	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD, 0x0000)
+#define OMAP4430_CHIRON_PRCM_PRM_RSTST	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD, 0x0000)
 
 /* CHIRON_PRCM.CHIRONSS_CPU0 register offsets */
-#define OMAP4430_PM_PDA_CPU0_PWRSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU0_PWRSTST	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU0_CPU0_CONTEXT	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTST	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU0_CPU0_CLKCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU0_CLKSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0018)
+#define OMAP4430_PM_PDA_CPU0_PWRSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0000)
+#define OMAP4430_PM_PDA_CPU0_PWRSTST	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0004)
+#define OMAP4430_RM_PDA_CPU0_CPU0_CONTEXT	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0008)
+#define OMAP4430_RM_PDA_CPU0_CPU0_RSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x000c)
+#define OMAP4430_RM_PDA_CPU0_CPU0_RSTST	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0010)
+#define OMAP4430_CM_PDA_CPU0_CPU0_CLKCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0014)
+#define OMAP4430_CM_PDA_CPU0_CLKSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0018)
 
 /* CHIRON_PRCM.CHIRONSS_CPU1 register offsets */
-#define OMAP4430_PM_PDA_CPU1_PWRSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU1_PWRSTST	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU1_CPU1_CONTEXT	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTST	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU1_CPU1_CLKCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU1_CLKSTCTRL	OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0018)
+#define OMAP4430_PM_PDA_CPU1_PWRSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0000)
+#define OMAP4430_PM_PDA_CPU1_PWRSTST	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0004)
+#define OMAP4430_RM_PDA_CPU1_CPU1_CONTEXT	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0008)
+#define OMAP4430_RM_PDA_CPU1_CPU1_RSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x000c)
+#define OMAP4430_RM_PDA_CPU1_CPU1_RSTST	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0010)
+#define OMAP4430_CM_PDA_CPU1_CPU1_CLKCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0014)
+#define OMAP4430_CM_PDA_CPU1_CLKSTCTRL	OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0018)
 #endif
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 15268f8b61de..c3626ea48143 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -245,7 +245,8 @@ restore:
 	mov	r1, #0		@ set task id for ROM code in r1
 	mov	r2, #4		@ set some flags in r2, r6
 	mov	r6, #0xff
-	adr	r3, write_aux_control_params	@ r3 points to parameters
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4, #0xBC] @ r3 points to parameters
 	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
 	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
 	.word	0xE1600071		@ call SMI monitor (smi #1)
@@ -253,14 +254,14 @@ restore:
 	b	logic_l1_restore
 l2_inv_api_params:
 	.word	0x1, 0x00
-write_aux_control_params:
-	.word	0x1, 0x72
 l2_inv_gp:
 	/* Execute smi to invalidate L2 cache */
 	mov r12, #0x1		@ set up to invalide L2
 smi:	.word 0xE1600070	@ Call SMI monitor (smieq)
 	/* Write to Aux control register to set some bits */
-	mov	r0, #0x72
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4,#0xBC]
+	ldr	r0, [r3,#4]
 	mov	r12, #0x3
 	.word 0xE1600070	@ Call SMI monitor (smieq)
 logic_l1_restore:
@@ -271,6 +272,7 @@ logic_l1_restore:
 
 	ldr	r4, scratchpad_base
 	ldr	r3, [r4,#0xBC]
+	adds	r3, r3, #8
 	ldmia	r3!, {r4-r6}
 	mov	sp, r4
 	msr	spsr_cxsf, r5
@@ -387,6 +389,9 @@ usettbr0:
save_context_wfi:
 	/*b	save_context_wfi*/ @ enable to debug save code
 	mov	r8, r0 /* Store SDRAM address in r8 */
+	mrc	p15, 0, r5, c1, c0, 1	@ Read Auxiliary Control Register
+	mov	r4, #0x1		@ Number of parameters for restore call
+	stmia	r8!, {r4-r5}
 	/* Check what that target sleep state is:stored in r1*/
 	/* 1 - Only L1 and logic lost */
 	/* 2 - Only L2 lost */
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index bf1eaf3a27d4..dddc0273bc8b 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -172,6 +172,32 @@ unsigned long long sched_clock(void)
			 clocksource_32k.mult, clocksource_32k.shift);
 }
 
+/**
+ * read_persistent_clock -  Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer.  Convert the cycles elapsed since last read into
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+static struct timespec persistent_ts;
+static cycles_t cycles, last_cycles;
+void read_persistent_clock(struct timespec *ts)
+{
+	unsigned long long nsecs;
+	cycles_t delta;
+	struct timespec *tsp = &persistent_ts;
+
+	last_cycles = cycles;
+	cycles = clocksource_32k.read(&clocksource_32k);
+	delta = cycles - last_cycles;
+
+	nsecs = clocksource_cyc2ns(delta,
+			clocksource_32k.mult, clocksource_32k.shift);
+
+	timespec_add_ns(tsp, nsecs);
+	*ts = *tsp;
+}
+
 static int __init omap_init_clocksource_32k(void)
 {
 	static char err[] __initdata = KERN_ERR
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 09d82b3c66ce..728c64204184 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1183,7 +1183,7 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
 	}
 
 	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
-	    (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
+	    (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
		 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
			"before unlinking\n");
		 dump_stack();
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 64f407ee0f4e..08ccf8922520 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -551,6 +551,19 @@ void omap_dm_timer_stop(struct omap_dm_timer *timer)
 	if (l & OMAP_TIMER_CTRL_ST) {
 		l &= ~0x1;
 		omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
+			defined(CONFIG_ARCH_OMAP4)
+		/* Readback to make sure write has completed */
+		omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+		/*
+		 * Wait for functional clock period x 3.5 to make sure that
+		 * timer is stopped
+		 */
+		udelay(3500000 / clk_get_rate(timer->fclk) + 1);
+		/* Ack possibly pending interrupt */
+		omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
+				OMAP_TIMER_INT_OVERFLOW);
+#endif
 	}
 }
 EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 9a028bdebb06..a162f585b1e3 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -434,6 +434,7 @@ IS_OMAP_TYPE(3517, 0x3517)
 #define OMAP3430_REV_ES2_1	0x34302034
 #define OMAP3430_REV_ES3_0	0x34303034
 #define OMAP3430_REV_ES3_1	0x34304034
+#define OMAP3430_REV_ES3_1_2	0x34305034
 
 #define OMAP3630_REV_ES1_0	0x36300034
 
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 97d6c50c3dcb..c0ab7c80f72e 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -499,6 +499,9 @@ extern void omap_init_irq(void);
 extern int omap_irq_pending(void);
 void omap_intc_save_context(void);
 void omap_intc_restore_context(void);
+void omap3_intc_suspend(void);
+void omap3_intc_prepare_idle(void);
+void omap3_intc_resume_idle(void);
 #endif
 
 #include <mach/hardware.h>
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 007935a921ea..33933256a226 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -227,6 +227,7 @@ struct omap_hwmod_ocp_if {
 #define SYSC_HAS_SIDLEMODE	(1 << 5)
 #define SYSC_HAS_MIDLEMODE	(1 << 6)
 #define SYSS_MISSING		(1 << 7)
+#define SYSC_NO_CACHE		(1 << 8)	/* XXX SW flag, belongs elsewhere */
 
 /* omap_hwmod_sysconfig.clockact flags */
 #define CLOCKACT_TEST_BOTH	0x0
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 108197ac0d56..4097f6a10860 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -64,8 +64,11 @@ config BITS
 	default 64 if SPARC64
 
 config GENERIC_TIME
+	def_bool y
+
+config ARCH_USES_GETTIMEOFFSET
 	bool
-	default y if SPARC64
+	default y if SPARC32
 
 config GENERIC_CMOS_UPDATE
 	bool
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index 983d59824a28..99a1f191497b 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31
-# Wed Sep 16 00:03:43 2009
+# Linux kernel version: 2.6.33-rc2
+# Mon Jan 11 23:20:31 2010
 #
 # CONFIG_64BIT is not set
 CONFIG_SPARC=y
@@ -41,6 +41,7 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
 #
 CONFIG_TREE_RCU=y
 # CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_FANOUT=32
 # CONFIG_RCU_FANOUT_EXACT is not set
@@ -88,21 +89,21 @@ CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_AIO=y
-CONFIG_HAVE_PERF_COUNTERS=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
 
 #
-# Performance Counters
+# Kernel Performance Events And Counters
 #
+# CONFIG_PERF_EVENTS is not set
 # CONFIG_PERF_COUNTERS is not set
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
 CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
 CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_DMA_ATTRS=y
@@ -131,14 +132,41 @@ CONFIG_LBDAF=y
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
 # CONFIG_FREEZER is not set
 
 #
@@ -168,8 +196,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_SUN_PM=y
 # CONFIG_SPARC_LED is not set
@@ -257,6 +284,7 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_BEET=m
 # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
 CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
 CONFIG_IPV6_NDISC_NODETYPE=y
 CONFIG_IPV6_TUNNEL=m
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
@@ -295,9 +323,6 @@ CONFIG_NET_PKTGEN=m
 # CONFIG_AF_RXRPC is not set
296CONFIG_WIRELESS=y 324CONFIG_WIRELESS=y
297# CONFIG_CFG80211 is not set 325# CONFIG_CFG80211 is not set
298CONFIG_CFG80211_DEFAULT_PS_VALUE=0
299CONFIG_WIRELESS_OLD_REGULATORY=y
300# CONFIG_WIRELESS_EXT is not set
301# CONFIG_LIB80211 is not set 326# CONFIG_LIB80211 is not set
302 327
303# 328#
@@ -335,6 +360,10 @@ CONFIG_BLK_DEV=y
335# CONFIG_BLK_DEV_COW_COMMON is not set 360# CONFIG_BLK_DEV_COW_COMMON is not set
336CONFIG_BLK_DEV_LOOP=m 361CONFIG_BLK_DEV_LOOP=m
337CONFIG_BLK_DEV_CRYPTOLOOP=m 362CONFIG_BLK_DEV_CRYPTOLOOP=m
363
364#
365# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
366#
338# CONFIG_BLK_DEV_NBD is not set 367# CONFIG_BLK_DEV_NBD is not set
339# CONFIG_BLK_DEV_SX8 is not set 368# CONFIG_BLK_DEV_SX8 is not set
340CONFIG_BLK_DEV_RAM=y 369CONFIG_BLK_DEV_RAM=y
@@ -398,8 +427,11 @@ CONFIG_SCSI_LOWLEVEL=y
398# CONFIG_ISCSI_TCP is not set 427# CONFIG_ISCSI_TCP is not set
399# CONFIG_SCSI_CXGB3_ISCSI is not set 428# CONFIG_SCSI_CXGB3_ISCSI is not set
400# CONFIG_SCSI_BNX2_ISCSI is not set 429# CONFIG_SCSI_BNX2_ISCSI is not set
430# CONFIG_BE2ISCSI is not set
401# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 431# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
432# CONFIG_SCSI_HPSA is not set
402# CONFIG_SCSI_3W_9XXX is not set 433# CONFIG_SCSI_3W_9XXX is not set
434# CONFIG_SCSI_3W_SAS is not set
403# CONFIG_SCSI_ACARD is not set 435# CONFIG_SCSI_ACARD is not set
404# CONFIG_SCSI_AACRAID is not set 436# CONFIG_SCSI_AACRAID is not set
405# CONFIG_SCSI_AIC7XXX is not set 437# CONFIG_SCSI_AIC7XXX is not set
@@ -434,7 +466,9 @@ CONFIG_SCSI_QLOGICPTI=m
434# CONFIG_SCSI_DEBUG is not set 466# CONFIG_SCSI_DEBUG is not set
435CONFIG_SCSI_SUNESP=y 467CONFIG_SCSI_SUNESP=y
436# CONFIG_SCSI_PMCRAID is not set 468# CONFIG_SCSI_PMCRAID is not set
469# CONFIG_SCSI_PM8001 is not set
437# CONFIG_SCSI_SRP is not set 470# CONFIG_SCSI_SRP is not set
471# CONFIG_SCSI_BFA_FC is not set
438# CONFIG_SCSI_DH is not set 472# CONFIG_SCSI_DH is not set
439# CONFIG_SCSI_OSD_INITIATOR is not set 473# CONFIG_SCSI_OSD_INITIATOR is not set
440# CONFIG_ATA is not set 474# CONFIG_ATA is not set
@@ -450,7 +484,7 @@ CONFIG_SCSI_SUNESP=y
450# 484#
451 485
452# 486#
453# See the help texts for more information. 487# The newer stack is recommended.
454# 488#
455# CONFIG_FIREWIRE is not set 489# CONFIG_FIREWIRE is not set
456# CONFIG_IEEE1394 is not set 490# CONFIG_IEEE1394 is not set
@@ -487,6 +521,7 @@ CONFIG_SUNQE=m
487# CONFIG_NET_PCI is not set 521# CONFIG_NET_PCI is not set
488# CONFIG_B44 is not set 522# CONFIG_B44 is not set
489# CONFIG_KS8842 is not set 523# CONFIG_KS8842 is not set
524# CONFIG_KS8851_MLL is not set
490# CONFIG_ATL2 is not set 525# CONFIG_ATL2 is not set
491CONFIG_NETDEV_1000=y 526CONFIG_NETDEV_1000=y
492# CONFIG_ACENIC is not set 527# CONFIG_ACENIC is not set
@@ -546,6 +581,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
546# CONFIG_NETCONSOLE is not set 581# CONFIG_NETCONSOLE is not set
547# CONFIG_NETPOLL is not set 582# CONFIG_NETPOLL is not set
548# CONFIG_NET_POLL_CONTROLLER is not set 583# CONFIG_NET_POLL_CONTROLLER is not set
584# CONFIG_VMXNET3 is not set
549# CONFIG_ISDN is not set 585# CONFIG_ISDN is not set
550# CONFIG_PHONE is not set 586# CONFIG_PHONE is not set
551 587
@@ -555,6 +591,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
555CONFIG_INPUT=y 591CONFIG_INPUT=y
556# CONFIG_INPUT_FF_MEMLESS is not set 592# CONFIG_INPUT_FF_MEMLESS is not set
557# CONFIG_INPUT_POLLDEV is not set 593# CONFIG_INPUT_POLLDEV is not set
594# CONFIG_INPUT_SPARSEKMAP is not set
558 595
559# 596#
560# Userland interfaces 597# Userland interfaces
@@ -574,6 +611,7 @@ CONFIG_INPUT_KEYBOARD=y
574CONFIG_KEYBOARD_ATKBD=m 611CONFIG_KEYBOARD_ATKBD=m
575# CONFIG_KEYBOARD_LKKBD is not set 612# CONFIG_KEYBOARD_LKKBD is not set
576# CONFIG_KEYBOARD_NEWTON is not set 613# CONFIG_KEYBOARD_NEWTON is not set
614# CONFIG_KEYBOARD_OPENCORES is not set
577# CONFIG_KEYBOARD_STOWAWAY is not set 615# CONFIG_KEYBOARD_STOWAWAY is not set
578CONFIG_KEYBOARD_SUNKBD=m 616CONFIG_KEYBOARD_SUNKBD=m
579# CONFIG_KEYBOARD_XTKBD is not set 617# CONFIG_KEYBOARD_XTKBD is not set
@@ -604,6 +642,7 @@ CONFIG_SERIO_SERPORT=m
604# CONFIG_SERIO_PCIPS2 is not set 642# CONFIG_SERIO_PCIPS2 is not set
605CONFIG_SERIO_LIBPS2=m 643CONFIG_SERIO_LIBPS2=m
606# CONFIG_SERIO_RAW is not set 644# CONFIG_SERIO_RAW is not set
645# CONFIG_SERIO_ALTERA_PS2 is not set
607# CONFIG_GAMEPORT is not set 646# CONFIG_GAMEPORT is not set
608 647
609# 648#
@@ -636,6 +675,7 @@ CONFIG_SERIAL_CORE=y
636CONFIG_SERIAL_CORE_CONSOLE=y 675CONFIG_SERIAL_CORE_CONSOLE=y
637CONFIG_CONSOLE_POLL=y 676CONFIG_CONSOLE_POLL=y
638# CONFIG_SERIAL_JSM is not set 677# CONFIG_SERIAL_JSM is not set
678# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
639CONFIG_UNIX98_PTYS=y 679CONFIG_UNIX98_PTYS=y
640# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 680# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
641CONFIG_LEGACY_PTYS=y 681CONFIG_LEGACY_PTYS=y
@@ -661,6 +701,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
661# CONFIG_POWER_SUPPLY is not set 701# CONFIG_POWER_SUPPLY is not set
662CONFIG_HWMON=y 702CONFIG_HWMON=y
663# CONFIG_HWMON_VID is not set 703# CONFIG_HWMON_VID is not set
704# CONFIG_HWMON_DEBUG_CHIP is not set
705
706#
707# Native drivers
708#
664# CONFIG_SENSORS_I5K_AMB is not set 709# CONFIG_SENSORS_I5K_AMB is not set
665# CONFIG_SENSORS_F71805F is not set 710# CONFIG_SENSORS_F71805F is not set
666# CONFIG_SENSORS_F71882FG is not set 711# CONFIG_SENSORS_F71882FG is not set
@@ -675,9 +720,7 @@ CONFIG_HWMON=y
675# CONFIG_SENSORS_VT8231 is not set 720# CONFIG_SENSORS_VT8231 is not set
676# CONFIG_SENSORS_W83627HF is not set 721# CONFIG_SENSORS_W83627HF is not set
677# CONFIG_SENSORS_W83627EHF is not set 722# CONFIG_SENSORS_W83627EHF is not set
678# CONFIG_HWMON_DEBUG_CHIP is not set
679# CONFIG_THERMAL is not set 723# CONFIG_THERMAL is not set
680# CONFIG_THERMAL_HWMON is not set
681# CONFIG_WATCHDOG is not set 724# CONFIG_WATCHDOG is not set
682CONFIG_SSB_POSSIBLE=y 725CONFIG_SSB_POSSIBLE=y
683 726
@@ -699,6 +742,7 @@ CONFIG_SSB_POSSIBLE=y
699# 742#
700# Graphics support 743# Graphics support
701# 744#
745CONFIG_VGA_ARB=y
702# CONFIG_VGASTATE is not set 746# CONFIG_VGASTATE is not set
703# CONFIG_VIDEO_OUTPUT_CONTROL is not set 747# CONFIG_VIDEO_OUTPUT_CONTROL is not set
704# CONFIG_FB is not set 748# CONFIG_FB is not set
@@ -776,7 +820,9 @@ CONFIG_RTC_INTF_DEV=y
776# CONFIG_RTC_DRV_M48T86 is not set 820# CONFIG_RTC_DRV_M48T86 is not set
777# CONFIG_RTC_DRV_M48T35 is not set 821# CONFIG_RTC_DRV_M48T35 is not set
778CONFIG_RTC_DRV_M48T59=y 822CONFIG_RTC_DRV_M48T59=y
823# CONFIG_RTC_DRV_MSM6242 is not set
779# CONFIG_RTC_DRV_BQ4802 is not set 824# CONFIG_RTC_DRV_BQ4802 is not set
825# CONFIG_RTC_DRV_RP5C01 is not set
780# CONFIG_RTC_DRV_V3020 is not set 826# CONFIG_RTC_DRV_V3020 is not set
781 827
782# 828#
@@ -955,6 +1001,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
955CONFIG_ENABLE_MUST_CHECK=y 1001CONFIG_ENABLE_MUST_CHECK=y
956CONFIG_FRAME_WARN=1024 1002CONFIG_FRAME_WARN=1024
957CONFIG_MAGIC_SYSRQ=y 1003CONFIG_MAGIC_SYSRQ=y
1004# CONFIG_STRIP_ASM_SYMS is not set
958# CONFIG_UNUSED_SYMBOLS is not set 1005# CONFIG_UNUSED_SYMBOLS is not set
959# CONFIG_DEBUG_FS is not set 1006# CONFIG_DEBUG_FS is not set
960# CONFIG_HEADERS_CHECK is not set 1007# CONFIG_HEADERS_CHECK is not set
@@ -1003,9 +1050,9 @@ CONFIG_KGDB=y
1003CONFIG_KGDB_SERIAL_CONSOLE=y 1050CONFIG_KGDB_SERIAL_CONSOLE=y
1004CONFIG_KGDB_TESTS=y 1051CONFIG_KGDB_TESTS=y
1005# CONFIG_KGDB_TESTS_ON_BOOT is not set 1052# CONFIG_KGDB_TESTS_ON_BOOT is not set
1006# CONFIG_KMEMCHECK is not set
1007# CONFIG_DEBUG_STACK_USAGE is not set 1053# CONFIG_DEBUG_STACK_USAGE is not set
1008# CONFIG_STACK_DEBUG is not set 1054# CONFIG_STACK_DEBUG is not set
1055# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
1009 1056
1010# 1057#
1011# Security options 1058# Security options
@@ -1013,7 +1060,11 @@ CONFIG_KGDB_TESTS=y
1013# CONFIG_KEYS is not set 1060# CONFIG_KEYS is not set
1014# CONFIG_SECURITY is not set 1061# CONFIG_SECURITY is not set
1015# CONFIG_SECURITYFS is not set 1062# CONFIG_SECURITYFS is not set
1016# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1063# CONFIG_DEFAULT_SECURITY_SELINUX is not set
1064# CONFIG_DEFAULT_SECURITY_SMACK is not set
1065# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1066CONFIG_DEFAULT_SECURITY_DAC=y
1067CONFIG_DEFAULT_SECURITY=""
1017CONFIG_CRYPTO=y 1068CONFIG_CRYPTO=y
1018 1069
1019# 1070#
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index f80b881dfea7..41c5a56aa6f2 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31 3# Linux kernel version: 2.6.33-rc2
4# Tue Sep 15 17:06:03 2009 4# Wed Jan 20 16:31:47 2010
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7CONFIG_SPARC=y 7CONFIG_SPARC=y
@@ -20,6 +20,7 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
20CONFIG_AUDIT_ARCH=y 20CONFIG_AUDIT_ARCH=y
21CONFIG_HAVE_SETUP_PER_CPU_AREA=y 21CONFIG_HAVE_SETUP_PER_CPU_AREA=y
22CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y 22CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
23CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
23CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 24CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
24CONFIG_MMU=y 25CONFIG_MMU=y
25CONFIG_ARCH_NO_VIRT_TO_BUS=y 26CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -50,6 +51,7 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
50# 51#
51CONFIG_TREE_RCU=y 52CONFIG_TREE_RCU=y
52# CONFIG_TREE_PREEMPT_RCU is not set 53# CONFIG_TREE_PREEMPT_RCU is not set
54# CONFIG_TINY_RCU is not set
53# CONFIG_RCU_TRACE is not set 55# CONFIG_RCU_TRACE is not set
54CONFIG_RCU_FANOUT=64 56CONFIG_RCU_FANOUT=64
55# CONFIG_RCU_FANOUT_EXACT is not set 57# CONFIG_RCU_FANOUT_EXACT is not set
@@ -62,8 +64,7 @@ CONFIG_RT_GROUP_SCHED=y
62CONFIG_USER_SCHED=y 64CONFIG_USER_SCHED=y
63# CONFIG_CGROUP_SCHED is not set 65# CONFIG_CGROUP_SCHED is not set
64# CONFIG_CGROUPS is not set 66# CONFIG_CGROUPS is not set
65CONFIG_SYSFS_DEPRECATED=y 67# CONFIG_SYSFS_DEPRECATED_V2 is not set
66CONFIG_SYSFS_DEPRECATED_V2=y
67CONFIG_RELAY=y 68CONFIG_RELAY=y
68CONFIG_NAMESPACES=y 69CONFIG_NAMESPACES=y
69# CONFIG_UTS_NS is not set 70# CONFIG_UTS_NS is not set
@@ -97,24 +98,25 @@ CONFIG_TIMERFD=y
97CONFIG_EVENTFD=y 98CONFIG_EVENTFD=y
98CONFIG_SHMEM=y 99CONFIG_SHMEM=y
99CONFIG_AIO=y 100CONFIG_AIO=y
100CONFIG_HAVE_PERF_COUNTERS=y 101CONFIG_HAVE_PERF_EVENTS=y
102CONFIG_PERF_USE_VMALLOC=y
101 103
102# 104#
103# Performance Counters 105# Kernel Performance Events And Counters
104# 106#
105CONFIG_PERF_COUNTERS=y 107CONFIG_PERF_EVENTS=y
106CONFIG_EVENT_PROFILE=y 108CONFIG_EVENT_PROFILE=y
109CONFIG_PERF_COUNTERS=y
110# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
107CONFIG_VM_EVENT_COUNTERS=y 111CONFIG_VM_EVENT_COUNTERS=y
108CONFIG_PCI_QUIRKS=y 112CONFIG_PCI_QUIRKS=y
109CONFIG_SLUB_DEBUG=y 113CONFIG_SLUB_DEBUG=y
110# CONFIG_STRIP_ASM_SYMS is not set
111# CONFIG_COMPAT_BRK is not set 114# CONFIG_COMPAT_BRK is not set
112# CONFIG_SLAB is not set 115# CONFIG_SLAB is not set
113CONFIG_SLUB=y 116CONFIG_SLUB=y
114# CONFIG_SLOB is not set 117# CONFIG_SLOB is not set
115CONFIG_PROFILING=y 118CONFIG_PROFILING=y
116CONFIG_TRACEPOINTS=y 119CONFIG_TRACEPOINTS=y
117CONFIG_MARKERS=y
118CONFIG_OPROFILE=m 120CONFIG_OPROFILE=m
119CONFIG_HAVE_OPROFILE=y 121CONFIG_HAVE_OPROFILE=y
120CONFIG_KPROBES=y 122CONFIG_KPROBES=y
@@ -152,14 +154,41 @@ CONFIG_BLOCK_COMPAT=y
152# IO Schedulers 154# IO Schedulers
153# 155#
154CONFIG_IOSCHED_NOOP=y 156CONFIG_IOSCHED_NOOP=y
155CONFIG_IOSCHED_AS=y
156CONFIG_IOSCHED_DEADLINE=y 157CONFIG_IOSCHED_DEADLINE=y
157CONFIG_IOSCHED_CFQ=y 158CONFIG_IOSCHED_CFQ=y
158CONFIG_DEFAULT_AS=y
159# CONFIG_DEFAULT_DEADLINE is not set 159# CONFIG_DEFAULT_DEADLINE is not set
160# CONFIG_DEFAULT_CFQ is not set 160CONFIG_DEFAULT_CFQ=y
161# CONFIG_DEFAULT_NOOP is not set 161# CONFIG_DEFAULT_NOOP is not set
162CONFIG_DEFAULT_IOSCHED="anticipatory" 162CONFIG_DEFAULT_IOSCHED="cfq"
163# CONFIG_INLINE_SPIN_TRYLOCK is not set
164# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
165# CONFIG_INLINE_SPIN_LOCK is not set
166# CONFIG_INLINE_SPIN_LOCK_BH is not set
167# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
168# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
169CONFIG_INLINE_SPIN_UNLOCK=y
170# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
171CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
172# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
173# CONFIG_INLINE_READ_TRYLOCK is not set
174# CONFIG_INLINE_READ_LOCK is not set
175# CONFIG_INLINE_READ_LOCK_BH is not set
176# CONFIG_INLINE_READ_LOCK_IRQ is not set
177# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
178CONFIG_INLINE_READ_UNLOCK=y
179# CONFIG_INLINE_READ_UNLOCK_BH is not set
180CONFIG_INLINE_READ_UNLOCK_IRQ=y
181# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
182# CONFIG_INLINE_WRITE_TRYLOCK is not set
183# CONFIG_INLINE_WRITE_LOCK is not set
184# CONFIG_INLINE_WRITE_LOCK_BH is not set
185# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
186# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
187CONFIG_INLINE_WRITE_UNLOCK=y
188# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
189CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
190# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
191CONFIG_MUTEX_SPIN_ON_OWNER=y
163# CONFIG_FREEZER is not set 192# CONFIG_FREEZER is not set
164 193
165# 194#
@@ -179,6 +208,7 @@ CONFIG_GENERIC_HWEIGHT=y
179CONFIG_GENERIC_CALIBRATE_DELAY=y 208CONFIG_GENERIC_CALIBRATE_DELAY=y
180CONFIG_ARCH_MAY_HAVE_PC_FDC=y 209CONFIG_ARCH_MAY_HAVE_PC_FDC=y
181CONFIG_SPARC64_SMP=y 210CONFIG_SPARC64_SMP=y
211CONFIG_EARLYFB=y
182CONFIG_SPARC64_PAGE_SIZE_8KB=y 212CONFIG_SPARC64_PAGE_SIZE_8KB=y
183# CONFIG_SPARC64_PAGE_SIZE_64KB is not set 213# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
184CONFIG_SECCOMP=y 214CONFIG_SECCOMP=y
@@ -216,8 +246,7 @@ CONFIG_MIGRATION=y
216CONFIG_PHYS_ADDR_T_64BIT=y 246CONFIG_PHYS_ADDR_T_64BIT=y
217CONFIG_ZONE_DMA_FLAG=0 247CONFIG_ZONE_DMA_FLAG=0
218CONFIG_NR_QUICK=1 248CONFIG_NR_QUICK=1
219CONFIG_HAVE_MLOCK=y 249# CONFIG_KSM is not set
220CONFIG_HAVE_MLOCKED_PAGE_BIT=y
221CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 250CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
222CONFIG_SCHED_SMT=y 251CONFIG_SCHED_SMT=y
223CONFIG_SCHED_MC=y 252CONFIG_SCHED_MC=y
@@ -315,6 +344,7 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
315CONFIG_INET6_XFRM_MODE_BEET=m 344CONFIG_INET6_XFRM_MODE_BEET=m
316# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set 345# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
317CONFIG_IPV6_SIT=m 346CONFIG_IPV6_SIT=m
347# CONFIG_IPV6_SIT_6RD is not set
318CONFIG_IPV6_NDISC_NODETYPE=y 348CONFIG_IPV6_NDISC_NODETYPE=y
319CONFIG_IPV6_TUNNEL=m 349CONFIG_IPV6_TUNNEL=m
320# CONFIG_IPV6_MULTIPLE_TABLES is not set 350# CONFIG_IPV6_MULTIPLE_TABLES is not set
@@ -356,9 +386,6 @@ CONFIG_NET_TCPPROBE=m
356# CONFIG_AF_RXRPC is not set 386# CONFIG_AF_RXRPC is not set
357CONFIG_WIRELESS=y 387CONFIG_WIRELESS=y
358# CONFIG_CFG80211 is not set 388# CONFIG_CFG80211 is not set
359CONFIG_CFG80211_DEFAULT_PS_VALUE=0
360CONFIG_WIRELESS_OLD_REGULATORY=y
361# CONFIG_WIRELESS_EXT is not set
362# CONFIG_LIB80211 is not set 389# CONFIG_LIB80211 is not set
363 390
364# 391#
@@ -376,6 +403,7 @@ CONFIG_WIRELESS_OLD_REGULATORY=y
376# Generic Driver Options 403# Generic Driver Options
377# 404#
378CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 405CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
406# CONFIG_DEVTMPFS is not set
379CONFIG_STANDALONE=y 407CONFIG_STANDALONE=y
380# CONFIG_PREVENT_FIRMWARE_BUILD is not set 408# CONFIG_PREVENT_FIRMWARE_BUILD is not set
381CONFIG_FW_LOADER=y 409CONFIG_FW_LOADER=y
@@ -397,6 +425,11 @@ CONFIG_BLK_DEV=y
397# CONFIG_BLK_DEV_COW_COMMON is not set 425# CONFIG_BLK_DEV_COW_COMMON is not set
398CONFIG_BLK_DEV_LOOP=m 426CONFIG_BLK_DEV_LOOP=m
399CONFIG_BLK_DEV_CRYPTOLOOP=m 427CONFIG_BLK_DEV_CRYPTOLOOP=m
428
429#
430# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
431#
432# CONFIG_BLK_DEV_DRBD is not set
400CONFIG_BLK_DEV_NBD=m 433CONFIG_BLK_DEV_NBD=m
401# CONFIG_BLK_DEV_SX8 is not set 434# CONFIG_BLK_DEV_SX8 is not set
402# CONFIG_BLK_DEV_UB is not set 435# CONFIG_BLK_DEV_UB is not set
@@ -408,6 +441,7 @@ CONFIG_ATA_OVER_ETH=m
408CONFIG_SUNVDC=m 441CONFIG_SUNVDC=m
409# CONFIG_BLK_DEV_HD is not set 442# CONFIG_BLK_DEV_HD is not set
410CONFIG_MISC_DEVICES=y 443CONFIG_MISC_DEVICES=y
444# CONFIG_AD525X_DPOT is not set
411# CONFIG_PHANTOM is not set 445# CONFIG_PHANTOM is not set
412# CONFIG_SGI_IOC4 is not set 446# CONFIG_SGI_IOC4 is not set
413# CONFIG_TIFM_CORE is not set 447# CONFIG_TIFM_CORE is not set
@@ -415,6 +449,7 @@ CONFIG_MISC_DEVICES=y
415# CONFIG_ENCLOSURE_SERVICES is not set 449# CONFIG_ENCLOSURE_SERVICES is not set
416# CONFIG_HP_ILO is not set 450# CONFIG_HP_ILO is not set
417# CONFIG_ISL29003 is not set 451# CONFIG_ISL29003 is not set
452# CONFIG_DS1682 is not set
418# CONFIG_C2PORT is not set 453# CONFIG_C2PORT is not set
419 454
420# 455#
@@ -522,8 +557,11 @@ CONFIG_SCSI_LOWLEVEL=y
522# CONFIG_ISCSI_TCP is not set 557# CONFIG_ISCSI_TCP is not set
523# CONFIG_SCSI_CXGB3_ISCSI is not set 558# CONFIG_SCSI_CXGB3_ISCSI is not set
524# CONFIG_SCSI_BNX2_ISCSI is not set 559# CONFIG_SCSI_BNX2_ISCSI is not set
560# CONFIG_BE2ISCSI is not set
525# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 561# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
562# CONFIG_SCSI_HPSA is not set
526# CONFIG_SCSI_3W_9XXX is not set 563# CONFIG_SCSI_3W_9XXX is not set
564# CONFIG_SCSI_3W_SAS is not set
527# CONFIG_SCSI_ACARD is not set 565# CONFIG_SCSI_ACARD is not set
528# CONFIG_SCSI_AACRAID is not set 566# CONFIG_SCSI_AACRAID is not set
529# CONFIG_SCSI_AIC7XXX is not set 567# CONFIG_SCSI_AIC7XXX is not set
@@ -557,7 +595,9 @@ CONFIG_SCSI_LOWLEVEL=y
557# CONFIG_SCSI_DEBUG is not set 595# CONFIG_SCSI_DEBUG is not set
558# CONFIG_SCSI_SUNESP is not set 596# CONFIG_SCSI_SUNESP is not set
559# CONFIG_SCSI_PMCRAID is not set 597# CONFIG_SCSI_PMCRAID is not set
598# CONFIG_SCSI_PM8001 is not set
560# CONFIG_SCSI_SRP is not set 599# CONFIG_SCSI_SRP is not set
600# CONFIG_SCSI_BFA_FC is not set
561# CONFIG_SCSI_DH is not set 601# CONFIG_SCSI_DH is not set
562# CONFIG_SCSI_OSD_INITIATOR is not set 602# CONFIG_SCSI_OSD_INITIATOR is not set
563# CONFIG_ATA is not set 603# CONFIG_ATA is not set
@@ -568,7 +608,9 @@ CONFIG_MD_RAID0=m
568CONFIG_MD_RAID1=m 608CONFIG_MD_RAID1=m
569CONFIG_MD_RAID10=m 609CONFIG_MD_RAID10=m
570CONFIG_MD_RAID456=m 610CONFIG_MD_RAID456=m
611# CONFIG_MULTICORE_RAID456 is not set
571CONFIG_MD_RAID6_PQ=m 612CONFIG_MD_RAID6_PQ=m
613# CONFIG_ASYNC_RAID6_TEST is not set
572CONFIG_MD_MULTIPATH=m 614CONFIG_MD_MULTIPATH=m
573# CONFIG_MD_FAULTY is not set 615# CONFIG_MD_FAULTY is not set
574CONFIG_BLK_DEV_DM=m 616CONFIG_BLK_DEV_DM=m
@@ -592,7 +634,7 @@ CONFIG_DM_ZERO=m
592# 634#
593 635
594# 636#
595# See the help texts for more information. 637# The newer stack is recommended.
596# 638#
597# CONFIG_FIREWIRE is not set 639# CONFIG_FIREWIRE is not set
598# CONFIG_IEEE1394 is not set 640# CONFIG_IEEE1394 is not set
@@ -664,6 +706,7 @@ CONFIG_NET_PCI=y
664# CONFIG_SUNDANCE is not set 706# CONFIG_SUNDANCE is not set
665# CONFIG_TLAN is not set 707# CONFIG_TLAN is not set
666# CONFIG_KS8842 is not set 708# CONFIG_KS8842 is not set
709# CONFIG_KS8851_MLL is not set
667# CONFIG_VIA_RHINE is not set 710# CONFIG_VIA_RHINE is not set
668# CONFIG_SC92031 is not set 711# CONFIG_SC92031 is not set
669# CONFIG_ATL2 is not set 712# CONFIG_ATL2 is not set
@@ -745,6 +788,7 @@ CONFIG_SLHC=m
745# CONFIG_NETCONSOLE is not set 788# CONFIG_NETCONSOLE is not set
746# CONFIG_NETPOLL is not set 789# CONFIG_NETPOLL is not set
747# CONFIG_NET_POLL_CONTROLLER is not set 790# CONFIG_NET_POLL_CONTROLLER is not set
791# CONFIG_VMXNET3 is not set
748# CONFIG_ISDN is not set 792# CONFIG_ISDN is not set
749# CONFIG_PHONE is not set 793# CONFIG_PHONE is not set
750 794
@@ -754,6 +798,7 @@ CONFIG_SLHC=m
754CONFIG_INPUT=y 798CONFIG_INPUT=y
755# CONFIG_INPUT_FF_MEMLESS is not set 799# CONFIG_INPUT_FF_MEMLESS is not set
756# CONFIG_INPUT_POLLDEV is not set 800# CONFIG_INPUT_POLLDEV is not set
801# CONFIG_INPUT_SPARSEKMAP is not set
757 802
758# 803#
759# Userland interfaces 804# Userland interfaces
@@ -770,9 +815,13 @@ CONFIG_INPUT_EVDEV=y
770# Input Device Drivers 815# Input Device Drivers
771# 816#
772CONFIG_INPUT_KEYBOARD=y 817CONFIG_INPUT_KEYBOARD=y
818# CONFIG_KEYBOARD_ADP5588 is not set
773CONFIG_KEYBOARD_ATKBD=y 819CONFIG_KEYBOARD_ATKBD=y
820# CONFIG_QT2160 is not set
774CONFIG_KEYBOARD_LKKBD=m 821CONFIG_KEYBOARD_LKKBD=m
822# CONFIG_KEYBOARD_MAX7359 is not set
775# CONFIG_KEYBOARD_NEWTON is not set 823# CONFIG_KEYBOARD_NEWTON is not set
824# CONFIG_KEYBOARD_OPENCORES is not set
776# CONFIG_KEYBOARD_STOWAWAY is not set 825# CONFIG_KEYBOARD_STOWAWAY is not set
777CONFIG_KEYBOARD_SUNKBD=y 826CONFIG_KEYBOARD_SUNKBD=y
778# CONFIG_KEYBOARD_XTKBD is not set 827# CONFIG_KEYBOARD_XTKBD is not set
@@ -812,6 +861,7 @@ CONFIG_SERIO_I8042=y
812CONFIG_SERIO_PCIPS2=m 861CONFIG_SERIO_PCIPS2=m
813CONFIG_SERIO_LIBPS2=y 862CONFIG_SERIO_LIBPS2=y
814CONFIG_SERIO_RAW=m 863CONFIG_SERIO_RAW=m
864# CONFIG_SERIO_ALTERA_PS2 is not set
815# CONFIG_GAMEPORT is not set 865# CONFIG_GAMEPORT is not set
816 866
817# 867#
@@ -844,6 +894,7 @@ CONFIG_SERIAL_SUNHV=y
844CONFIG_SERIAL_CORE=y 894CONFIG_SERIAL_CORE=y
845CONFIG_SERIAL_CORE_CONSOLE=y 895CONFIG_SERIAL_CORE_CONSOLE=y
846# CONFIG_SERIAL_JSM is not set 896# CONFIG_SERIAL_JSM is not set
897# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
847CONFIG_UNIX98_PTYS=y 898CONFIG_UNIX98_PTYS=y
848# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 899# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
849# CONFIG_LEGACY_PTYS is not set 900# CONFIG_LEGACY_PTYS is not set
@@ -858,6 +909,7 @@ CONFIG_HW_RANDOM_N2RNG=m
858CONFIG_DEVPORT=y 909CONFIG_DEVPORT=y
859CONFIG_I2C=y 910CONFIG_I2C=y
860CONFIG_I2C_BOARDINFO=y 911CONFIG_I2C_BOARDINFO=y
912CONFIG_I2C_COMPAT=y
861# CONFIG_I2C_CHARDEV is not set 913# CONFIG_I2C_CHARDEV is not set
862CONFIG_I2C_HELPER_AUTO=y 914CONFIG_I2C_HELPER_AUTO=y
863CONFIG_I2C_ALGOBIT=y 915CONFIG_I2C_ALGOBIT=y
@@ -898,11 +950,6 @@ CONFIG_I2C_ALGOBIT=y
898# CONFIG_I2C_TINY_USB is not set 950# CONFIG_I2C_TINY_USB is not set
899 951
900# 952#
901# Graphics adapter I2C/DDC channel drivers
902#
903# CONFIG_I2C_VOODOO3 is not set
904
905#
906# Other I2C/SMBus bus drivers 953# Other I2C/SMBus bus drivers
907# 954#
908# CONFIG_I2C_PCA_PLATFORM is not set 955# CONFIG_I2C_PCA_PLATFORM is not set
@@ -911,10 +958,6 @@ CONFIG_I2C_ALGOBIT=y
911# 958#
912# Miscellaneous I2C Chip support 959# Miscellaneous I2C Chip support
913# 960#
914# CONFIG_DS1682 is not set
915# CONFIG_SENSORS_PCF8574 is not set
916# CONFIG_PCF8575 is not set
917# CONFIG_SENSORS_PCA9539 is not set
918# CONFIG_SENSORS_TSL2550 is not set 961# CONFIG_SENSORS_TSL2550 is not set
919# CONFIG_I2C_DEBUG_CORE is not set 962# CONFIG_I2C_DEBUG_CORE is not set
920# CONFIG_I2C_DEBUG_ALGO is not set 963# CONFIG_I2C_DEBUG_ALGO is not set
@@ -932,6 +975,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
932# CONFIG_POWER_SUPPLY is not set 975# CONFIG_POWER_SUPPLY is not set
933CONFIG_HWMON=y 976CONFIG_HWMON=y
934# CONFIG_HWMON_VID is not set 977# CONFIG_HWMON_VID is not set
978# CONFIG_HWMON_DEBUG_CHIP is not set
979
980#
981# Native drivers
982#
935# CONFIG_SENSORS_AD7414 is not set 983# CONFIG_SENSORS_AD7414 is not set
936# CONFIG_SENSORS_AD7418 is not set 984# CONFIG_SENSORS_AD7418 is not set
937# CONFIG_SENSORS_ADM1021 is not set 985# CONFIG_SENSORS_ADM1021 is not set
@@ -955,6 +1003,7 @@ CONFIG_HWMON=y
955# CONFIG_SENSORS_GL520SM is not set 1003# CONFIG_SENSORS_GL520SM is not set
956# CONFIG_SENSORS_IT87 is not set 1004# CONFIG_SENSORS_IT87 is not set
957# CONFIG_SENSORS_LM63 is not set 1005# CONFIG_SENSORS_LM63 is not set
1006# CONFIG_SENSORS_LM73 is not set
958# CONFIG_SENSORS_LM75 is not set 1007# CONFIG_SENSORS_LM75 is not set
959# CONFIG_SENSORS_LM77 is not set 1008# CONFIG_SENSORS_LM77 is not set
960# CONFIG_SENSORS_LM78 is not set 1009# CONFIG_SENSORS_LM78 is not set
@@ -981,6 +1030,7 @@ CONFIG_HWMON=y
981# CONFIG_SENSORS_ADS7828 is not set 1030# CONFIG_SENSORS_ADS7828 is not set
982# CONFIG_SENSORS_THMC50 is not set 1031# CONFIG_SENSORS_THMC50 is not set
983# CONFIG_SENSORS_TMP401 is not set 1032# CONFIG_SENSORS_TMP401 is not set
1033# CONFIG_SENSORS_TMP421 is not set
984# CONFIG_SENSORS_VIA686A is not set 1034# CONFIG_SENSORS_VIA686A is not set
985# CONFIG_SENSORS_VT1211 is not set 1035# CONFIG_SENSORS_VT1211 is not set
986# CONFIG_SENSORS_VT8231 is not set 1036# CONFIG_SENSORS_VT8231 is not set
@@ -993,9 +1043,8 @@ CONFIG_HWMON=y
993# CONFIG_SENSORS_W83627HF is not set 1043# CONFIG_SENSORS_W83627HF is not set
994# CONFIG_SENSORS_W83627EHF is not set 1044# CONFIG_SENSORS_W83627EHF is not set
995# CONFIG_SENSORS_ULTRA45 is not set 1045# CONFIG_SENSORS_ULTRA45 is not set
996# CONFIG_HWMON_DEBUG_CHIP is not set 1046# CONFIG_SENSORS_LIS3_I2C is not set
997# CONFIG_THERMAL is not set 1047# CONFIG_THERMAL is not set
998# CONFIG_THERMAL_HWMON is not set
999# CONFIG_WATCHDOG is not set 1048# CONFIG_WATCHDOG is not set
1000CONFIG_SSB_POSSIBLE=y 1049CONFIG_SSB_POSSIBLE=y
1001 1050
@@ -1013,16 +1062,20 @@ CONFIG_SSB_POSSIBLE=y
1013# CONFIG_TWL4030_CORE is not set 1062# CONFIG_TWL4030_CORE is not set
1014# CONFIG_MFD_TMIO is not set 1063# CONFIG_MFD_TMIO is not set
1015# CONFIG_PMIC_DA903X is not set 1064# CONFIG_PMIC_DA903X is not set
1065# CONFIG_PMIC_ADP5520 is not set
1016# CONFIG_MFD_WM8400 is not set 1066# CONFIG_MFD_WM8400 is not set
1067# CONFIG_MFD_WM831X is not set
1017# CONFIG_MFD_WM8350_I2C is not set 1068# CONFIG_MFD_WM8350_I2C is not set
1018# CONFIG_MFD_PCF50633 is not set 1069# CONFIG_MFD_PCF50633 is not set
1019# CONFIG_AB3100_CORE is not set 1070# CONFIG_AB3100_CORE is not set
1071# CONFIG_MFD_88PM8607 is not set
1020# CONFIG_REGULATOR is not set 1072# CONFIG_REGULATOR is not set
1021# CONFIG_MEDIA_SUPPORT is not set 1073# CONFIG_MEDIA_SUPPORT is not set
1022 1074
1023# 1075#
1024# Graphics support 1076# Graphics support
1025# 1077#
1078CONFIG_VGA_ARB=y
1026# CONFIG_DRM is not set 1079# CONFIG_DRM is not set
1027# CONFIG_VGASTATE is not set 1080# CONFIG_VGASTATE is not set
1028# CONFIG_VIDEO_OUTPUT_CONTROL is not set 1081# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1176,6 +1229,7 @@ CONFIG_SND_ALI5451=m
1176# CONFIG_SND_OXYGEN is not set 1229# CONFIG_SND_OXYGEN is not set
1177# CONFIG_SND_CS4281 is not set 1230# CONFIG_SND_CS4281 is not set
1178# CONFIG_SND_CS46XX is not set 1231# CONFIG_SND_CS46XX is not set
1232# CONFIG_SND_CS5535AUDIO is not set
1179# CONFIG_SND_CTXFI is not set 1233# CONFIG_SND_CTXFI is not set
1180# CONFIG_SND_DARLA20 is not set 1234# CONFIG_SND_DARLA20 is not set
1181# CONFIG_SND_GINA20 is not set 1235# CONFIG_SND_GINA20 is not set
@@ -1311,6 +1365,7 @@ CONFIG_USB_EHCI_HCD=m
1311# CONFIG_USB_OXU210HP_HCD is not set 1365# CONFIG_USB_OXU210HP_HCD is not set
1312# CONFIG_USB_ISP116X_HCD is not set 1366# CONFIG_USB_ISP116X_HCD is not set
1313# CONFIG_USB_ISP1760_HCD is not set 1367# CONFIG_USB_ISP1760_HCD is not set
1368# CONFIG_USB_ISP1362_HCD is not set
1314CONFIG_USB_OHCI_HCD=y 1369CONFIG_USB_OHCI_HCD=y
1315# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1370# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
1316# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set 1371# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1426,6 +1481,7 @@ CONFIG_RTC_INTF_DEV=y
1426# CONFIG_RTC_DRV_PCF8563 is not set 1481# CONFIG_RTC_DRV_PCF8563 is not set
1427# CONFIG_RTC_DRV_PCF8583 is not set 1482# CONFIG_RTC_DRV_PCF8583 is not set
1428# CONFIG_RTC_DRV_M41T80 is not set 1483# CONFIG_RTC_DRV_M41T80 is not set
1484# CONFIG_RTC_DRV_BQ32K is not set
1429# CONFIG_RTC_DRV_S35390A is not set 1485# CONFIG_RTC_DRV_S35390A is not set
1430# CONFIG_RTC_DRV_FM3130 is not set 1486# CONFIG_RTC_DRV_FM3130 is not set
1431# CONFIG_RTC_DRV_RX8581 is not set 1487# CONFIG_RTC_DRV_RX8581 is not set
@@ -1447,7 +1503,9 @@ CONFIG_RTC_DRV_CMOS=y
1447# CONFIG_RTC_DRV_M48T86 is not set 1503# CONFIG_RTC_DRV_M48T86 is not set
1448# CONFIG_RTC_DRV_M48T35 is not set 1504# CONFIG_RTC_DRV_M48T35 is not set
1449CONFIG_RTC_DRV_M48T59=y 1505CONFIG_RTC_DRV_M48T59=y
1506# CONFIG_RTC_DRV_MSM6242 is not set
1450CONFIG_RTC_DRV_BQ4802=y 1507CONFIG_RTC_DRV_BQ4802=y
1508# CONFIG_RTC_DRV_RP5C01 is not set
1451# CONFIG_RTC_DRV_V3020 is not set 1509# CONFIG_RTC_DRV_V3020 is not set
1452 1510
1453# 1511#
@@ -1625,6 +1683,7 @@ CONFIG_PRINTK_TIME=y
1625CONFIG_ENABLE_MUST_CHECK=y 1683CONFIG_ENABLE_MUST_CHECK=y
1626CONFIG_FRAME_WARN=2048 1684CONFIG_FRAME_WARN=2048
1627CONFIG_MAGIC_SYSRQ=y 1685CONFIG_MAGIC_SYSRQ=y
1686# CONFIG_STRIP_ASM_SYMS is not set
1628# CONFIG_UNUSED_SYMBOLS is not set 1687# CONFIG_UNUSED_SYMBOLS is not set
1629CONFIG_DEBUG_FS=y 1688CONFIG_DEBUG_FS=y
1630# CONFIG_HEADERS_CHECK is not set 1689# CONFIG_HEADERS_CHECK is not set
@@ -1678,9 +1737,11 @@ CONFIG_NOP_TRACER=y
1678CONFIG_HAVE_FUNCTION_TRACER=y 1737CONFIG_HAVE_FUNCTION_TRACER=y
1679CONFIG_HAVE_DYNAMIC_FTRACE=y 1738CONFIG_HAVE_DYNAMIC_FTRACE=y
1680CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 1739CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1740CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
1681CONFIG_RING_BUFFER=y 1741CONFIG_RING_BUFFER=y
1682CONFIG_EVENT_TRACING=y 1742CONFIG_EVENT_TRACING=y
1683CONFIG_CONTEXT_SWITCH_TRACER=y 1743CONFIG_CONTEXT_SWITCH_TRACER=y
1744CONFIG_RING_BUFFER_ALLOW_SWAP=y
1684CONFIG_TRACING=y 1745CONFIG_TRACING=y
1685CONFIG_GENERIC_TRACER=y 1746CONFIG_GENERIC_TRACER=y
1686CONFIG_TRACING_SUPPORT=y 1747CONFIG_TRACING_SUPPORT=y
@@ -1688,6 +1749,7 @@ CONFIG_FTRACE=y
1688# CONFIG_FUNCTION_TRACER is not set 1749# CONFIG_FUNCTION_TRACER is not set
1689# CONFIG_IRQSOFF_TRACER is not set 1750# CONFIG_IRQSOFF_TRACER is not set
1690# CONFIG_SCHED_TRACER is not set 1751# CONFIG_SCHED_TRACER is not set
1752# CONFIG_FTRACE_SYSCALLS is not set
1691# CONFIG_BOOT_TRACER is not set 1753# CONFIG_BOOT_TRACER is not set
1692CONFIG_BRANCH_PROFILE_NONE=y 1754CONFIG_BRANCH_PROFILE_NONE=y
1693# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set 1755# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
@@ -1706,6 +1768,7 @@ CONFIG_HAVE_ARCH_KGDB=y
1706# CONFIG_DEBUG_STACK_USAGE is not set 1768# CONFIG_DEBUG_STACK_USAGE is not set
1707# CONFIG_DEBUG_DCFLUSH is not set 1769# CONFIG_DEBUG_DCFLUSH is not set
1708# CONFIG_STACK_DEBUG is not set 1770# CONFIG_STACK_DEBUG is not set
1771# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
1709 1772
1710# 1773#
1711# Security options 1774# Security options
@@ -1714,11 +1777,17 @@ CONFIG_KEYS=y
1714# CONFIG_KEYS_DEBUG_PROC_KEYS is not set 1777# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
1715# CONFIG_SECURITY is not set 1778# CONFIG_SECURITY is not set
1716# CONFIG_SECURITYFS is not set 1779# CONFIG_SECURITYFS is not set
1717# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1780# CONFIG_DEFAULT_SECURITY_SELINUX is not set
1781# CONFIG_DEFAULT_SECURITY_SMACK is not set
1782# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1783CONFIG_DEFAULT_SECURITY_DAC=y
1784CONFIG_DEFAULT_SECURITY=""
1718CONFIG_XOR_BLOCKS=m 1785CONFIG_XOR_BLOCKS=m
1719CONFIG_ASYNC_CORE=m 1786CONFIG_ASYNC_CORE=m
1720CONFIG_ASYNC_MEMCPY=m 1787CONFIG_ASYNC_MEMCPY=m
1721CONFIG_ASYNC_XOR=m 1788CONFIG_ASYNC_XOR=m
1789CONFIG_ASYNC_PQ=m
1790CONFIG_ASYNC_RAID6_RECOV=m
1722CONFIG_CRYPTO=y 1791CONFIG_CRYPTO=y
1723 1792
1724# 1793#
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 93fe21e02c86..679c7504625a 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -8,7 +8,7 @@
8#include <asm/page.h> /* IO address mapping routines need this */ 8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h> 9#include <asm/system.h>
10 10
11#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT) 11#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
12 12
13static inline u32 flip_dword (u32 l) 13static inline u32 flip_dword (u32 l)
14{ 14{
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index f72080bdda94..156707b0f18d 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -143,7 +143,7 @@ extern unsigned long pfn_base;
143#define phys_to_virt __va 143#define phys_to_virt __va
144 144
145#define ARCH_PFN_OFFSET (pfn_base) 145#define ARCH_PFN_OFFSET (pfn_base)
146#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT))) 146#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
147 147
148#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) 148#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
149#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr) 149#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
diff --git a/arch/sparc/include/asm/param.h b/arch/sparc/include/asm/param.h
index 9836d9a3cb9a..0bc356bf8c50 100644
--- a/arch/sparc/include/asm/param.h
+++ b/arch/sparc/include/asm/param.h
@@ -1,22 +1,7 @@
1#ifndef _ASMSPARC_PARAM_H 1#ifndef _ASMSPARC_PARAM_H
2#define _ASMSPARC_PARAM_H 2#define _ASMSPARC_PARAM_H
3 3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
7# define CLOCKS_PER_SEC (USER_HZ)
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 8192 /* Thanks for sun4's we carry baggage... */ 4#define EXEC_PAGESIZE 8192 /* Thanks for sun4's we carry baggage... */
5#include <asm-generic/param.h>
15 6
16#ifndef NOGROUP 7#endif /* _ASMSPARC_PARAM_H */
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif
diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h
index b6ccdb0d6f7d..a254750e4c03 100644
--- a/arch/sparc/include/asm/timex_32.h
+++ b/arch/sparc/include/asm/timex_32.h
@@ -12,4 +12,5 @@
12typedef unsigned long cycles_t; 12typedef unsigned long cycles_t;
13#define get_cycles() (0) 13#define get_cycles() (0)
14 14
15extern u32 (*do_arch_gettimeoffset)(void);
15#endif 16#endif
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index 600a79035fa1..1c79f32734a0 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -12,7 +12,9 @@ static inline int cpu_to_node(int cpu)
12 12
13#define parent_node(node) (node) 13#define parent_node(node) (node)
14 14
15#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) 15#define cpumask_of_node(node) ((node) == -1 ? \
16 cpu_all_mask : \
17 &numa_cpumask_lookup_table[node])
16 18
17struct pci_bus; 19struct pci_bus;
18#ifdef CONFIG_PCI 20#ifdef CONFIG_PCI
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 489d2ba92bcb..25f1d10155e8 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -274,7 +274,7 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
274 274
275 if (unlikely(sz != -1 && sz < n)) { 275 if (unlikely(sz != -1 && sz < n)) {
276 copy_from_user_overflow(); 276 copy_from_user_overflow();
277 return -EFAULT; 277 return n;
278 } 278 }
279 279
280 if (n && __access_ok((unsigned long) from, n)) 280 if (n && __access_ok((unsigned long) from, n))
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index dbc141660994..2406788bfe5f 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -221,8 +221,8 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
221static inline unsigned long __must_check 221static inline unsigned long __must_check
222copy_from_user(void *to, const void __user *from, unsigned long size) 222copy_from_user(void *to, const void __user *from, unsigned long size)
223{ 223{
224 unsigned long ret = (unsigned long) -EFAULT;
225 int sz = __compiletime_object_size(to); 224 int sz = __compiletime_object_size(to);
225 unsigned long ret = size;
226 226
227 if (likely(sz == -1 || sz >= size)) { 227 if (likely(sz == -1 || sz >= size)) {
228 ret = ___copy_from_user(to, from, size); 228 ret = ___copy_from_user(to, from, size);
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index f3b5466c389c..4589ca33220f 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -99,7 +99,7 @@ static int __devinit clock_board_probe(struct of_device *op,
99 99
100 p->leds_resource.start = (unsigned long) 100 p->leds_resource.start = (unsigned long)
101 (p->clock_regs + CLOCK_CTRL); 101 (p->clock_regs + CLOCK_CTRL);
102 p->leds_resource.end = p->leds_resource.end; 102 p->leds_resource.end = p->leds_resource.start;
103 p->leds_resource.name = "leds"; 103 p->leds_resource.name = "leds";
104 104
105 p->leds_pdev.name = "sunfire-clockboard-leds"; 105 p->leds_pdev.name = "sunfire-clockboard-leds";
@@ -194,7 +194,7 @@ static int __devinit fhc_probe(struct of_device *op,
194 if (!p->central) { 194 if (!p->central) {
195 p->leds_resource.start = (unsigned long) 195 p->leds_resource.start = (unsigned long)
196 (p->pregs + FHC_PREGS_CTRL); 196 (p->pregs + FHC_PREGS_CTRL);
197 p->leds_resource.end = p->leds_resource.end; 197 p->leds_resource.end = p->leds_resource.start;
198 p->leds_resource.name = "leds"; 198 p->leds_resource.name = "leds";
199 199
200 p->leds_pdev.name = "sunfire-fhc-leds"; 200 p->leds_pdev.name = "sunfire-fhc-leds";
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 8d6882bb480a..f2179cce1e4d 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -250,12 +250,12 @@ struct irq_handler_data {
250}; 250};
251 251
252#ifdef CONFIG_SMP 252#ifdef CONFIG_SMP
253static int irq_choose_cpu(unsigned int virt_irq) 253static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
254{ 254{
255 cpumask_t mask; 255 cpumask_t mask;
256 int cpuid; 256 int cpuid;
257 257
258 cpumask_copy(&mask, irq_desc[virt_irq].affinity); 258 cpumask_copy(&mask, affinity);
259 if (cpus_equal(mask, cpu_online_map)) { 259 if (cpus_equal(mask, cpu_online_map)) {
260 cpuid = map_to_cpu(virt_irq); 260 cpuid = map_to_cpu(virt_irq);
261 } else { 261 } else {
@@ -268,7 +268,7 @@ static int irq_choose_cpu(unsigned int virt_irq)
268 return cpuid; 268 return cpuid;
269} 269}
270#else 270#else
271static int irq_choose_cpu(unsigned int virt_irq) 271static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
272{ 272{
273 return real_hard_smp_processor_id(); 273 return real_hard_smp_processor_id();
274} 274}
@@ -282,7 +282,8 @@ static void sun4u_irq_enable(unsigned int virt_irq)
282 unsigned long cpuid, imap, val; 282 unsigned long cpuid, imap, val;
283 unsigned int tid; 283 unsigned int tid;
284 284
285 cpuid = irq_choose_cpu(virt_irq); 285 cpuid = irq_choose_cpu(virt_irq,
286 irq_desc[virt_irq].affinity);
286 imap = data->imap; 287 imap = data->imap;
287 288
288 tid = sun4u_compute_tid(imap, cpuid); 289 tid = sun4u_compute_tid(imap, cpuid);
@@ -299,7 +300,24 @@ static void sun4u_irq_enable(unsigned int virt_irq)
299static int sun4u_set_affinity(unsigned int virt_irq, 300static int sun4u_set_affinity(unsigned int virt_irq,
300 const struct cpumask *mask) 301 const struct cpumask *mask)
301{ 302{
302 sun4u_irq_enable(virt_irq); 303 struct irq_handler_data *data = get_irq_chip_data(virt_irq);
304
305 if (likely(data)) {
306 unsigned long cpuid, imap, val;
307 unsigned int tid;
308
309 cpuid = irq_choose_cpu(virt_irq, mask);
310 imap = data->imap;
311
312 tid = sun4u_compute_tid(imap, cpuid);
313
314 val = upa_readq(imap);
315 val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
316 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
317 val |= tid | IMAP_VALID;
318 upa_writeq(val, imap);
319 upa_writeq(ICLR_IDLE, data->iclr);
320 }
303 321
304 return 0; 322 return 0;
305} 323}
@@ -340,7 +358,8 @@ static void sun4u_irq_eoi(unsigned int virt_irq)
340static void sun4v_irq_enable(unsigned int virt_irq) 358static void sun4v_irq_enable(unsigned int virt_irq)
341{ 359{
342 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 360 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
343 unsigned long cpuid = irq_choose_cpu(virt_irq); 361 unsigned long cpuid = irq_choose_cpu(virt_irq,
362 irq_desc[virt_irq].affinity);
344 int err; 363 int err;
345 364
346 err = sun4v_intr_settarget(ino, cpuid); 365 err = sun4v_intr_settarget(ino, cpuid);
@@ -361,7 +380,7 @@ static int sun4v_set_affinity(unsigned int virt_irq,
361 const struct cpumask *mask) 380 const struct cpumask *mask)
362{ 381{
363 unsigned int ino = virt_irq_table[virt_irq].dev_ino; 382 unsigned int ino = virt_irq_table[virt_irq].dev_ino;
364 unsigned long cpuid = irq_choose_cpu(virt_irq); 383 unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
365 int err; 384 int err;
366 385
367 err = sun4v_intr_settarget(ino, cpuid); 386 err = sun4v_intr_settarget(ino, cpuid);
@@ -403,7 +422,7 @@ static void sun4v_virq_enable(unsigned int virt_irq)
403 unsigned long cpuid, dev_handle, dev_ino; 422 unsigned long cpuid, dev_handle, dev_ino;
404 int err; 423 int err;
405 424
406 cpuid = irq_choose_cpu(virt_irq); 425 cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
407 426
408 dev_handle = virt_irq_table[virt_irq].dev_handle; 427 dev_handle = virt_irq_table[virt_irq].dev_handle;
409 dev_ino = virt_irq_table[virt_irq].dev_ino; 428 dev_ino = virt_irq_table[virt_irq].dev_ino;
@@ -433,7 +452,7 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
433 unsigned long cpuid, dev_handle, dev_ino; 452 unsigned long cpuid, dev_handle, dev_ino;
434 int err; 453 int err;
435 454
436 cpuid = irq_choose_cpu(virt_irq); 455 cpuid = irq_choose_cpu(virt_irq, mask);
437 456
438 dev_handle = virt_irq_table[virt_irq].dev_handle; 457 dev_handle = virt_irq_table[virt_irq].dev_handle;
439 dev_ino = virt_irq_table[virt_irq].dev_ino; 458 dev_ino = virt_irq_table[virt_irq].dev_ino;
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 85e7037429b9..4e2724ec2bb6 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -30,6 +30,7 @@
30#include <asm/oplib.h> 30#include <asm/oplib.h>
31#include <asm/prom.h> 31#include <asm/prom.h>
32#include <asm/pcic.h> 32#include <asm/pcic.h>
33#include <asm/timex.h>
33#include <asm/timer.h> 34#include <asm/timer.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/irq_regs.h> 36#include <asm/irq_regs.h>
@@ -163,8 +164,6 @@ void __iomem *pcic_regs;
163volatile int pcic_speculative; 164volatile int pcic_speculative;
164volatile int pcic_trapped; 165volatile int pcic_trapped;
165 166
166static void pci_do_gettimeofday(struct timeval *tv);
167static int pci_do_settimeofday(struct timespec *tv);
168 167
169#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) 168#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3))
170 169
@@ -716,19 +715,27 @@ static irqreturn_t pcic_timer_handler (int irq, void *h)
716#define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ 715#define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */
717#define TICK_TIMER_LIMIT ((100*1000000/4)/100) 716#define TICK_TIMER_LIMIT ((100*1000000/4)/100)
718 717
718u32 pci_gettimeoffset(void)
719{
720 /*
721 * We divide all by 100
722 * to have microsecond resolution and to avoid overflow
723 */
724 unsigned long count =
725 readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
726 count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
727 return count * 1000;
728}
729
730
719void __init pci_time_init(void) 731void __init pci_time_init(void)
720{ 732{
721 struct linux_pcic *pcic = &pcic0; 733 struct linux_pcic *pcic = &pcic0;
722 unsigned long v; 734 unsigned long v;
723 int timer_irq, irq; 735 int timer_irq, irq;
724 736
725 /* A hack until do_gettimeofday prototype is moved to arch specific headers 737 do_arch_gettimeoffset = pci_gettimeoffset;
726 and btfixupped. Patch do_gettimeofday with ba pci_do_gettimeofday; nop */ 738
727 ((unsigned int *)do_gettimeofday)[0] =
728 0x10800000 | ((((unsigned long)pci_do_gettimeofday -
729 (unsigned long)do_gettimeofday) >> 2) & 0x003fffff);
730 ((unsigned int *)do_gettimeofday)[1] = 0x01000000;
731 BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM);
732 btfixup(); 739 btfixup();
733 740
734 writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); 741 writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT);
@@ -746,84 +753,6 @@ void __init pci_time_init(void)
746 local_irq_enable(); 753 local_irq_enable();
747} 754}
748 755
749static inline unsigned long do_gettimeoffset(void)
750{
751 /*
752 * We divide all by 100
753 * to have microsecond resolution and to avoid overflow
754 */
755 unsigned long count =
756 readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
757 count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
758 return count;
759}
760
761static void pci_do_gettimeofday(struct timeval *tv)
762{
763 unsigned long flags;
764 unsigned long seq;
765 unsigned long usec, sec;
766 unsigned long max_ntp_tick = tick_usec - tickadj;
767
768 do {
769 seq = read_seqbegin_irqsave(&xtime_lock, flags);
770 usec = do_gettimeoffset();
771
772 /*
773 * If time_adjust is negative then NTP is slowing the clock
774 * so make sure not to go into next possible interval.
775 * Better to lose some accuracy than have time go backwards..
776 */
777 if (unlikely(time_adjust < 0))
778 usec = min(usec, max_ntp_tick);
779
780 sec = xtime.tv_sec;
781 usec += (xtime.tv_nsec / 1000);
782 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
783
784 while (usec >= 1000000) {
785 usec -= 1000000;
786 sec++;
787 }
788
789 tv->tv_sec = sec;
790 tv->tv_usec = usec;
791}
792
793static int pci_do_settimeofday(struct timespec *tv)
794{
795 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
796 return -EINVAL;
797
798 /*
799 * This is revolting. We need to set "xtime" correctly. However, the
800 * value in this location is the value at the most recent update of
801 * wall time. Discover what correction gettimeofday() would have
802 * made, and then undo it!
803 */
804 tv->tv_nsec -= 1000 * do_gettimeoffset();
805 while (tv->tv_nsec < 0) {
806 tv->tv_nsec += NSEC_PER_SEC;
807 tv->tv_sec--;
808 }
809
810 wall_to_monotonic.tv_sec += xtime.tv_sec - tv->tv_sec;
811 wall_to_monotonic.tv_nsec += xtime.tv_nsec - tv->tv_nsec;
812
813 if (wall_to_monotonic.tv_nsec > NSEC_PER_SEC) {
814 wall_to_monotonic.tv_nsec -= NSEC_PER_SEC;
815 wall_to_monotonic.tv_sec++;
816 }
817 if (wall_to_monotonic.tv_nsec < 0) {
818 wall_to_monotonic.tv_nsec += NSEC_PER_SEC;
819 wall_to_monotonic.tv_sec--;
820 }
821
822 xtime.tv_sec = tv->tv_sec;
823 xtime.tv_nsec = tv->tv_nsec;
824 ntp_clear();
825 return 0;
826}
827 756
828#if 0 757#if 0
829static void watchdog_reset() { 758static void watchdog_reset() {
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 198fb4e79ba2..e856456ec02f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,6 +1,6 @@
1/* Performance event support for sparc64. 1/* Performance event support for sparc64.
2 * 2 *
3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
4 * 4 *
5 * This code is based almost entirely upon the x86 perf event 5 * This code is based almost entirely upon the x86 perf event
6 * code, which is: 6 * code, which is:
@@ -18,11 +18,15 @@
18#include <linux/kdebug.h> 18#include <linux/kdebug.h>
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20 20
21#include <asm/stacktrace.h>
21#include <asm/cpudata.h> 22#include <asm/cpudata.h>
23#include <asm/uaccess.h>
22#include <asm/atomic.h> 24#include <asm/atomic.h>
23#include <asm/nmi.h> 25#include <asm/nmi.h>
24#include <asm/pcr.h> 26#include <asm/pcr.h>
25 27
28#include "kstack.h"
29
26/* Sparc64 chips have two performance counters, 32-bits each, with 30/* Sparc64 chips have two performance counters, 32-bits each, with
27 * overflow interrupts generated on transition from 0xffffffff to 0. 31 * overflow interrupts generated on transition from 0xffffffff to 0.
28 * The counters are accessed in one go using a 64-bit register. 32 * The counters are accessed in one go using a 64-bit register.
@@ -51,16 +55,49 @@
51 55
52#define PIC_UPPER_INDEX 0 56#define PIC_UPPER_INDEX 0
53#define PIC_LOWER_INDEX 1 57#define PIC_LOWER_INDEX 1
58#define PIC_NO_INDEX -1
54 59
55struct cpu_hw_events { 60struct cpu_hw_events {
56 struct perf_event *events[MAX_HWEVENTS]; 61 /* Number of events currently scheduled onto this cpu.
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 62 * This tells how many entries in the arrays below
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 63 * are valid.
64 */
65 int n_events;
66
67 /* Number of new events added since the last hw_perf_disable().
68 * This works because the perf event layer always adds new
69 * events inside of a perf_{disable,enable}() sequence.
70 */
71 int n_added;
72
 73 /* Array of events currently scheduled on this cpu. */
74 struct perf_event *event[MAX_HWEVENTS];
75
76 /* Array of encoded longs, specifying the %pcr register
 77 * encoding and the mask of PIC counters this event can
78 * be scheduled on. See perf_event_encode() et al.
79 */
80 unsigned long events[MAX_HWEVENTS];
81
82 /* The current counter index assigned to an event. When the
83 * event hasn't been programmed into the cpu yet, this will
84 * hold PIC_NO_INDEX. The event->hw.idx value tells us where
85 * we ought to schedule the event.
86 */
87 int current_idx[MAX_HWEVENTS];
88
89 /* Software copy of %pcr register on this cpu. */
59 u64 pcr; 90 u64 pcr;
91
92 /* Enabled/disable state. */
60 int enabled; 93 int enabled;
61}; 94};
62DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 95DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
63 96
97/* An event map describes the characteristics of a performance
98 * counter event. In particular it gives the encoding as well as
99 * a mask telling which counters the event can be measured on.
100 */
64struct perf_event_map { 101struct perf_event_map {
65 u16 encoding; 102 u16 encoding;
66 u8 pic_mask; 103 u8 pic_mask;
@@ -69,15 +106,20 @@ struct perf_event_map {
69#define PIC_LOWER 0x02 106#define PIC_LOWER 0x02
70}; 107};
71 108
109/* Encode a perf_event_map entry into a long. */
72static unsigned long perf_event_encode(const struct perf_event_map *pmap) 110static unsigned long perf_event_encode(const struct perf_event_map *pmap)
73{ 111{
74 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; 112 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
75} 113}
76 114
77static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) 115static u8 perf_event_get_msk(unsigned long val)
78{ 116{
79 *msk = val & 0xff; 117 return val & 0xff;
80 *enc = val >> 16; 118}
119
120static u64 perf_event_get_enc(unsigned long val)
121{
122 return val >> 16;
81} 123}
82 124
83#define C(x) PERF_COUNT_HW_CACHE_##x 125#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -491,53 +533,6 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw
491 pcr_ops->write(cpuc->pcr); 533 pcr_ops->write(cpuc->pcr);
492} 534}
493 535
494void hw_perf_enable(void)
495{
496 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
497 u64 val;
498 int i;
499
500 if (cpuc->enabled)
501 return;
502
503 cpuc->enabled = 1;
504 barrier();
505
506 val = cpuc->pcr;
507
508 for (i = 0; i < MAX_HWEVENTS; i++) {
509 struct perf_event *cp = cpuc->events[i];
510 struct hw_perf_event *hwc;
511
512 if (!cp)
513 continue;
514 hwc = &cp->hw;
515 val |= hwc->config_base;
516 }
517
518 cpuc->pcr = val;
519
520 pcr_ops->write(cpuc->pcr);
521}
522
523void hw_perf_disable(void)
524{
525 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
526 u64 val;
527
528 if (!cpuc->enabled)
529 return;
530
531 cpuc->enabled = 0;
532
533 val = cpuc->pcr;
534 val &= ~(PCR_UTRACE | PCR_STRACE |
535 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
536 cpuc->pcr = val;
537
538 pcr_ops->write(cpuc->pcr);
539}
540
541static u32 read_pmc(int idx) 536static u32 read_pmc(int idx)
542{ 537{
543 u64 val; 538 u64 val;
@@ -566,6 +561,30 @@ static void write_pmc(int idx, u64 val)
566 write_pic(pic); 561 write_pic(pic);
567} 562}
568 563
564static u64 sparc_perf_event_update(struct perf_event *event,
565 struct hw_perf_event *hwc, int idx)
566{
567 int shift = 64 - 32;
568 u64 prev_raw_count, new_raw_count;
569 s64 delta;
570
571again:
572 prev_raw_count = atomic64_read(&hwc->prev_count);
573 new_raw_count = read_pmc(idx);
574
575 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
576 new_raw_count) != prev_raw_count)
577 goto again;
578
579 delta = (new_raw_count << shift) - (prev_raw_count << shift);
580 delta >>= shift;
581
582 atomic64_add(delta, &event->count);
583 atomic64_sub(delta, &hwc->period_left);
584
585 return new_raw_count;
586}
587
569static int sparc_perf_event_set_period(struct perf_event *event, 588static int sparc_perf_event_set_period(struct perf_event *event,
570 struct hw_perf_event *hwc, int idx) 589 struct hw_perf_event *hwc, int idx)
571{ 590{
@@ -598,81 +617,166 @@ static int sparc_perf_event_set_period(struct perf_event *event,
598 return ret; 617 return ret;
599} 618}
600 619
601static int sparc_pmu_enable(struct perf_event *event) 620/* If performance event entries have been added, move existing
621 * events around (if necessary) and then assign new entries to
622 * counters.
623 */
624static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
602{ 625{
603 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 626 int i;
604 struct hw_perf_event *hwc = &event->hw;
605 int idx = hwc->idx;
606 627
607 if (test_and_set_bit(idx, cpuc->used_mask)) 628 if (!cpuc->n_added)
608 return -EAGAIN; 629 goto out;
609 630
610 sparc_pmu_disable_event(cpuc, hwc, idx); 631 /* Read in the counters which are moving. */
632 for (i = 0; i < cpuc->n_events; i++) {
633 struct perf_event *cp = cpuc->event[i];
611 634
612 cpuc->events[idx] = event; 635 if (cpuc->current_idx[i] != PIC_NO_INDEX &&
613 set_bit(idx, cpuc->active_mask); 636 cpuc->current_idx[i] != cp->hw.idx) {
637 sparc_perf_event_update(cp, &cp->hw,
638 cpuc->current_idx[i]);
639 cpuc->current_idx[i] = PIC_NO_INDEX;
640 }
641 }
614 642
615 sparc_perf_event_set_period(event, hwc, idx); 643 /* Assign to counters all unassigned events. */
616 sparc_pmu_enable_event(cpuc, hwc, idx); 644 for (i = 0; i < cpuc->n_events; i++) {
617 perf_event_update_userpage(event); 645 struct perf_event *cp = cpuc->event[i];
618 return 0; 646 struct hw_perf_event *hwc = &cp->hw;
647 int idx = hwc->idx;
648 u64 enc;
649
650 if (cpuc->current_idx[i] != PIC_NO_INDEX)
651 continue;
652
653 sparc_perf_event_set_period(cp, hwc, idx);
654 cpuc->current_idx[i] = idx;
655
656 enc = perf_event_get_enc(cpuc->events[i]);
657 pcr |= event_encoding(enc, idx);
658 }
659out:
660 return pcr;
619} 661}
620 662
621static u64 sparc_perf_event_update(struct perf_event *event, 663void hw_perf_enable(void)
622 struct hw_perf_event *hwc, int idx)
623{ 664{
624 int shift = 64 - 32; 665 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
625 u64 prev_raw_count, new_raw_count; 666 u64 pcr;
626 s64 delta;
627 667
628again: 668 if (cpuc->enabled)
629 prev_raw_count = atomic64_read(&hwc->prev_count); 669 return;
630 new_raw_count = read_pmc(idx);
631 670
632 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 671 cpuc->enabled = 1;
633 new_raw_count) != prev_raw_count) 672 barrier();
634 goto again;
635 673
636 delta = (new_raw_count << shift) - (prev_raw_count << shift); 674 pcr = cpuc->pcr;
637 delta >>= shift; 675 if (!cpuc->n_events) {
676 pcr = 0;
677 } else {
678 pcr = maybe_change_configuration(cpuc, pcr);
638 679
639 atomic64_add(delta, &event->count); 680 /* We require that all of the events have the same
640 atomic64_sub(delta, &hwc->period_left); 681 * configuration, so just fetch the settings from the
682 * first entry.
683 */
684 cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
685 }
641 686
642 return new_raw_count; 687 pcr_ops->write(cpuc->pcr);
688}
689
690void hw_perf_disable(void)
691{
692 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
693 u64 val;
694
695 if (!cpuc->enabled)
696 return;
697
698 cpuc->enabled = 0;
699 cpuc->n_added = 0;
700
701 val = cpuc->pcr;
702 val &= ~(PCR_UTRACE | PCR_STRACE |
703 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
704 cpuc->pcr = val;
705
706 pcr_ops->write(cpuc->pcr);
643} 707}
644 708
645static void sparc_pmu_disable(struct perf_event *event) 709static void sparc_pmu_disable(struct perf_event *event)
646{ 710{
647 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 711 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
648 struct hw_perf_event *hwc = &event->hw; 712 struct hw_perf_event *hwc = &event->hw;
649 int idx = hwc->idx; 713 unsigned long flags;
714 int i;
650 715
651 clear_bit(idx, cpuc->active_mask); 716 local_irq_save(flags);
652 sparc_pmu_disable_event(cpuc, hwc, idx); 717 perf_disable();
718
719 for (i = 0; i < cpuc->n_events; i++) {
720 if (event == cpuc->event[i]) {
721 int idx = cpuc->current_idx[i];
722
723 /* Shift remaining entries down into
724 * the existing slot.
725 */
726 while (++i < cpuc->n_events) {
727 cpuc->event[i - 1] = cpuc->event[i];
728 cpuc->events[i - 1] = cpuc->events[i];
729 cpuc->current_idx[i - 1] =
730 cpuc->current_idx[i];
731 }
732
733 /* Absorb the final count and turn off the
734 * event.
735 */
736 sparc_pmu_disable_event(cpuc, hwc, idx);
737 barrier();
738 sparc_perf_event_update(event, hwc, idx);
653 739
654 barrier(); 740 perf_event_update_userpage(event);
655 741
656 sparc_perf_event_update(event, hwc, idx); 742 cpuc->n_events--;
657 cpuc->events[idx] = NULL; 743 break;
658 clear_bit(idx, cpuc->used_mask); 744 }
745 }
659 746
660 perf_event_update_userpage(event); 747 perf_enable();
748 local_irq_restore(flags);
749}
750
751static int active_event_index(struct cpu_hw_events *cpuc,
752 struct perf_event *event)
753{
754 int i;
755
756 for (i = 0; i < cpuc->n_events; i++) {
757 if (cpuc->event[i] == event)
758 break;
759 }
760 BUG_ON(i == cpuc->n_events);
761 return cpuc->current_idx[i];
661} 762}
662 763
663static void sparc_pmu_read(struct perf_event *event) 764static void sparc_pmu_read(struct perf_event *event)
664{ 765{
766 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
767 int idx = active_event_index(cpuc, event);
665 struct hw_perf_event *hwc = &event->hw; 768 struct hw_perf_event *hwc = &event->hw;
666 769
667 sparc_perf_event_update(event, hwc, hwc->idx); 770 sparc_perf_event_update(event, hwc, idx);
668} 771}
669 772
670static void sparc_pmu_unthrottle(struct perf_event *event) 773static void sparc_pmu_unthrottle(struct perf_event *event)
671{ 774{
672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 775 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
776 int idx = active_event_index(cpuc, event);
673 struct hw_perf_event *hwc = &event->hw; 777 struct hw_perf_event *hwc = &event->hw;
674 778
675 sparc_pmu_enable_event(cpuc, hwc, hwc->idx); 779 sparc_pmu_enable_event(cpuc, hwc, idx);
676} 780}
677 781
678static atomic_t active_events = ATOMIC_INIT(0); 782static atomic_t active_events = ATOMIC_INIT(0);
@@ -750,43 +854,75 @@ static void hw_perf_event_destroy(struct perf_event *event)
750/* Make sure all events can be scheduled into the hardware at 854/* Make sure all events can be scheduled into the hardware at
751 * the same time. This is simplified by the fact that we only 855 * the same time. This is simplified by the fact that we only
752 * need to support 2 simultaneous HW events. 856 * need to support 2 simultaneous HW events.
857 *
858 * As a side effect, the evts[]->hw.idx values will be assigned
859 * on success. These are pending indexes. When the events are
860 * actually programmed into the chip, these values will propagate
861 * to the per-cpu cpuc->current_idx[] slots, see the code in
862 * maybe_change_configuration() for details.
753 */ 863 */
754static int sparc_check_constraints(unsigned long *events, int n_ev) 864static int sparc_check_constraints(struct perf_event **evts,
865 unsigned long *events, int n_ev)
755{ 866{
756 if (n_ev <= perf_max_events) { 867 u8 msk0 = 0, msk1 = 0;
757 u8 msk1, msk2; 868 int idx0 = 0;
758 u16 dummy; 869
759 870 /* This case is possible when we are invoked from
760 if (n_ev == 1) 871 * hw_perf_group_sched_in().
761 return 0; 872 */
762 BUG_ON(n_ev != 2); 873 if (!n_ev)
763 perf_event_decode(events[0], &dummy, &msk1); 874 return 0;
764 perf_event_decode(events[1], &dummy, &msk2); 875
765 876 if (n_ev > perf_max_events)
766 /* If both events can go on any counter, OK. */ 877 return -1;
767 if (msk1 == (PIC_UPPER | PIC_LOWER) && 878
768 msk2 == (PIC_UPPER | PIC_LOWER)) 879 msk0 = perf_event_get_msk(events[0]);
769 return 0; 880 if (n_ev == 1) {
770 881 if (msk0 & PIC_LOWER)
771 /* If one event is limited to a specific counter, 882 idx0 = 1;
772 * and the other can go on both, OK. 883 goto success;
773 */
774 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
775 msk2 == (PIC_UPPER | PIC_LOWER))
776 return 0;
777 if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
778 msk1 == (PIC_UPPER | PIC_LOWER))
779 return 0;
780
781 /* If the events are fixed to different counters, OK. */
782 if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
783 (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
784 return 0;
785
786 /* Otherwise, there is a conflict. */
787 } 884 }
885 BUG_ON(n_ev != 2);
886 msk1 = perf_event_get_msk(events[1]);
887
888 /* If both events can go on any counter, OK. */
889 if (msk0 == (PIC_UPPER | PIC_LOWER) &&
890 msk1 == (PIC_UPPER | PIC_LOWER))
891 goto success;
788 892
893 /* If one event is limited to a specific counter,
894 * and the other can go on both, OK.
895 */
896 if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
897 msk1 == (PIC_UPPER | PIC_LOWER)) {
898 if (msk0 & PIC_LOWER)
899 idx0 = 1;
900 goto success;
901 }
902
903 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
904 msk0 == (PIC_UPPER | PIC_LOWER)) {
905 if (msk1 & PIC_UPPER)
906 idx0 = 1;
907 goto success;
908 }
909
910 /* If the events are fixed to different counters, OK. */
911 if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
912 (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
913 if (msk0 & PIC_LOWER)
914 idx0 = 1;
915 goto success;
916 }
917
918 /* Otherwise, there is a conflict. */
789 return -1; 919 return -1;
920
921success:
922 evts[0]->hw.idx = idx0;
923 if (n_ev == 2)
924 evts[1]->hw.idx = idx0 ^ 1;
925 return 0;
790} 926}
791 927
792static int check_excludes(struct perf_event **evts, int n_prev, int n_new) 928static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
@@ -818,7 +954,8 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
818} 954}
819 955
820static int collect_events(struct perf_event *group, int max_count, 956static int collect_events(struct perf_event *group, int max_count,
821 struct perf_event *evts[], unsigned long *events) 957 struct perf_event *evts[], unsigned long *events,
958 int *current_idx)
822{ 959{
823 struct perf_event *event; 960 struct perf_event *event;
824 int n = 0; 961 int n = 0;
@@ -827,7 +964,8 @@ static int collect_events(struct perf_event *group, int max_count,
827 if (n >= max_count) 964 if (n >= max_count)
828 return -1; 965 return -1;
829 evts[n] = group; 966 evts[n] = group;
830 events[n++] = group->hw.event_base; 967 events[n] = group->hw.event_base;
968 current_idx[n++] = PIC_NO_INDEX;
831 } 969 }
832 list_for_each_entry(event, &group->sibling_list, group_entry) { 970 list_for_each_entry(event, &group->sibling_list, group_entry) {
833 if (!is_software_event(event) && 971 if (!is_software_event(event) &&
@@ -835,20 +973,100 @@ static int collect_events(struct perf_event *group, int max_count,
835 if (n >= max_count) 973 if (n >= max_count)
836 return -1; 974 return -1;
837 evts[n] = event; 975 evts[n] = event;
838 events[n++] = event->hw.event_base; 976 events[n] = event->hw.event_base;
977 current_idx[n++] = PIC_NO_INDEX;
839 } 978 }
840 } 979 }
841 return n; 980 return n;
842} 981}
843 982
983static void event_sched_in(struct perf_event *event, int cpu)
984{
985 event->state = PERF_EVENT_STATE_ACTIVE;
986 event->oncpu = cpu;
987 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
988 if (is_software_event(event))
989 event->pmu->enable(event);
990}
991
992int hw_perf_group_sched_in(struct perf_event *group_leader,
993 struct perf_cpu_context *cpuctx,
994 struct perf_event_context *ctx, int cpu)
995{
996 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
997 struct perf_event *sub;
998 int n0, n;
999
1000 if (!sparc_pmu)
1001 return 0;
1002
1003 n0 = cpuc->n_events;
1004 n = collect_events(group_leader, perf_max_events - n0,
1005 &cpuc->event[n0], &cpuc->events[n0],
1006 &cpuc->current_idx[n0]);
1007 if (n < 0)
1008 return -EAGAIN;
1009 if (check_excludes(cpuc->event, n0, n))
1010 return -EINVAL;
1011 if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
1012 return -EAGAIN;
1013 cpuc->n_events = n0 + n;
1014 cpuc->n_added += n;
1015
1016 cpuctx->active_oncpu += n;
1017 n = 1;
1018 event_sched_in(group_leader, cpu);
1019 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
1020 if (sub->state != PERF_EVENT_STATE_OFF) {
1021 event_sched_in(sub, cpu);
1022 n++;
1023 }
1024 }
1025 ctx->nr_active += n;
1026
1027 return 1;
1028}
1029
1030static int sparc_pmu_enable(struct perf_event *event)
1031{
1032 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1033 int n0, ret = -EAGAIN;
1034 unsigned long flags;
1035
1036 local_irq_save(flags);
1037 perf_disable();
1038
1039 n0 = cpuc->n_events;
1040 if (n0 >= perf_max_events)
1041 goto out;
1042
1043 cpuc->event[n0] = event;
1044 cpuc->events[n0] = event->hw.event_base;
1045 cpuc->current_idx[n0] = PIC_NO_INDEX;
1046
1047 if (check_excludes(cpuc->event, n0, 1))
1048 goto out;
1049 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
1050 goto out;
1051
1052 cpuc->n_events++;
1053 cpuc->n_added++;
1054
1055 ret = 0;
1056out:
1057 perf_enable();
1058 local_irq_restore(flags);
1059 return ret;
1060}
1061
844static int __hw_perf_event_init(struct perf_event *event) 1062static int __hw_perf_event_init(struct perf_event *event)
845{ 1063{
846 struct perf_event_attr *attr = &event->attr; 1064 struct perf_event_attr *attr = &event->attr;
847 struct perf_event *evts[MAX_HWEVENTS]; 1065 struct perf_event *evts[MAX_HWEVENTS];
848 struct hw_perf_event *hwc = &event->hw; 1066 struct hw_perf_event *hwc = &event->hw;
849 unsigned long events[MAX_HWEVENTS]; 1067 unsigned long events[MAX_HWEVENTS];
1068 int current_idx_dmy[MAX_HWEVENTS];
850 const struct perf_event_map *pmap; 1069 const struct perf_event_map *pmap;
851 u64 enc;
852 int n; 1070 int n;
853 1071
854 if (atomic_read(&nmi_active) < 0) 1072 if (atomic_read(&nmi_active) < 0)
@@ -865,10 +1083,7 @@ static int __hw_perf_event_init(struct perf_event *event)
865 } else 1083 } else
866 return -EOPNOTSUPP; 1084 return -EOPNOTSUPP;
867 1085
868 /* We save the enable bits in the config_base. So to 1086 /* We save the enable bits in the config_base. */
869 * turn off sampling just write 'config', and to enable
870 * things write 'config | config_base'.
871 */
872 hwc->config_base = sparc_pmu->irq_bit; 1087 hwc->config_base = sparc_pmu->irq_bit;
873 if (!attr->exclude_user) 1088 if (!attr->exclude_user)
874 hwc->config_base |= PCR_UTRACE; 1089 hwc->config_base |= PCR_UTRACE;
@@ -879,13 +1094,11 @@ static int __hw_perf_event_init(struct perf_event *event)
879 1094
880 hwc->event_base = perf_event_encode(pmap); 1095 hwc->event_base = perf_event_encode(pmap);
881 1096
882 enc = pmap->encoding;
883
884 n = 0; 1097 n = 0;
885 if (event->group_leader != event) { 1098 if (event->group_leader != event) {
886 n = collect_events(event->group_leader, 1099 n = collect_events(event->group_leader,
887 perf_max_events - 1, 1100 perf_max_events - 1,
888 evts, events); 1101 evts, events, current_idx_dmy);
889 if (n < 0) 1102 if (n < 0)
890 return -EINVAL; 1103 return -EINVAL;
891 } 1104 }
@@ -895,9 +1108,11 @@ static int __hw_perf_event_init(struct perf_event *event)
895 if (check_excludes(evts, n, 1)) 1108 if (check_excludes(evts, n, 1))
896 return -EINVAL; 1109 return -EINVAL;
897 1110
898 if (sparc_check_constraints(events, n + 1)) 1111 if (sparc_check_constraints(evts, events, n + 1))
899 return -EINVAL; 1112 return -EINVAL;
900 1113
1114 hwc->idx = PIC_NO_INDEX;
1115
901 /* Try to do all error checking before this point, as unwinding 1116 /* Try to do all error checking before this point, as unwinding
902 * state after grabbing the PMC is difficult. 1117 * state after grabbing the PMC is difficult.
903 */ 1118 */
@@ -910,15 +1125,6 @@ static int __hw_perf_event_init(struct perf_event *event)
910 atomic64_set(&hwc->period_left, hwc->sample_period); 1125 atomic64_set(&hwc->period_left, hwc->sample_period);
911 } 1126 }
912 1127
913 if (pmap->pic_mask & PIC_UPPER) {
914 hwc->idx = PIC_UPPER_INDEX;
915 enc <<= sparc_pmu->upper_shift;
916 } else {
917 hwc->idx = PIC_LOWER_INDEX;
918 enc <<= sparc_pmu->lower_shift;
919 }
920
921 hwc->config |= enc;
922 return 0; 1128 return 0;
923} 1129}
924 1130
@@ -968,7 +1174,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
968 struct perf_sample_data data; 1174 struct perf_sample_data data;
969 struct cpu_hw_events *cpuc; 1175 struct cpu_hw_events *cpuc;
970 struct pt_regs *regs; 1176 struct pt_regs *regs;
971 int idx; 1177 int i;
972 1178
973 if (!atomic_read(&active_events)) 1179 if (!atomic_read(&active_events))
974 return NOTIFY_DONE; 1180 return NOTIFY_DONE;
@@ -997,13 +1203,12 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
997 if (sparc_pmu->irq_bit) 1203 if (sparc_pmu->irq_bit)
998 pcr_ops->write(cpuc->pcr); 1204 pcr_ops->write(cpuc->pcr);
999 1205
1000 for (idx = 0; idx < MAX_HWEVENTS; idx++) { 1206 for (i = 0; i < cpuc->n_events; i++) {
1001 struct perf_event *event = cpuc->events[idx]; 1207 struct perf_event *event = cpuc->event[i];
1208 int idx = cpuc->current_idx[i];
1002 struct hw_perf_event *hwc; 1209 struct hw_perf_event *hwc;
1003 u64 val; 1210 u64 val;
1004 1211
1005 if (!test_bit(idx, cpuc->active_mask))
1006 continue;
1007 hwc = &event->hw; 1212 hwc = &event->hw;
1008 val = sparc_perf_event_update(event, hwc, idx); 1213 val = sparc_perf_event_update(event, hwc, idx);
1009 if (val & (1ULL << 31)) 1214 if (val & (1ULL << 31))
@@ -1055,10 +1260,122 @@ void __init init_hw_perf_events(void)
1055 1260
1056 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); 1261 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1057 1262
1058 /* All sparc64 PMUs currently have 2 events. But this simple 1263 /* All sparc64 PMUs currently have 2 events. */
1059 * driver only supports one active event at a time. 1264 perf_max_events = 2;
1060 */
1061 perf_max_events = 1;
1062 1265
1063 register_die_notifier(&perf_event_nmi_notifier); 1266 register_die_notifier(&perf_event_nmi_notifier);
1064} 1267}
1268
1269static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
1270{
1271 if (entry->nr < PERF_MAX_STACK_DEPTH)
1272 entry->ip[entry->nr++] = ip;
1273}
1274
1275static void perf_callchain_kernel(struct pt_regs *regs,
1276 struct perf_callchain_entry *entry)
1277{
1278 unsigned long ksp, fp;
1279
1280 callchain_store(entry, PERF_CONTEXT_KERNEL);
1281 callchain_store(entry, regs->tpc);
1282
1283 ksp = regs->u_regs[UREG_I6];
1284 fp = ksp + STACK_BIAS;
1285 do {
1286 struct sparc_stackf *sf;
1287 struct pt_regs *regs;
1288 unsigned long pc;
1289
1290 if (!kstack_valid(current_thread_info(), fp))
1291 break;
1292
1293 sf = (struct sparc_stackf *) fp;
1294 regs = (struct pt_regs *) (sf + 1);
1295
1296 if (kstack_is_trap_frame(current_thread_info(), regs)) {
1297 if (user_mode(regs))
1298 break;
1299 pc = regs->tpc;
1300 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
1301 } else {
1302 pc = sf->callers_pc;
1303 fp = (unsigned long)sf->fp + STACK_BIAS;
1304 }
1305 callchain_store(entry, pc);
1306 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1307}
1308
1309static void perf_callchain_user_64(struct pt_regs *regs,
1310 struct perf_callchain_entry *entry)
1311{
1312 unsigned long ufp;
1313
1314 callchain_store(entry, PERF_CONTEXT_USER);
1315 callchain_store(entry, regs->tpc);
1316
1317 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1318 do {
1319 struct sparc_stackf *usf, sf;
1320 unsigned long pc;
1321
1322 usf = (struct sparc_stackf *) ufp;
1323 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1324 break;
1325
1326 pc = sf.callers_pc;
1327 ufp = (unsigned long)sf.fp + STACK_BIAS;
1328 callchain_store(entry, pc);
1329 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1330}
1331
1332static void perf_callchain_user_32(struct pt_regs *regs,
1333 struct perf_callchain_entry *entry)
1334{
1335 unsigned long ufp;
1336
1337 callchain_store(entry, PERF_CONTEXT_USER);
1338 callchain_store(entry, regs->tpc);
1339
1340 ufp = regs->u_regs[UREG_I6];
1341 do {
1342 struct sparc_stackf32 *usf, sf;
1343 unsigned long pc;
1344
1345 usf = (struct sparc_stackf32 *) ufp;
1346 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1347 break;
1348
1349 pc = sf.callers_pc;
1350 ufp = (unsigned long)sf.fp;
1351 callchain_store(entry, pc);
1352 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1353}
1354
1355/* Like powerpc we can't get PMU interrupts within the PMU handler,
1356 * so no need for separate NMI and IRQ chains as on x86.
1357 */
1358static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
1359
1360struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1361{
1362 struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
1363
1364 entry->nr = 0;
1365 if (!user_mode(regs)) {
1366 stack_trace_flush();
1367 perf_callchain_kernel(regs, entry);
1368 if (current->mm)
1369 regs = task_pt_regs(current);
1370 else
1371 regs = NULL;
1372 }
1373 if (regs) {
1374 flushw_user();
1375 if (test_thread_flag(TIF_32BIT))
1376 perf_callchain_user_32(regs, entry);
1377 else
1378 perf_callchain_user_64(regs, entry);
1379 }
1380 return entry;
1381}
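The counter-constraint logic introduced above boils down to a small decision table over the two PIC mask bits, so it can be exercised outside the kernel. The sketch below is only an illustration of that table, not code from the patch: the harness and function name are invented here, and PIC_UPPER is assumed to be 0x01 (only PIC_LOWER's 0x02 is visible in the hunk).

#include <stdio.h>

#define PIC_UPPER 0x01	/* assumed value; not shown in the hunk */
#define PIC_LOWER 0x02

/* Return the counter index (0 or 1) that event 0 should take, or -1 on a
 * conflict.  Event 1, when present, always takes the other counter, mirroring
 * the idx0 / idx0 ^ 1 assignment in sparc_check_constraints(). */
static int pick_counters(unsigned char msk0, unsigned char msk1, int n_ev)
{
	if (n_ev == 1)
		return (msk0 & PIC_LOWER) ? 1 : 0;

	/* Both events free to use either counter: leave event 0 on counter 0. */
	if (msk0 == (PIC_UPPER | PIC_LOWER) && msk1 == (PIC_UPPER | PIC_LOWER))
		return 0;

	/* One event pinned to a counter, the other flexible. */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		return (msk0 & PIC_LOWER) ? 1 : 0;
	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER))
		return (msk1 & PIC_UPPER) ? 1 : 0;

	/* Both pinned, but to different counters. */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER))
		return (msk0 & PIC_LOWER) ? 1 : 0;

	return -1;	/* both events demand the same counter */
}

int main(void)
{
	/* pinned-to-upper plus flexible: event 0 keeps counter 0 */
	printf("%d\n", pick_counters(PIC_UPPER, PIC_UPPER | PIC_LOWER, 2));
	/* both pinned to the lower counter: conflict */
	printf("%d\n", pick_counters(PIC_LOWER, PIC_LOWER, 2));
	return 0;
}

Running it prints 0 and -1, matching the "pinned plus flexible" and "both pinned to the same counter" branches of sparc_check_constraints().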
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index cfa0e19abe3b..d77f54316948 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -365,6 +365,7 @@ EXPORT_SYMBOL(get_fb_unmapped_area);
365void arch_pick_mmap_layout(struct mm_struct *mm) 365void arch_pick_mmap_layout(struct mm_struct *mm)
366{ 366{
367 unsigned long random_factor = 0UL; 367 unsigned long random_factor = 0UL;
368 unsigned long gap;
368 369
369 if (current->flags & PF_RANDOMIZE) { 370 if (current->flags & PF_RANDOMIZE) {
370 random_factor = get_random_int(); 371 random_factor = get_random_int();
@@ -379,9 +380,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
379 * Fall back to the standard layout if the personality 380 * Fall back to the standard layout if the personality
380 * bit is set, or if the expected stack growth is unlimited: 381 * bit is set, or if the expected stack growth is unlimited:
381 */ 382 */
383 gap = rlimit(RLIMIT_STACK);
382 if (!test_thread_flag(TIF_32BIT) || 384 if (!test_thread_flag(TIF_32BIT) ||
383 (current->personality & ADDR_COMPAT_LAYOUT) || 385 (current->personality & ADDR_COMPAT_LAYOUT) ||
384 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || 386 gap == RLIM_INFINITY ||
385 sysctl_legacy_va_layout) { 387 sysctl_legacy_va_layout) {
386 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 388 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
387 mm->get_unmapped_area = arch_get_unmapped_area; 389 mm->get_unmapped_area = arch_get_unmapped_area;
@@ -389,9 +391,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
389 } else { 391 } else {
390 /* We know it's 32-bit */ 392 /* We know it's 32-bit */
391 unsigned long task_size = STACK_TOP32; 393 unsigned long task_size = STACK_TOP32;
392 unsigned long gap;
393 394
394 gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
395 if (gap < 128 * 1024 * 1024) 395 if (gap < 128 * 1024 * 1024)
396 gap = 128 * 1024 * 1024; 396 gap = 128 * 1024 * 1024;
397 if (gap > (task_size / 6 * 5)) 397 if (gap > (task_size / 6 * 5))
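The hunk above only hoists the stack-gap lookup to the top of the function; the clamping that follows (partly outside the shown context) keeps the gap between 128MB and 5/6 of the 32-bit task size. As a rough, self-contained illustration of that arithmetic only, with STACK_TOP32 assumed to be 0xf0000000:

#include <stdio.h>

#define MIN_GAP     (128UL * 1024 * 1024)	/* 128MB floor */
#define TASK_SIZE32 0xf0000000UL		/* STACK_TOP32, assumed here */

/* Clamp the stack gap the same way the 32-bit branch does. */
static unsigned long clamp_gap(unsigned long gap)
{
	if (gap < MIN_GAP)
		gap = MIN_GAP;
	if (gap > (TASK_SIZE32 / 6 * 5))
		gap = TASK_SIZE32 / 6 * 5;
	return gap;
}

int main(void)
{
	printf("%lu\n", clamp_gap(8UL * 1024 * 1024));	/* raised to 128MB */
	printf("%lu\n", clamp_gap(~0UL));		/* capped at 5/6 of task size */
	return 0;
}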
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 5b2f595fe65b..0d4c09b15efc 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -35,6 +35,7 @@
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36 36
37#include <asm/oplib.h> 37#include <asm/oplib.h>
38#include <asm/timex.h>
38#include <asm/timer.h> 39#include <asm/timer.h>
39#include <asm/system.h> 40#include <asm/system.h>
40#include <asm/irq.h> 41#include <asm/irq.h>
@@ -51,7 +52,6 @@ DEFINE_SPINLOCK(rtc_lock);
51EXPORT_SYMBOL(rtc_lock); 52EXPORT_SYMBOL(rtc_lock);
52 53
53static int set_rtc_mmss(unsigned long); 54static int set_rtc_mmss(unsigned long);
54static int sbus_do_settimeofday(struct timespec *tv);
55 55
56unsigned long profile_pc(struct pt_regs *regs) 56unsigned long profile_pc(struct pt_regs *regs)
57{ 57{
@@ -76,6 +76,8 @@ EXPORT_SYMBOL(profile_pc);
76 76
77__volatile__ unsigned int *master_l10_counter; 77__volatile__ unsigned int *master_l10_counter;
78 78
79u32 (*do_arch_gettimeoffset)(void);
80
79/* 81/*
80 * timer_interrupt() needs to keep up the real-time clock, 82 * timer_interrupt() needs to keep up the real-time clock,
81 * as well as call the "do_timer()" routine every clocktick 83 * as well as call the "do_timer()" routine every clocktick
@@ -196,35 +198,14 @@ static int __init clock_init(void)
196{ 198{
197 return of_register_driver(&clock_driver, &of_platform_bus_type); 199 return of_register_driver(&clock_driver, &of_platform_bus_type);
198} 200}
199
200/* Must be after subsys_initcall() so that busses are probed. Must 201/* Must be after subsys_initcall() so that busses are probed. Must
201 * be before device_initcall() because things like the RTC driver 202 * be before device_initcall() because things like the RTC driver
202 * need to see the clock registers. 203 * need to see the clock registers.
203 */ 204 */
204fs_initcall(clock_init); 205fs_initcall(clock_init);
205 206
206static void __init sbus_time_init(void)
207{
208
209 BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
210 btfixup();
211
212 sparc_init_timers(timer_interrupt);
213}
214
215void __init time_init(void)
216{
217#ifdef CONFIG_PCI
218 extern void pci_time_init(void);
219 if (pcic_present()) {
220 pci_time_init();
221 return;
222 }
223#endif
224 sbus_time_init();
225}
226 207
227static inline unsigned long do_gettimeoffset(void) 208u32 sbus_do_gettimeoffset(void)
228{ 209{
229 unsigned long val = *master_l10_counter; 210 unsigned long val = *master_l10_counter;
230 unsigned long usec = (val >> 10) & 0x1fffff; 211 unsigned long usec = (val >> 10) & 0x1fffff;
@@ -233,86 +214,39 @@ static inline unsigned long do_gettimeoffset(void)
233 if (val & 0x80000000) 214 if (val & 0x80000000)
234 usec += 1000000 / HZ; 215 usec += 1000000 / HZ;
235 216
236 return usec; 217 return usec * 1000;
237} 218}
238 219
239/* Ok, my cute asm atomicity trick doesn't work anymore.
240 * There are just too many variables that need to be protected
241 * now (both members of xtime, et al.)
242 */
243void do_gettimeofday(struct timeval *tv)
244{
245 unsigned long flags;
246 unsigned long seq;
247 unsigned long usec, sec;
248 unsigned long max_ntp_tick = tick_usec - tickadj;
249
250 do {
251 seq = read_seqbegin_irqsave(&xtime_lock, flags);
252 usec = do_gettimeoffset();
253
254 /*
255 * If time_adjust is negative then NTP is slowing the clock
256 * so make sure not to go into next possible interval.
257 * Better to lose some accuracy than have time go backwards..
258 */
259 if (unlikely(time_adjust < 0))
260 usec = min(usec, max_ntp_tick);
261
262 sec = xtime.tv_sec;
263 usec += (xtime.tv_nsec / 1000);
264 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
265
266 while (usec >= 1000000) {
267 usec -= 1000000;
268 sec++;
269 }
270 220
271 tv->tv_sec = sec; 221u32 arch_gettimeoffset(void)
272 tv->tv_usec = usec;
273}
274
275EXPORT_SYMBOL(do_gettimeofday);
276
277int do_settimeofday(struct timespec *tv)
278{ 222{
279 int ret; 223 if (unlikely(!do_arch_gettimeoffset))
280 224 return 0;
281 write_seqlock_irq(&xtime_lock); 225 return do_arch_gettimeoffset();
282 ret = bus_do_settimeofday(tv);
283 write_sequnlock_irq(&xtime_lock);
284 clock_was_set();
285 return ret;
286} 226}
287 227
288EXPORT_SYMBOL(do_settimeofday); 228static void __init sbus_time_init(void)
289
290static int sbus_do_settimeofday(struct timespec *tv)
291{ 229{
292 time_t wtm_sec, sec = tv->tv_sec; 230 do_arch_gettimeoffset = sbus_do_gettimeoffset;
293 long wtm_nsec, nsec = tv->tv_nsec;
294 231
295 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 232 btfixup();
296 return -EINVAL;
297
298 /*
299 * This is revolting. We need to set "xtime" correctly. However, the
300 * value in this location is the value at the most recent update of
301 * wall time. Discover what correction gettimeofday() would have
302 * made, and then undo it!
303 */
304 nsec -= 1000 * do_gettimeoffset();
305
306 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
307 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
308 233
309 set_normalized_timespec(&xtime, sec, nsec); 234 sparc_init_timers(timer_interrupt);
310 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 235}
311 236
312 ntp_clear(); 237void __init time_init(void)
313 return 0; 238{
239#ifdef CONFIG_PCI
240 extern void pci_time_init(void);
241 if (pcic_present()) {
242 pci_time_init();
243 return;
244 }
245#endif
246 sbus_time_init();
314} 247}
315 248
249
316static int set_rtc_mmss(unsigned long secs) 250static int set_rtc_mmss(unsigned long secs)
317{ 251{
318 struct rtc_device *rtc = rtc_class_open("rtc0"); 252 struct rtc_device *rtc = rtc_class_open("rtc0");
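The time_32.c change replaces the open-coded do_gettimeofday() path with the generic arch_gettimeoffset() hook: a function pointer that platforms install at init time and that safely returns 0 when nothing has registered it. The user-space sketch below mirrors that pattern; the fake counter value and HZ == 100 are assumptions made for the example, not values taken from the patch.

#include <stdio.h>

/* NULL-checked hook, filled in by the platform at init time. */
static unsigned int (*do_arch_gettimeoffset)(void);

static unsigned int arch_gettimeoffset(void)
{
	if (!do_arch_gettimeoffset)
		return 0;		/* no platform hook registered */
	return do_arch_gettimeoffset();
}

/* Stand-in for sbus_do_gettimeoffset(): turn a free-running counter value
 * into nanoseconds since the last tick. */
static unsigned int fake_do_gettimeoffset(void)
{
	unsigned int val = 0x80000400;			/* pretend counter read */
	unsigned int usec = (val >> 10) & 0x1fffff;	/* microsecond field */

	if (val & 0x80000000)				/* limit bit: tick pending */
		usec += 1000000 / 100;			/* assume HZ == 100 */
	return usec * 1000;				/* usec -> nsec */
}

int main(void)
{
	printf("before: %u\n", arch_gettimeoffset());	/* 0 */
	do_arch_gettimeoffset = fake_do_gettimeoffset;
	printf("after:  %u\n", arch_gettimeoffset());
	return 0;
}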
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b99f81c4906f..a3413acb8f12 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -18,6 +18,7 @@
18#include <linux/signal.h> 18#include <linux/signal.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/perf_event.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/kdebug.h> 24#include <linux/kdebug.h>
@@ -203,6 +204,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
203 if (in_atomic() || !mm) 204 if (in_atomic() || !mm)
204 goto no_context; 205 goto no_context;
205 206
207 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
208
206 down_read(&mm->mmap_sem); 209 down_read(&mm->mmap_sem);
207 210
208 /* 211 /*
@@ -249,10 +252,15 @@ good_area:
249 goto do_sigbus; 252 goto do_sigbus;
250 BUG(); 253 BUG();
251 } 254 }
252 if (fault & VM_FAULT_MAJOR) 255 if (fault & VM_FAULT_MAJOR) {
253 current->maj_flt++; 256 current->maj_flt++;
254 else 257 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
258 regs, address);
259 } else {
255 current->min_flt++; 260 current->min_flt++;
261 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
262 regs, address);
263 }
256 up_read(&mm->mmap_sem); 264 up_read(&mm->mmap_sem);
257 return; 265 return;
258 266
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 6081936bf03b..b9d4ff02b8fc 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -16,6 +16,7 @@
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/perf_event.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
20#include <linux/kprobes.h> 21#include <linux/kprobes.h>
21#include <linux/kdebug.h> 22#include <linux/kdebug.h>
@@ -296,6 +297,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
296 if (in_atomic() || !mm) 297 if (in_atomic() || !mm)
297 goto intr_or_no_mm; 298 goto intr_or_no_mm;
298 299
300 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
301
299 if (!down_read_trylock(&mm->mmap_sem)) { 302 if (!down_read_trylock(&mm->mmap_sem)) {
300 if ((regs->tstate & TSTATE_PRIV) && 303 if ((regs->tstate & TSTATE_PRIV) &&
301 !search_exception_tables(regs->tpc)) { 304 !search_exception_tables(regs->tpc)) {
@@ -400,11 +403,15 @@ good_area:
400 goto do_sigbus; 403 goto do_sigbus;
401 BUG(); 404 BUG();
402 } 405 }
403 if (fault & VM_FAULT_MAJOR) 406 if (fault & VM_FAULT_MAJOR) {
404 current->maj_flt++; 407 current->maj_flt++;
405 else 408 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
409 regs, address);
410 } else {
406 current->min_flt++; 411 current->min_flt++;
407 412 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
413 regs, address);
414 }
408 up_read(&mm->mmap_sem); 415 up_read(&mm->mmap_sem);
409 416
410 mm_rss = get_mm_rss(mm); 417 mm_rss = get_mm_rss(mm);
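Both fault handlers now emit a generic PERF_COUNT_SW_PAGE_FAULTS event on entry and, once the fault has been resolved, either the major or the minor variant. A trivial user-space model of that bookkeeping (the struct and helper are invented purely for illustration):

#include <stdio.h>

struct fault_stats {
	unsigned long faults;		/* PERF_COUNT_SW_PAGE_FAULTS */
	unsigned long faults_maj;	/* PERF_COUNT_SW_PAGE_FAULTS_MAJ */
	unsigned long faults_min;	/* PERF_COUNT_SW_PAGE_FAULTS_MIN */
};

/* The generic counter is bumped up front; the major/minor counter only once
 * the fault has actually been handled. */
static void account_fault(struct fault_stats *st, int handled, int major)
{
	st->faults++;
	if (!handled)
		return;
	if (major)
		st->faults_maj++;
	else
		st->faults_min++;
}

int main(void)
{
	struct fault_stats st = { 0, 0, 0 };

	account_fault(&st, 1, 1);
	account_fault(&st, 1, 0);
	account_fault(&st, 1, 0);
	printf("%lu total, %lu major, %lu minor\n",
	       st.faults, st.faults_maj, st.faults_min);
	return 0;
}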
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index defcaf108460..f665b05592f3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -633,8 +633,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
633 return NULL; 633 return NULL;
634 } 634 }
635 if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { 635 if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
636 printk(KERN_WARNING "integrated sync not supported\n"); 636 printk(KERN_WARNING "composite sync not supported\n");
637 return NULL;
638 } 637 }
639 638
640 /* it is incorrect if hsync/vsync width is zero */ 639 /* it is incorrect if hsync/vsync width is zero */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1c2b7d44ec05..0f9e90552dc4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
389 break; 389 break;
390 /* Display: Off; HSync: On, VSync: On */ 390 /* Display: Off; HSync: On, VSync: On */
391 case FB_BLANK_NORMAL: 391 case FB_BLANK_NORMAL:
392 drm_fb_helper_off(info, DRM_MODE_DPMS_ON); 392 drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
393 break; 393 break;
394 /* Display: Off; HSync: Off, VSync: On */ 394 /* Display: Off; HSync: Off, VSync: On */
395 case FB_BLANK_HSYNC_SUSPEND: 395 case FB_BLANK_HSYNC_SUSPEND:
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index ba143972769f..d7f8d8b4a4b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -310,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
310 struct drm_device *dev = bios->dev; 310 struct drm_device *dev = bios->dev;
311 311
312 /* C51 has misaligned regs on purpose. Marvellous */ 312 /* C51 has misaligned regs on purpose. Marvellous */
313 if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) { 313 if (reg & 0x2 ||
314 NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n", 314 (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
315 reg); 315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
316 return 0; 316
317 } 317 /* warn on C51 regs that haven't been verified accessible in tracing */
318 /*
319 * Warn on C51 regs that have not been verified accessible in
320 * mmiotracing
321 */
322 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && 318 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
323 reg != 0x130d && reg != 0x1311 && reg != 0x60081d) 319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
324 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", 320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
325 reg); 321 reg);
326 322
327 /* Trust the init scripts on G80 */ 323 if (reg >= (8*1024*1024)) {
328 if (dev_priv->card_type >= NV_50) 324 NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
329 return 1; 325 return 0;
330
331 #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
332 if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
333 return 1;
334 if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
335 return 1;
336 if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
337 return 1;
338 if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
339 (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
340 return 1;
341 if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
342 WITHIN(reg, 0xc000, 0x48))
343 return 1;
344 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
345 return 1;
346 if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
347 if (reg == 0x00011014 || reg == 0x00020328)
348 return 1;
349 if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
350 return 1;
351 } 326 }
352 if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
353 return 1;
354 if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
355 return 1;
356 if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
357 return 1;
358 if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
359 return 1;
360 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
361 return 1;
362 if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
363 WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
364 return 1;
365 #undef WITHIN
366 327
367 NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg); 328 return 1;
368
369 return 0;
370} 329}
371 330
372static bool 331static bool
@@ -3196,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
3196 } 3155 }
3197#ifdef __powerpc__ 3156#ifdef __powerpc__
3198 /* Powerbook specific quirks */ 3157 /* Powerbook specific quirks */
3199 if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329)) 3158 if ((dev->pci_device & 0xffff) == 0x0179 ||
3200 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); 3159 (dev->pci_device & 0xffff) == 0x0189 ||
3201 if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) { 3160 (dev->pci_device & 0xffff) == 0x0329) {
3202 if (script == LVDS_PANEL_ON) { 3161 if (script == LVDS_RESET) {
3203 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31)); 3162 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3204 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1); 3163
3205 } 3164 } else if (script == LVDS_PANEL_ON) {
3206 if (script == LVDS_PANEL_OFF) { 3165 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3207 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31)); 3166 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3208 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3); 3167 | (1 << 31));
3168 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3169 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3170
3171 } else if (script == LVDS_PANEL_OFF) {
3172 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3173 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3174 & ~(1 << 31));
3175 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3176 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3209 } 3177 }
3210 } 3178 }
3211#endif 3179#endif
@@ -5434,52 +5402,49 @@ static bool
5434parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, 5402parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5435 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 5403 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5436{ 5404{
5437 if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 && 5405 switch (conn & 0x0000000f) {
5438 conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 && 5406 case 0:
5439 conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 && 5407 entry->type = OUTPUT_ANALOG;
5440 conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 && 5408 break;
5441 conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 && 5409 case 1:
5442 conn != 0xf2205004 && conn != 0xf2209004) { 5410 entry->type = OUTPUT_TV;
5443 NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n"); 5411 break;
5444 5412 case 2:
5445 /* cause output setting to fail for !TV, so message is seen */ 5413 case 3:
5446 if ((conn & 0xf) != 0x1)
5447 dcb->entries = 0;
5448
5449 return false;
5450 }
5451 /* most of the below is a "best guess" atm */
5452 entry->type = conn & 0xf;
5453 if (entry->type == 2)
5454 /* another way of specifying straps based lvds... */
5455 entry->type = OUTPUT_LVDS; 5414 entry->type = OUTPUT_LVDS;
5456 if (entry->type == 4) { /* digital */ 5415 break;
5457 if (conn & 0x10) 5416 case 4:
5458 entry->type = OUTPUT_LVDS; 5417 switch ((conn & 0x000000f0) >> 4) {
5459 else 5418 case 0:
5460 entry->type = OUTPUT_TMDS; 5419 entry->type = OUTPUT_TMDS;
5420 break;
5421 case 1:
5422 entry->type = OUTPUT_LVDS;
5423 break;
5424 default:
5425 NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
5426 (conn & 0x000000f0) >> 4);
5427 return false;
5428 }
5429 break;
5430 default:
5431 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
5432 return false;
5461 } 5433 }
5462 /* what's in bits 5-13? could be some encoder maker thing, in tv case */ 5434
5463 entry->i2c_index = (conn >> 14) & 0xf; 5435 entry->i2c_index = (conn & 0x0003c000) >> 14;
5464 /* raw heads field is in range 0-1, so move to 1-2 */ 5436 entry->heads = ((conn & 0x001c0000) >> 18) + 1;
5465 entry->heads = ((conn >> 18) & 0x7) + 1; 5437 entry->or = entry->heads; /* same as heads, hopefully safe enough */
5466 entry->location = (conn >> 21) & 0xf; 5438 entry->location = (conn & 0x01e00000) >> 21;
5467 /* unused: entry->bus = (conn >> 25) & 0x7; */ 5439 entry->bus = (conn & 0x0e000000) >> 25;
5468 /* set or to be same as heads -- hopefully safe enough */
5469 entry->or = entry->heads;
5470 entry->duallink_possible = false; 5440 entry->duallink_possible = false;
5471 5441
5472 switch (entry->type) { 5442 switch (entry->type) {
5473 case OUTPUT_ANALOG: 5443 case OUTPUT_ANALOG:
5474 entry->crtconf.maxfreq = (conf & 0xffff) * 10; 5444 entry->crtconf.maxfreq = (conf & 0xffff) * 10;
5475 break; 5445 break;
5476 case OUTPUT_LVDS: 5446 case OUTPUT_TV:
5477 /* 5447 entry->tvconf.has_component_output = false;
5478 * This is probably buried in conn's unknown bits.
5479 * This will upset EDID-ful models, if they exist
5480 */
5481 entry->lvdsconf.use_straps_for_mode = true;
5482 entry->lvdsconf.use_power_scripts = true;
5483 break; 5448 break;
5484 case OUTPUT_TMDS: 5449 case OUTPUT_TMDS:
5485 /* 5450 /*
@@ -5488,8 +5453,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5488 */ 5453 */
5489 fabricate_vga_output(dcb, entry->i2c_index, entry->heads); 5454 fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
5490 break; 5455 break;
5491 case OUTPUT_TV: 5456 case OUTPUT_LVDS:
5492 entry->tvconf.has_component_output = false; 5457 if ((conn & 0x00003f00) != 0x10)
5458 entry->lvdsconf.use_straps_for_mode = true;
5459 entry->lvdsconf.use_power_scripts = true;
5460 break;
5461 default:
5493 break; 5462 break;
5494 } 5463 }
5495 5464
@@ -5564,11 +5533,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
5564 dcb->entries = newentries; 5533 dcb->entries = newentries;
5565} 5534}
5566 5535
5567static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) 5536static int
5537parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5568{ 5538{
5539 struct drm_nouveau_private *dev_priv = dev->dev_private;
5569 struct bios_parsed_dcb *bdcb = &bios->bdcb; 5540 struct bios_parsed_dcb *bdcb = &bios->bdcb;
5570 struct parsed_dcb *dcb; 5541 struct parsed_dcb *dcb;
5571 uint16_t dcbptr, i2ctabptr = 0; 5542 uint16_t dcbptr = 0, i2ctabptr = 0;
5572 uint8_t *dcbtable; 5543 uint8_t *dcbtable;
5573 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; 5544 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
5574 bool configblock = true; 5545 bool configblock = true;
@@ -5579,16 +5550,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
5579 dcb->entries = 0; 5550 dcb->entries = 0;
5580 5551
5581 /* get the offset from 0x36 */ 5552 /* get the offset from 0x36 */
5582 dcbptr = ROM16(bios->data[0x36]); 5553 if (dev_priv->card_type > NV_04) {
5554 dcbptr = ROM16(bios->data[0x36]);
5555 if (dcbptr == 0x0000)
5556 NV_WARN(dev, "No output data (DCB) found in BIOS\n");
5557 }
5583 5558
5559 /* this situation likely means a really old card, pre DCB */
5584 if (dcbptr == 0x0) { 5560 if (dcbptr == 0x0) {
5585 NV_WARN(dev, "No output data (DCB) found in BIOS, " 5561 NV_INFO(dev, "Assuming a CRT output exists\n");
5586 "assuming a CRT output exists\n");
5587 /* this situation likely means a really old card, pre DCB */
5588 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); 5562 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
5589 5563
5590 if (nv04_tv_identify(dev, 5564 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
5591 bios->legacy.i2c_indices.tv) >= 0)
5592 fabricate_tv_output(dcb, twoHeads); 5565 fabricate_tv_output(dcb, twoHeads);
5593 5566
5594 return 0; 5567 return 0;
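The rewritten parse_dcb15_entry() is essentially a bitfield decode of the DCB 1.5 connection word. The standalone sketch below pulls out the same fields with the same masks and shifts; the struct, helper name and sample word are illustrative only and do not come from the driver.

#include <stdio.h>
#include <stdint.h>

struct dcb15_fields {
	unsigned type;		/* bits 3:0   - output type */
	unsigned subtype;	/* bits 7:4   - TMDS vs LVDS when type == 4 */
	unsigned i2c_index;	/* bits 17:14 */
	unsigned heads;		/* bits 20:18, stored as 1..2 */
	unsigned location;	/* bits 24:21 */
	unsigned bus;		/* bits 27:25 */
};

static struct dcb15_fields dcb15_decode(uint32_t conn)
{
	struct dcb15_fields f;

	f.type      =  conn & 0x0000000f;
	f.subtype   = (conn & 0x000000f0) >> 4;
	f.i2c_index = (conn & 0x0003c000) >> 14;
	f.heads     = ((conn & 0x001c0000) >> 18) + 1;
	f.location  = (conn & 0x01e00000) >> 21;
	f.bus       = (conn & 0x0e000000) >> 25;
	return f;
}

int main(void)
{
	struct dcb15_fields f = dcb15_decode(0xf2204301);

	printf("type=%u sub=%u i2c=%u heads=%u loc=%u bus=%u\n",
	       f.type, f.subtype, f.i2c_index, f.heads, f.location, f.bus);
	return 0;
}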
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e342a418d434..db0ed4c13f98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -469,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
469 469
470 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, 470 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
471 evict, no_wait, new_mem); 471 evict, no_wait, new_mem);
472 if (nvbo->channel && nvbo->channel != chan)
473 ret = nouveau_fence_wait(fence, NULL, false, false);
472 nouveau_fence_unref((void *)&fence); 474 nouveau_fence_unref((void *)&fence);
473 return ret; 475 return ret;
474} 476}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5a10deb8bdbd..7e6d673f3a23 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
24 * 24 *
25 */ 25 */
26 26
27#include <acpi/button.h>
28
27#include "drmP.h" 29#include "drmP.h"
28#include "drm_edid.h" 30#include "drm_edid.h"
29#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32
30#include "nouveau_reg.h" 33#include "nouveau_reg.h"
31#include "nouveau_drv.h" 34#include "nouveau_drv.h"
32#include "nouveau_encoder.h" 35#include "nouveau_encoder.h"
@@ -83,14 +86,16 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
83static void 86static void
84nouveau_connector_destroy(struct drm_connector *drm_connector) 87nouveau_connector_destroy(struct drm_connector *drm_connector)
85{ 88{
86 struct nouveau_connector *connector = nouveau_connector(drm_connector); 89 struct nouveau_connector *nv_connector =
87 struct drm_device *dev = connector->base.dev; 90 nouveau_connector(drm_connector);
91 struct drm_device *dev = nv_connector->base.dev;
88 92
89 NV_DEBUG_KMS(dev, "\n"); 93 NV_DEBUG_KMS(dev, "\n");
90 94
91 if (!connector) 95 if (!nv_connector)
92 return; 96 return;
93 97
98 kfree(nv_connector->edid);
94 drm_sysfs_connector_remove(drm_connector); 99 drm_sysfs_connector_remove(drm_connector);
95 drm_connector_cleanup(drm_connector); 100 drm_connector_cleanup(drm_connector);
96 kfree(drm_connector); 101 kfree(drm_connector);
@@ -233,10 +238,21 @@ nouveau_connector_detect(struct drm_connector *connector)
233 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 238 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
234 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); 239 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
235 if (nv_encoder && nv_connector->native_mode) { 240 if (nv_encoder && nv_connector->native_mode) {
241#ifdef CONFIG_ACPI
242 if (!nouveau_ignorelid && !acpi_lid_open())
243 return connector_status_disconnected;
244#endif
236 nouveau_connector_set_encoder(connector, nv_encoder); 245 nouveau_connector_set_encoder(connector, nv_encoder);
237 return connector_status_connected; 246 return connector_status_connected;
238 } 247 }
239 248
249 /* Cleanup the previous EDID block. */
250 if (nv_connector->edid) {
251 drm_mode_connector_update_edid_property(connector, NULL);
252 kfree(nv_connector->edid);
253 nv_connector->edid = NULL;
254 }
255
240 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); 256 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
241 if (i2c) { 257 if (i2c) {
242 nouveau_connector_ddc_prepare(connector, &flags); 258 nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +263,7 @@ nouveau_connector_detect(struct drm_connector *connector)
247 if (!nv_connector->edid) { 263 if (!nv_connector->edid) {
248 NV_ERROR(dev, "DDC responded, but no EDID for %s\n", 264 NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
249 drm_get_connector_name(connector)); 265 drm_get_connector_name(connector));
250 return connector_status_disconnected; 266 goto detect_analog;
251 } 267 }
252 268
253 if (nv_encoder->dcb->type == OUTPUT_DP && 269 if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +297,7 @@ nouveau_connector_detect(struct drm_connector *connector)
281 return connector_status_connected; 297 return connector_status_connected;
282 } 298 }
283 299
300detect_analog:
284 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 301 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
285 if (!nv_encoder) 302 if (!nv_encoder)
286 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 303 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -687,8 +704,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
687 */ 704 */
688 if (!nv_connector->edid && !nv_connector->native_mode && 705 if (!nv_connector->edid && !nv_connector->native_mode &&
689 !dev_priv->VBIOS.pub.fp_no_ddc) { 706 !dev_priv->VBIOS.pub.fp_no_ddc) {
690 nv_connector->edid = 707 struct edid *edid =
691 (struct edid *)nouveau_bios_embedded_edid(dev); 708 (struct edid *)nouveau_bios_embedded_edid(dev);
709 if (edid) {
710 nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
711 *(nv_connector->edid) = *edid;
712 }
692 } 713 }
693 714
694 if (!nv_connector->edid) 715 if (!nv_connector->edid)
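The LVDS path above now duplicates the BIOS-embedded EDID into a kmalloc'd buffer so that nouveau_connector_destroy() can kfree() nv_connector->edid unconditionally. A minimal user-space sketch of that ownership pattern, with malloc/free standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128		/* one EDID block */

/* Copy the BIOS-provided block into memory we own, so teardown can always
 * free it without caring where it originally came from. */
static unsigned char *dup_edid(const unsigned char *bios_edid)
{
	unsigned char *copy;

	if (!bios_edid)
		return NULL;	/* nothing embedded in the BIOS */

	copy = malloc(EDID_LENGTH);
	if (copy)
		memcpy(copy, bios_edid, EDID_LENGTH);
	return copy;
}

int main(void)
{
	unsigned char bios_blob[EDID_LENGTH] = { 0x00, 0xff, 0xff, 0xff };
	unsigned char *edid = dup_edid(bios_blob);

	printf("copied: %s\n", edid ? "yes" : "no");
	free(edid);		/* safe even when dup_edid() returned NULL */
	return 0;
}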
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 7afbe8b40d51..50d9e67745af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -126,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
126 chan->dma.cur += nr_dwords; 126 chan->dma.cur += nr_dwords;
127} 127}
128 128
129static inline bool 129/* Fetch and adjust GPU GET pointer
130READ_GET(struct nouveau_channel *chan, uint32_t *get) 130 *
131 * Returns:
132 * value >= 0, the adjusted GET pointer
133 * -EINVAL if GET pointer currently outside main push buffer
134 * -EBUSY if timeout exceeded
135 */
136static inline int
137READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
131{ 138{
132 uint32_t val; 139 uint32_t val;
133 140
134 val = nvchan_rd32(chan, chan->user_get); 141 val = nvchan_rd32(chan, chan->user_get);
135 if (val < chan->pushbuf_base || 142
136 val > chan->pushbuf_base + (chan->dma.max << 2)) { 143 /* reset counter as long as GET is still advancing, this is
137 /* meaningless to dma_wait() except to know whether the 144 * to avoid misdetecting a GPU lockup if the GPU happens to
138 * GPU has stalled or not 145 * just be processing an operation that takes a long time
139 */ 146 */
140 *get = val; 147 if (val != *prev_get) {
141 return false; 148 *prev_get = val;
149 *timeout = 0;
150 }
151
152 if ((++*timeout & 0xff) == 0) {
153 DRM_UDELAY(1);
154 if (*timeout > 100000)
155 return -EBUSY;
142 } 156 }
143 157
144 *get = (val - chan->pushbuf_base) >> 2; 158 if (val < chan->pushbuf_base ||
145 return true; 159 val > chan->pushbuf_base + (chan->dma.max << 2))
160 return -EINVAL;
161
162 return (val - chan->pushbuf_base) >> 2;
146} 163}
147 164
148int 165int
149nouveau_dma_wait(struct nouveau_channel *chan, int size) 166nouveau_dma_wait(struct nouveau_channel *chan, int size)
150{ 167{
151 uint32_t get, prev_get = 0, cnt = 0; 168 uint32_t prev_get = 0, cnt = 0;
152 bool get_valid; 169 int get;
153 170
154 while (chan->dma.free < size) { 171 while (chan->dma.free < size) {
155 /* reset counter as long as GET is still advancing, this is 172 get = READ_GET(chan, &prev_get, &cnt);
156 * to avoid misdetecting a GPU lockup if the GPU happens to 173 if (unlikely(get == -EBUSY))
157 * just be processing an operation that takes a long time 174 return -EBUSY;
158 */
159 get_valid = READ_GET(chan, &get);
160 if (get != prev_get) {
161 prev_get = get;
162 cnt = 0;
163 }
164
165 if ((++cnt & 0xff) == 0) {
166 DRM_UDELAY(1);
167 if (cnt > 100000)
168 return -EBUSY;
169 }
170 175
171 /* loop until we have a usable GET pointer. the value 176 /* loop until we have a usable GET pointer. the value
172 * we read from the GPU may be outside the main ring if 177 * we read from the GPU may be outside the main ring if
@@ -177,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
177 * from the SKIPS area, so the code below doesn't have to deal 182 * from the SKIPS area, so the code below doesn't have to deal
178 * with some fun corner cases. 183 * with some fun corner cases.
179 */ 184 */
180 if (!get_valid || get < NOUVEAU_DMA_SKIPS) 185 if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
181 continue; 186 continue;
182 187
183 if (get <= chan->dma.cur) { 188 if (get <= chan->dma.cur) {
@@ -203,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
203 * after processing the currently pending commands. 208 * after processing the currently pending commands.
204 */ 209 */
205 OUT_RING(chan, chan->pushbuf_base | 0x20000000); 210 OUT_RING(chan, chan->pushbuf_base | 0x20000000);
211
212 /* wait for GET to depart from the skips area.
213 * prevents writing GET==PUT and causing a race
214 * condition that causes us to think the GPU is
215 * idle when it's not.
216 */
217 do {
218 get = READ_GET(chan, &prev_get, &cnt);
219 if (unlikely(get == -EBUSY))
220 return -EBUSY;
221 if (unlikely(get == -EINVAL))
222 continue;
223 } while (get <= NOUVEAU_DMA_SKIPS);
206 WRITE_PUT(NOUVEAU_DMA_SKIPS); 224 WRITE_PUT(NOUVEAU_DMA_SKIPS);
207 225
208 /* we're now submitting commands at the start of 226 /* we're now submitting commands at the start of
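The reworked READ_GET() folds the old lockup-detection loop into the helper itself: the timeout counter is reset whenever GET advances, the poll backs off every 256 iterations, and an out-of-ring value is reported separately from a hang. The sketch below reproduces that shape in user space; the register read is simulated, and the -1/-2 return codes merely stand in for -EINVAL/-EBUSY.

#include <stdio.h>
#include <stdint.h>

#define EINVAL_RET (-1)		/* GET outside the main push buffer */
#define EBUSY_RET  (-2)		/* timeout exceeded, GPU looks hung */

static uint32_t fake_hw_get;	/* stand-in for the channel's user_get register */

static int read_get(uint32_t pushbuf_base, uint32_t pushbuf_size,
		    uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val = fake_hw_get;	/* would be an MMIO read */

	/* Reset the timeout whenever GET is still advancing. */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	/* Back off every 256 polls; give up after ~100000 of them. */
	if ((++*timeout & 0xff) == 0) {
		/* the real code udelay()s here */
		if (*timeout > 100000)
			return EBUSY_RET;
	}

	if (val < pushbuf_base || val > pushbuf_base + pushbuf_size)
		return EINVAL_RET;

	return (val - pushbuf_base) >> 2;	/* dword offset into the ring */
}

int main(void)
{
	uint32_t prev = 0, timeout = 0;

	fake_hw_get = 0x1000 + 64;
	printf("%d\n", read_get(0x1000, 0x2000, &prev, &timeout));	/* 16 */
	return 0;
}

Callers then only have to distinguish "retry" (EINVAL_RET, GET still in the skips area or outside the ring) from "give up" (EBUSY_RET), which is exactly how nouveau_dma_wait() uses the return value.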
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 9e2926c48579..dd4937224220 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
490 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { 490 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
491 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", 491 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
492 nv_rd32(dev, NV50_AUXCH_CTRL(index))); 492 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
493 return -EBUSY; 493 ret = -EBUSY;
494 goto out;
494 } 495 }
495 496
496 udelay(400); 497 udelay(400);
@@ -501,6 +502,11 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
501 break; 502 break;
502 } 503 }
503 504
505 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
506 ret = -EREMOTEIO;
507 goto out;
508 }
509
504 if (cmd & 1) { 510 if (cmd & 1) {
505 for (i = 0; i < 4; i++) { 511 for (i = 0; i < 4; i++) {
506 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 06eb993e0883..343ab7f17ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -71,6 +71,10 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
71int nouveau_uscript_tmds = -1; 71int nouveau_uscript_tmds = -1;
72module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400); 72module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
73 73
74MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77
74MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 78MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
75 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 79 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
76 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 80 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 026419fe8791..6b9690418bc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -509,6 +509,8 @@ struct drm_nouveau_private {
509 void __iomem *ramin; 509 void __iomem *ramin;
510 uint32_t ramin_size; 510 uint32_t ramin_size;
511 511
512 struct nouveau_bo *vga_ram;
513
512 struct workqueue_struct *wq; 514 struct workqueue_struct *wq;
513 struct work_struct irq_work; 515 struct work_struct irq_work;
514 516
@@ -675,6 +677,7 @@ extern char *nouveau_tv_norm;
675extern int nouveau_reg_debug; 677extern int nouveau_reg_debug;
676extern char *nouveau_vbios; 678extern char *nouveau_vbios;
677extern int nouveau_ctxfw; 679extern int nouveau_ctxfw;
680extern int nouveau_ignorelid;
678 681
679/* nouveau_state.c */ 682/* nouveau_state.c */
680extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 683extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2009db2426c3..6ac804b0c9f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -321,6 +321,7 @@ retry:
321 else { 321 else {
322 NV_ERROR(dev, "invalid valid domains: 0x%08x\n", 322 NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
323 b->valid_domains); 323 b->valid_domains);
324 list_add_tail(&nvbo->entry, &op->both_list);
324 validate_fini(op, NULL); 325 validate_fini(op, NULL);
325 return -EINVAL; 326 return -EINVAL;
326 } 327 }
@@ -466,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
466static int 467static int
467nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, 468nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
468 struct drm_nouveau_gem_pushbuf_bo *bo, 469 struct drm_nouveau_gem_pushbuf_bo *bo,
469 int nr_relocs, uint64_t ptr_relocs, 470 unsigned nr_relocs, uint64_t ptr_relocs,
470 int nr_dwords, int first_dword, 471 unsigned nr_dwords, unsigned first_dword,
471 uint32_t *pushbuf, bool is_iomem) 472 uint32_t *pushbuf, bool is_iomem)
472{ 473{
473 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 474 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
474 struct drm_device *dev = chan->dev; 475 struct drm_device *dev = chan->dev;
475 int ret = 0, i; 476 int ret = 0;
477 unsigned i;
476 478
477 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); 479 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
478 if (IS_ERR(reloc)) 480 if (IS_ERR(reloc))
@@ -667,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
667 } 669 }
668 pbbo = nouveau_gem_object(gem); 670 pbbo = nouveau_gem_object(gem);
669 671
672 if ((req->offset & 3) || req->nr_dwords < 2 ||
673 (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
674 (unsigned long)req->nr_dwords >
 675 ((unsigned long)(pbbo->bo.mem.size - req->offset) >> 2)) {
676 NV_ERROR(dev, "pb call misaligned or out of bounds: "
677 "%d + %d * 4 > %ld\n",
678 req->offset, req->nr_dwords, pbbo->bo.mem.size);
679 ret = -EINVAL;
680 drm_gem_object_unreference(gem);
681 goto out;
682 }
683
670 ret = ttm_bo_reserve(&pbbo->bo, false, false, true, 684 ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
671 chan->fence.sequence); 685 chan->fence.sequence);
672 if (ret) { 686 if (ret) {
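
The bounds check added above is worth spelling out: rather than computing req->offset + req->nr_dwords * 4 (which a hostile nr_dwords could overflow), it compares the dword count against the space left in the buffer, (size - offset) >> 2. A self-contained sketch of the same validation, with hypothetical names rather than the driver's structures:

    #include <stdint.h>
    #include <stdio.h>

    static int pushbuf_call_in_bounds(uint64_t offset, uint64_t nr_dwords,
                                      uint64_t bo_size)
    {
        if (offset & 3)                 /* must be dword aligned */
            return 0;
        if (nr_dwords < 2)              /* need at least a header plus one word */
            return 0;
        if (offset > bo_size)           /* start must lie inside the BO */
            return 0;
        /* compare counts instead of adding, so nr_dwords cannot wrap the sum */
        if (nr_dwords > (bo_size - offset) >> 2)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", pushbuf_call_in_bounds(0x100, 64, 0x1000));          /* 1 */
        printf("%d\n", pushbuf_call_in_bounds(0x2, 64, 0x1000));            /* 0: misaligned */
        printf("%d\n", pushbuf_call_in_bounds(0xff0, 0xffffffffu, 0x1000)); /* 0: does not fit */
        return 0;
    }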
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 919a619ca7fa..3b9bad66162a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -483,6 +483,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
483 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { 483 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
484 if (nouveau_pgraph_intr_swmthd(dev, &trap)) 484 if (nouveau_pgraph_intr_swmthd(dev, &trap))
485 unhandled = 1; 485 unhandled = 1;
486 } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
487 uint32_t v = nv_rd32(dev, 0x402000);
488 nv_wr32(dev, 0x402000, v);
489
490 /* dump the error anyway for now: it's useful for
491 Gallium development */
492 unhandled = 1;
486 } else { 493 } else {
487 unhandled = 1; 494 unhandled = 1;
488 } 495 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fb9bdd6edf1f..8f3a12f614ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -383,9 +383,8 @@ void nouveau_mem_close(struct drm_device *dev)
383{ 383{
384 struct drm_nouveau_private *dev_priv = dev->dev_private; 384 struct drm_nouveau_private *dev_priv = dev->dev_private;
385 385
386 if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type) 386 nouveau_bo_unpin(dev_priv->vga_ram);
387 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0); 387 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
388 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
389 388
390 ttm_bo_device_release(&dev_priv->ttm.bdev); 389 ttm_bo_device_release(&dev_priv->ttm.bdev);
391 390
@@ -622,6 +621,15 @@ nouveau_mem_init(struct drm_device *dev)
622 return ret; 621 return ret;
623 } 622 }
624 623
624 ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
625 0, 0, true, true, &dev_priv->vga_ram);
626 if (ret == 0)
627 ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
628 if (ret) {
629 NV_WARN(dev, "failed to reserve VGA memory\n");
630 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
631 }
632
625 /* GART */ 633 /* GART */
626#if !defined(__powerpc__) && !defined(__ia64__) 634#if !defined(__powerpc__) && !defined(__ia64__)
627 if (drm_device_is_agp(dev) && dev->agp) { 635 if (drm_device_is_agp(dev) && dev->agp) {
@@ -653,6 +661,7 @@ nouveau_mem_init(struct drm_device *dev)
653 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), 661 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
654 drm_get_resource_len(dev, 1), 662 drm_get_resource_len(dev, 1),
655 DRM_MTRR_WC); 663 DRM_MTRR_WC);
664
656 return 0; 665 return 0;
657} 666}
658 667
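
The memory-init hunk above reserves a small block of VRAM ("vga_ram") at startup so the VGA framebuffer area is never handed out to clients, treats a failed reservation as a warning rather than a fatal error, and releases the buffer symmetrically in nouveau_mem_close(). A rough sketch of that reserve-or-warn-and-continue pattern, using a stand-in allocator instead of the nouveau_bo/TTM API:

    #include <stdio.h>
    #include <stdlib.h>

    struct vram_buf { size_t size; int pinned; };

    static struct vram_buf *vga_ram;

    static int buf_new(size_t size, struct vram_buf **out)
    {
        *out = calloc(1, sizeof(**out));
        if (!*out)
            return -1;
        (*out)->size = size;
        return 0;
    }

    static int  buf_pin(struct vram_buf *b)   { b->pinned = 1; return 0; }
    static void buf_unpin(struct vram_buf *b) { b->pinned = 0; }
    static void buf_free(struct vram_buf **b) { free(*b); *b = NULL; }

    static void mem_init(void)
    {
        int ret = buf_new(256 * 1024, &vga_ram);   /* 256 KiB, as in the hunk */
        if (ret == 0)
            ret = buf_pin(vga_ram);
        if (ret) {
            fprintf(stderr, "failed to reserve VGA memory\n");
            buf_free(&vga_ram);                    /* non-fatal: keep going */
        }
    }

    static void mem_close(void)
    {
        if (vga_ram) {                             /* guard in case init failed */
            buf_unpin(vga_ram);
            buf_free(&vga_ram);
        }
    }

    int main(void) { mem_init(); mem_close(); return 0; }

The sketch NULL-checks vga_ram at teardown in case the reservation failed earlier; whether the real buffer-object helpers tolerate a NULL buffer is outside this hunk.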
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 09b9a46dfc0e..f2d0187ba152 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -525,6 +525,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
525 engine->mc.takedown(dev); 525 engine->mc.takedown(dev);
526 526
527 mutex_lock(&dev->struct_mutex); 527 mutex_lock(&dev->struct_mutex);
528 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
528 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); 529 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
529 mutex_unlock(&dev->struct_mutex); 530 mutex_unlock(&dev->struct_mutex);
530 nouveau_sgdma_takedown(dev); 531 nouveau_sgdma_takedown(dev);
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206625a2..a3b9563a6f60 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
30 * of vram. For now, only reserve a small piece until we know 30 * of vram. For now, only reserve a small piece until we know
31 * more about what each chipset requires. 31 * more about what each chipset requires.
32 */ 32 */
33 switch (dev_priv->chipset & 0xf0) { 33 switch (dev_priv->chipset) {
34 case 0x40: 34 case 0x40:
35 case 0x47: 35 case 0x47:
36 case 0x49: 36 case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 118d3285fd8c..40b7360841f8 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -432,6 +432,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433 struct drm_device *dev = crtc->dev; 433 struct drm_device *dev = crtc->dev;
434 struct drm_encoder *encoder; 434 struct drm_encoder *encoder;
435 uint32_t dac = 0, sor = 0;
435 436
436 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 437 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
437 438
@@ -439,9 +440,28 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 440 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
440 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 441 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
441 442
442 if (drm_helper_encoder_in_use(encoder)) 443 if (!drm_helper_encoder_in_use(encoder))
443 continue; 444 continue;
444 445
446 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
447 nv_encoder->dcb->type == OUTPUT_TV)
448 dac |= (1 << nv_encoder->or);
449 else
450 sor |= (1 << nv_encoder->or);
451 }
452
453 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
454 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
455
456 if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
457 nv_encoder->dcb->type == OUTPUT_TV) {
458 if (dac & (1 << nv_encoder->or))
459 continue;
460 } else {
461 if (sor & (1 << nv_encoder->or))
462 continue;
463 }
464
445 nv_encoder->disconnect(nv_encoder); 465 nv_encoder->disconnect(nv_encoder);
446 } 466 }
447 467
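
The rewrite of nv50_crtc_prepare() above switches from per-encoder to per-output-resource bookkeeping: a first pass over the encoder list records which DAC and SOR indices are still driven by an in-use encoder, and a second pass only disconnects encoders whose resource did not get marked, so an idle encoder that shares its OR with an active one is left alone. A compact illustration with invented encoder structures (not the DRM ones):

    #include <stdint.h>
    #include <stdio.h>

    enum out_type { OUT_ANALOG, OUT_TV, OUT_TMDS };

    struct enc {
        enum out_type type;
        int or_idx;          /* output resource index */
        int in_use;
    };

    static void disconnect(struct enc *e) { printf("disconnect OR %d\n", e->or_idx); }

    static int uses_dac(const struct enc *e)
    {
        return e->type == OUT_ANALOG || e->type == OUT_TV;
    }

    static void prepare(struct enc *enc, int n)
    {
        uint32_t dac = 0, sor = 0;
        int i;

        /* pass 1: collect the resources that must stay up */
        for (i = 0; i < n; i++) {
            if (!enc[i].in_use)
                continue;
            if (uses_dac(&enc[i]))
                dac |= 1u << enc[i].or_idx;
            else
                sor |= 1u << enc[i].or_idx;
        }

        /* pass 2: tear down only resources nobody claimed */
        for (i = 0; i < n; i++) {
            uint32_t mask = uses_dac(&enc[i]) ? dac : sor;
            if (mask & (1u << enc[i].or_idx))
                continue;
            disconnect(&enc[i]);
        }
    }

    int main(void)
    {
        struct enc e[] = {
            { OUT_TMDS,   0, 1 },   /* active: SOR 0 stays up           */
            { OUT_TMDS,   0, 0 },   /* idle but shares SOR 0: kept      */
            { OUT_TMDS,   1, 0 },   /* idle, SOR 1 unused: disconnected */
            { OUT_ANALOG, 0, 0 },   /* idle, DAC 0 unused: disconnected */
        };
        prepare(e, 4);
        return 0;
    }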
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 39caf167587d..32b244bcb482 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
272 return ret; 272 return ret;
273 ramfc = chan->ramfc->gpuobj; 273 ramfc = chan->ramfc->gpuobj;
274 274
275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256, 275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
276 0, &chan->cache); 276 0, &chan->cache);
277 if (ret) 277 if (ret)
278 return ret; 278 return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ca79f32be44c..20319e59d368 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
84 nv_wr32(dev, 0x400804, 0xc0000000); 84 nv_wr32(dev, 0x400804, 0xc0000000);
85 nv_wr32(dev, 0x406800, 0xc0000000); 85 nv_wr32(dev, 0x406800, 0xc0000000);
86 nv_wr32(dev, 0x400c04, 0xc0000000); 86 nv_wr32(dev, 0x400c04, 0xc0000000);
87 nv_wr32(dev, 0x401804, 0xc0000000); 87 nv_wr32(dev, 0x401800, 0xc0000000);
88 nv_wr32(dev, 0x405018, 0xc0000000); 88 nv_wr32(dev, 0x405018, 0xc0000000);
89 nv_wr32(dev, 0x402000, 0xc0000000); 89 nv_wr32(dev, 0x402000, 0xc0000000);
90 90
@@ -282,6 +282,7 @@ nv50_graph_unload_context(struct drm_device *dev)
282 return 0; 282 return 0;
283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284 284
285 nouveau_wait_for_idle(dev);
285 nv_wr32(dev, 0x400500, fifo & ~1); 286 nv_wr32(dev, 0x400500, fifo & ~1);
286 nv_wr32(dev, 0x400784, inst); 287 nv_wr32(dev, 0x400784, inst);
287 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 288 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index e395c16d30f5..ecf1936b8224 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -90,11 +90,24 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
90{ 90{
91 struct drm_device *dev = encoder->dev; 91 struct drm_device *dev = encoder->dev;
92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
93 struct drm_encoder *enc;
93 uint32_t val; 94 uint32_t val;
94 int or = nv_encoder->or; 95 int or = nv_encoder->or;
95 96
96 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); 97 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
97 98
99 nv_encoder->last_dpms = mode;
100 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
101 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102
103 if (nvenc == nv_encoder ||
104 nvenc->dcb->or != nv_encoder->dcb->or)
105 continue;
106
107 if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
108 return;
109 }
110
98 /* wait for it to be done */ 111 /* wait for it to be done */
99 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), 112 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
100 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { 113 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
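
The DPMS change above records the requested state on the encoder and then refuses to touch the SOR hardware if any other encoder bound to the same output resource still reports DRM_MODE_DPMS_ON. A small sketch of that guard with hypothetical encoder records (the DPMS_ON/OFF values mirror the DRM constants 0 and 3):

    #include <stdio.h>

    #define DPMS_ON  0
    #define DPMS_OFF 3

    struct enc { int or_idx; int last_dpms; };

    /* returns 1 if another encoder on the same OR still wants it powered */
    static int or_still_wanted(struct enc *all, int n, struct enc *self, int mode)
    {
        int i;

        self->last_dpms = mode;          /* record our own request first */
        for (i = 0; i < n; i++) {
            if (&all[i] == self || all[i].or_idx != self->or_idx)
                continue;
            if (all[i].last_dpms == DPMS_ON)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct enc e[] = { { 2, DPMS_ON }, { 2, DPMS_ON } };

        /* encoder 0 asks for OFF, but encoder 1 shares OR 2 and is still ON */
        if (or_still_wanted(e, 2, &e[0], DPMS_OFF))
            printf("skip power-down, OR 2 still in use\n");
        return 0;
    }

Recording last_dpms before the scan means that when the second encoder on the shared OR is eventually switched off, the scan no longer finds an active user and the hardware write goes through.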
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 388140a7e651..e3b44562d265 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -246,6 +246,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
246 case ATOM_WS_ATTRIBUTES: 246 case ATOM_WS_ATTRIBUTES:
247 val = gctx->io_attr; 247 val = gctx->io_attr;
248 break; 248 break;
249 case ATOM_WS_REGPTR:
250 val = gctx->reg_block;
251 break;
249 default: 252 default:
250 val = ctx->ws[idx]; 253 val = ctx->ws[idx];
251 } 254 }
@@ -385,6 +388,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
385 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 388 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
386} 389}
387 390
391static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
392{
393 uint32_t val = 0xCDCDCDCD;
394
395 switch (align) {
396 case ATOM_SRC_DWORD:
397 val = U32(*ptr);
398 (*ptr) += 4;
399 break;
400 case ATOM_SRC_WORD0:
401 case ATOM_SRC_WORD8:
402 case ATOM_SRC_WORD16:
403 val = U16(*ptr);
404 (*ptr) += 2;
405 break;
406 case ATOM_SRC_BYTE0:
407 case ATOM_SRC_BYTE8:
408 case ATOM_SRC_BYTE16:
409 case ATOM_SRC_BYTE24:
410 val = U8(*ptr);
411 (*ptr)++;
412 break;
413 }
414 return val;
415}
416
388static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, 417static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
389 int *ptr, uint32_t *saved, int print) 418 int *ptr, uint32_t *saved, int print)
390{ 419{
@@ -482,6 +511,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
482 case ATOM_WS_ATTRIBUTES: 511 case ATOM_WS_ATTRIBUTES:
483 gctx->io_attr = val; 512 gctx->io_attr = val;
484 break; 513 break;
514 case ATOM_WS_REGPTR:
515 gctx->reg_block = val;
516 break;
485 default: 517 default:
486 ctx->ws[idx] = val; 518 ctx->ws[idx] = val;
487 } 519 }
@@ -677,7 +709,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
677 SDEBUG(" dst: "); 709 SDEBUG(" dst: ");
678 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 710 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
679 SDEBUG(" src1: "); 711 SDEBUG(" src1: ");
680 src1 = atom_get_src(ctx, attr, ptr); 712 src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
681 SDEBUG(" src2: "); 713 SDEBUG(" src2: ");
682 src2 = atom_get_src(ctx, attr, ptr); 714 src2 = atom_get_src(ctx, attr, ptr);
683 dst &= src1; 715 dst &= src1;
@@ -809,6 +841,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
809 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); 841 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
810} 842}
811 843
844static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
845{
846 uint8_t attr = U8((*ptr)++), shift;
847 uint32_t saved, dst;
848 int dptr = *ptr;
849 attr &= 0x38;
850 attr |= atom_def_dst[attr >> 3] << 6;
851 SDEBUG(" dst: ");
852 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
853 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
854 SDEBUG(" shift: %d\n", shift);
855 dst <<= shift;
856 SDEBUG(" dst: ");
857 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
858}
859
860static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
861{
862 uint8_t attr = U8((*ptr)++), shift;
863 uint32_t saved, dst;
864 int dptr = *ptr;
865 attr &= 0x38;
866 attr |= atom_def_dst[attr >> 3] << 6;
867 SDEBUG(" dst: ");
868 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
869 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
870 SDEBUG(" shift: %d\n", shift);
871 dst >>= shift;
872 SDEBUG(" dst: ");
873 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
874}
875
812static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 876static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
813{ 877{
814 uint8_t attr = U8((*ptr)++), shift; 878 uint8_t attr = U8((*ptr)++), shift;
@@ -818,7 +882,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
818 attr |= atom_def_dst[attr >> 3] << 6; 882 attr |= atom_def_dst[attr >> 3] << 6;
819 SDEBUG(" dst: "); 883 SDEBUG(" dst: ");
820 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 884 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
821 shift = U8((*ptr)++); 885 shift = atom_get_src(ctx, attr, ptr);
822 SDEBUG(" shift: %d\n", shift); 886 SDEBUG(" shift: %d\n", shift);
823 dst <<= shift; 887 dst <<= shift;
824 SDEBUG(" dst: "); 888 SDEBUG(" dst: ");
@@ -834,7 +898,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
834 attr |= atom_def_dst[attr >> 3] << 6; 898 attr |= atom_def_dst[attr >> 3] << 6;
835 SDEBUG(" dst: "); 899 SDEBUG(" dst: ");
836 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 900 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
837 shift = U8((*ptr)++); 901 shift = atom_get_src(ctx, attr, ptr);
838 SDEBUG(" shift: %d\n", shift); 902 SDEBUG(" shift: %d\n", shift);
839 dst >>= shift; 903 dst >>= shift;
840 SDEBUG(" dst: "); 904 SDEBUG(" dst: ");
@@ -937,18 +1001,18 @@ static struct {
937 atom_op_or, ATOM_ARG_FB}, { 1001 atom_op_or, ATOM_ARG_FB}, {
938 atom_op_or, ATOM_ARG_PLL}, { 1002 atom_op_or, ATOM_ARG_PLL}, {
939 atom_op_or, ATOM_ARG_MC}, { 1003 atom_op_or, ATOM_ARG_MC}, {
940 atom_op_shl, ATOM_ARG_REG}, { 1004 atom_op_shift_left, ATOM_ARG_REG}, {
941 atom_op_shl, ATOM_ARG_PS}, { 1005 atom_op_shift_left, ATOM_ARG_PS}, {
942 atom_op_shl, ATOM_ARG_WS}, { 1006 atom_op_shift_left, ATOM_ARG_WS}, {
943 atom_op_shl, ATOM_ARG_FB}, { 1007 atom_op_shift_left, ATOM_ARG_FB}, {
944 atom_op_shl, ATOM_ARG_PLL}, { 1008 atom_op_shift_left, ATOM_ARG_PLL}, {
945 atom_op_shl, ATOM_ARG_MC}, { 1009 atom_op_shift_left, ATOM_ARG_MC}, {
946 atom_op_shr, ATOM_ARG_REG}, { 1010 atom_op_shift_right, ATOM_ARG_REG}, {
947 atom_op_shr, ATOM_ARG_PS}, { 1011 atom_op_shift_right, ATOM_ARG_PS}, {
948 atom_op_shr, ATOM_ARG_WS}, { 1012 atom_op_shift_right, ATOM_ARG_WS}, {
949 atom_op_shr, ATOM_ARG_FB}, { 1013 atom_op_shift_right, ATOM_ARG_FB}, {
950 atom_op_shr, ATOM_ARG_PLL}, { 1014 atom_op_shift_right, ATOM_ARG_PLL}, {
951 atom_op_shr, ATOM_ARG_MC}, { 1015 atom_op_shift_right, ATOM_ARG_MC}, {
952 atom_op_mul, ATOM_ARG_REG}, { 1016 atom_op_mul, ATOM_ARG_REG}, {
953 atom_op_mul, ATOM_ARG_PS}, { 1017 atom_op_mul, ATOM_ARG_PS}, {
954 atom_op_mul, ATOM_ARG_WS}, { 1018 atom_op_mul, ATOM_ARG_WS}, {
@@ -1058,8 +1122,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1058 1122
1059 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps); 1123 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1060 1124
1061 /* reset reg block */
1062 ctx->reg_block = 0;
1063 ectx.ctx = ctx; 1125 ectx.ctx = ctx;
1064 ectx.ps_shift = ps / 4; 1126 ectx.ps_shift = ps / 4;
1065 ectx.start = base; 1127 ectx.start = base;
@@ -1096,6 +1158,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1096void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1158void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1097{ 1159{
1098 mutex_lock(&ctx->mutex); 1160 mutex_lock(&ctx->mutex);
1161 /* reset reg block */
1162 ctx->reg_block = 0;
1163 /* reset fb window */
1164 ctx->fb_base = 0;
1165 /* reset io mode */
1166 ctx->io_mode = ATOM_IO_MM;
1099 atom_execute_table_locked(ctx, index, params); 1167 atom_execute_table_locked(ctx, index, params);
1100 mutex_unlock(&ctx->mutex); 1168 mutex_unlock(&ctx->mutex);
1101} 1169}
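
Two things in the atom.c diff above deserve a note. First, the new shift_left/shift_right handlers read their shift count through atom_get_src_direct(), which consumes a raw immediate from the command table whose width (byte, word or dword) is chosen by the source alignment code instead of decoding a full source operand. Second, the reg_block/fb_base/io_mode resets move from the recursive table executor into atom_execute_table(), so the state is cleared once per top-level call rather than on every nested table. A rough standalone model of the immediate decoder (the constant names and little-endian table layout are assumptions of this sketch, not quotes from the parser):

    #include <stdint.h>
    #include <stdio.h>

    enum { SRC_DWORD, SRC_WORD0, SRC_BYTE0 };   /* simplified alignment codes */

    static uint32_t get_src_direct(const uint8_t *ctab, int *ptr, int align)
    {
        uint32_t val = 0;

        switch (align) {
        case SRC_DWORD:
            val = (uint32_t)ctab[*ptr] |
                  (uint32_t)ctab[*ptr + 1] << 8 |
                  (uint32_t)ctab[*ptr + 2] << 16 |
                  (uint32_t)ctab[*ptr + 3] << 24;   /* tables are little-endian */
            *ptr += 4;
            break;
        case SRC_WORD0:
            val = (uint32_t)ctab[*ptr] | (uint32_t)ctab[*ptr + 1] << 8;
            *ptr += 2;
            break;
        case SRC_BYTE0:
            val = ctab[*ptr];
            *ptr += 1;
            break;
        }
        return val;
    }

    int main(void)
    {
        const uint8_t tab[] = { 0x05, 0x78, 0x56, 0x34, 0x12 };
        int ptr = 0;
        uint32_t shift = get_src_direct(tab, &ptr, SRC_BYTE0);   /* 5 */
        uint32_t dst   = get_src_direct(tab, &ptr, SRC_DWORD);   /* 0x12345678 */
        printf("0x%08x << %u = 0x%08x\n", (unsigned)dst, (unsigned)shift,
               (unsigned)(dst << shift));
        return 0;
    }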
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 47fd943f6d14..bc73781423a1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
91#define ATOM_WS_AND_MASK 0x45 91#define ATOM_WS_AND_MASK 0x45
92#define ATOM_WS_FB_WINDOW 0x46 92#define ATOM_WS_FB_WINDOW 0x46
93#define ATOM_WS_ATTRIBUTES 0x47 93#define ATOM_WS_ATTRIBUTES 0x47
94#define ATOM_WS_REGPTR 0x48
94 95
95#define ATOM_IIO_NOP 0 96#define ATOM_IIO_NOP 0
96#define ATOM_IIO_START 1 97#define ATOM_IIO_START 1
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf59f00c..af464e351fbd 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 307 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
308 args.ucCRTC = radeon_crtc->crtc_id; 308 args.ucCRTC = radeon_crtc->crtc_id;
309 309
310 printk("executing set crtc dtd timing\n");
311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 310 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312} 311}
313 312
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
347 args.susModeMiscInfo.usAccess = cpu_to_le16(misc); 346 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
348 args.ucCRTC = radeon_crtc->crtc_id; 347 args.ucCRTC = radeon_crtc->crtc_id;
349 348
350 printk("executing set crtc timing\n");
351 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 349 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
352} 350}
353 351
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
409 } 407 }
410} 408}
411 409
412void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 410union adjust_pixel_clock {
411 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
412};
413
414static u32 atombios_adjust_pll(struct drm_crtc *crtc,
415 struct drm_display_mode *mode,
416 struct radeon_pll *pll)
413{ 417{
414 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
415 struct drm_device *dev = crtc->dev; 418 struct drm_device *dev = crtc->dev;
416 struct radeon_device *rdev = dev->dev_private; 419 struct radeon_device *rdev = dev->dev_private;
417 struct drm_encoder *encoder = NULL; 420 struct drm_encoder *encoder = NULL;
418 struct radeon_encoder *radeon_encoder = NULL; 421 struct radeon_encoder *radeon_encoder = NULL;
419 uint8_t frev, crev; 422 u32 adjusted_clock = mode->clock;
420 int index;
421 SET_PIXEL_CLOCK_PS_ALLOCATION args;
422 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
423 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
424 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
425 uint32_t pll_clock = mode->clock;
426 uint32_t adjusted_clock;
427 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
428 struct radeon_pll *pll;
429 int pll_flags = 0;
430 423
431 memset(&args, 0, sizeof(args)); 424 /* reset the pll flags */
425 pll->flags = 0;
432 426
433 if (ASIC_IS_AVIVO(rdev)) { 427 if (ASIC_IS_AVIVO(rdev)) {
434 if ((rdev->family == CHIP_RS600) || 428 if ((rdev->family == CHIP_RS600) ||
435 (rdev->family == CHIP_RS690) || 429 (rdev->family == CHIP_RS690) ||
436 (rdev->family == CHIP_RS740)) 430 (rdev->family == CHIP_RS740))
437 pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | 431 pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
438 RADEON_PLL_PREFER_CLOSEST_LOWER); 432 RADEON_PLL_PREFER_CLOSEST_LOWER);
439 433
440 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 434 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
441 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 435 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
442 else 436 else
443 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 437 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
444 } else { 438 } else {
445 pll_flags |= RADEON_PLL_LEGACY; 439 pll->flags |= RADEON_PLL_LEGACY;
446 440
447 if (mode->clock > 200000) /* range limits??? */ 441 if (mode->clock > 200000) /* range limits??? */
448 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 442 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
449 else 443 else
450 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 444 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
451 445
452 } 446 }
453 447
454 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 448 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
455 if (encoder->crtc == crtc) { 449 if (encoder->crtc == crtc) {
456 if (!ASIC_IS_AVIVO(rdev)) {
457 if (encoder->encoder_type !=
458 DRM_MODE_ENCODER_DAC)
459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 if (encoder->encoder_type ==
461 DRM_MODE_ENCODER_LVDS)
462 pll_flags |= RADEON_PLL_USE_REF_DIV;
463 }
464 radeon_encoder = to_radeon_encoder(encoder); 450 radeon_encoder = to_radeon_encoder(encoder);
451 if (ASIC_IS_AVIVO(rdev)) {
452 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
453 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
454 adjusted_clock = mode->clock * 2;
455 } else {
456 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
457 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
458 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
459 pll->flags |= RADEON_PLL_USE_REF_DIV;
460 }
465 break; 461 break;
466 } 462 }
467 } 463 }
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
471 * special hw requirements. 467 * special hw requirements.
472 */ 468 */
473 if (ASIC_IS_DCE3(rdev)) { 469 if (ASIC_IS_DCE3(rdev)) {
474 ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args; 470 union adjust_pixel_clock args;
471 struct radeon_encoder_atom_dig *dig;
472 u8 frev, crev;
473 int index;
475 474
476 if (!encoder) 475 if (!radeon_encoder->enc_priv)
477 return; 476 return adjusted_clock;
478 477 dig = radeon_encoder->enc_priv;
479 memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
480 adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
481 adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
482 adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
483 478
484 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 479 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
485 atom_execute_table(rdev->mode_info.atom_context, 480 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
486 index, (uint32_t *)&adjust_pll_args); 481 &crev);
487 adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10; 482
488 } else { 483 memset(&args, 0, sizeof(args));
489 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 484
490 if (ASIC_IS_AVIVO(rdev) && 485 switch (frev) {
491 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) 486 case 1:
492 adjusted_clock = mode->clock * 2; 487 switch (crev) {
493 else 488 case 1:
494 adjusted_clock = mode->clock; 489 case 2:
490 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
491 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
492 args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
493
494 atom_execute_table(rdev->mode_info.atom_context,
495 index, (uint32_t *)&args);
496 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
497 break;
498 default:
499 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
500 return adjusted_clock;
501 }
502 break;
503 default:
504 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
505 return adjusted_clock;
506 }
495 } 507 }
508 return adjusted_clock;
509}
510
511union set_pixel_clock {
512 SET_PIXEL_CLOCK_PS_ALLOCATION base;
513 PIXEL_CLOCK_PARAMETERS v1;
514 PIXEL_CLOCK_PARAMETERS_V2 v2;
515 PIXEL_CLOCK_PARAMETERS_V3 v3;
516};
517
518void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
519{
520 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
521 struct drm_device *dev = crtc->dev;
522 struct radeon_device *rdev = dev->dev_private;
523 struct drm_encoder *encoder = NULL;
524 struct radeon_encoder *radeon_encoder = NULL;
525 u8 frev, crev;
526 int index;
527 union set_pixel_clock args;
528 u32 pll_clock = mode->clock;
529 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
530 struct radeon_pll *pll;
531 u32 adjusted_clock;
532
533 memset(&args, 0, sizeof(args));
534
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
536 if (encoder->crtc == crtc) {
537 radeon_encoder = to_radeon_encoder(encoder);
538 break;
539 }
540 }
541
542 if (!radeon_encoder)
543 return;
496 544
497 if (radeon_crtc->crtc_id == 0) 545 if (radeon_crtc->crtc_id == 0)
498 pll = &rdev->clock.p1pll; 546 pll = &rdev->clock.p1pll;
499 else 547 else
500 pll = &rdev->clock.p2pll; 548 pll = &rdev->clock.p2pll;
501 549
550 /* adjust pixel clock as needed */
551 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
552
502 if (ASIC_IS_AVIVO(rdev)) { 553 if (ASIC_IS_AVIVO(rdev)) {
503 if (radeon_new_pll) 554 if (radeon_new_pll)
504 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, 555 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
505 &fb_div, &frac_fb_div, 556 &fb_div, &frac_fb_div,
506 &ref_div, &post_div, pll_flags); 557 &ref_div, &post_div);
507 else 558 else
508 radeon_compute_pll(pll, adjusted_clock, &pll_clock, 559 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
509 &fb_div, &frac_fb_div, 560 &fb_div, &frac_fb_div,
510 &ref_div, &post_div, pll_flags); 561 &ref_div, &post_div);
511 } else 562 } else
512 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 563 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
513 &ref_div, &post_div, pll_flags); 564 &ref_div, &post_div);
514 565
515 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 566 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
516 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 567 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
520 case 1: 571 case 1:
521 switch (crev) { 572 switch (crev) {
522 case 1: 573 case 1:
523 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput; 574 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
524 spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 575 args.v1.usRefDiv = cpu_to_le16(ref_div);
525 spc1_ptr->usRefDiv = cpu_to_le16(ref_div); 576 args.v1.usFbDiv = cpu_to_le16(fb_div);
526 spc1_ptr->usFbDiv = cpu_to_le16(fb_div); 577 args.v1.ucFracFbDiv = frac_fb_div;
527 spc1_ptr->ucFracFbDiv = frac_fb_div; 578 args.v1.ucPostDiv = post_div;
528 spc1_ptr->ucPostDiv = post_div; 579 args.v1.ucPpll =
529 spc1_ptr->ucPpll =
530 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 580 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
531 spc1_ptr->ucCRTC = radeon_crtc->crtc_id; 581 args.v1.ucCRTC = radeon_crtc->crtc_id;
532 spc1_ptr->ucRefDivSrc = 1; 582 args.v1.ucRefDivSrc = 1;
533 break; 583 break;
534 case 2: 584 case 2:
535 spc2_ptr = 585 args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
536 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput; 586 args.v2.usRefDiv = cpu_to_le16(ref_div);
537 spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 587 args.v2.usFbDiv = cpu_to_le16(fb_div);
538 spc2_ptr->usRefDiv = cpu_to_le16(ref_div); 588 args.v2.ucFracFbDiv = frac_fb_div;
539 spc2_ptr->usFbDiv = cpu_to_le16(fb_div); 589 args.v2.ucPostDiv = post_div;
540 spc2_ptr->ucFracFbDiv = frac_fb_div; 590 args.v2.ucPpll =
541 spc2_ptr->ucPostDiv = post_div;
542 spc2_ptr->ucPpll =
543 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 591 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
544 spc2_ptr->ucCRTC = radeon_crtc->crtc_id; 592 args.v2.ucCRTC = radeon_crtc->crtc_id;
545 spc2_ptr->ucRefDivSrc = 1; 593 args.v2.ucRefDivSrc = 1;
546 break; 594 break;
547 case 3: 595 case 3:
548 if (!encoder) 596 args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
549 return; 597 args.v3.usRefDiv = cpu_to_le16(ref_div);
550 spc3_ptr = 598 args.v3.usFbDiv = cpu_to_le16(fb_div);
551 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput; 599 args.v3.ucFracFbDiv = frac_fb_div;
552 spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10); 600 args.v3.ucPostDiv = post_div;
553 spc3_ptr->usRefDiv = cpu_to_le16(ref_div); 601 args.v3.ucPpll =
554 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
555 spc3_ptr->ucFracFbDiv = frac_fb_div;
556 spc3_ptr->ucPostDiv = post_div;
557 spc3_ptr->ucPpll =
558 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 602 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
559 spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2); 603 args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
560 spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id; 604 args.v3.ucTransmitterId = radeon_encoder->encoder_id;
561 spc3_ptr->ucEncoderMode = 605 args.v3.ucEncoderMode =
562 atombios_get_encoder_mode(encoder); 606 atombios_get_encoder_mode(encoder);
563 break; 607 break;
564 default: 608 default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
571 return; 615 return;
572 } 616 }
573 617
574 printk("executing set pll\n");
575 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
576} 619}
577 620
578int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, 621static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
579 struct drm_framebuffer *old_fb) 622 struct drm_framebuffer *old_fb)
580{ 623{
581 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 624 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
582 struct drm_device *dev = crtc->dev; 625 struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
706 return 0; 749 return 0;
707} 750}
708 751
752int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
753 struct drm_framebuffer *old_fb)
754{
755 struct drm_device *dev = crtc->dev;
756 struct radeon_device *rdev = dev->dev_private;
757
758 if (ASIC_IS_AVIVO(rdev))
759 return avivo_crtc_set_base(crtc, x, y, old_fb);
760 else
761 return radeon_crtc_set_base(crtc, x, y, old_fb);
762}
763
764/* properly set additional regs when using atombios */
765static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
766{
767 struct drm_device *dev = crtc->dev;
768 struct radeon_device *rdev = dev->dev_private;
769 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
770 u32 disp_merge_cntl;
771
772 switch (radeon_crtc->crtc_id) {
773 case 0:
774 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
775 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
776 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
777 break;
778 case 1:
779 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
780 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
781 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
782 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
783 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
784 break;
785 }
786}
787
709int atombios_crtc_mode_set(struct drm_crtc *crtc, 788int atombios_crtc_mode_set(struct drm_crtc *crtc,
710 struct drm_display_mode *mode, 789 struct drm_display_mode *mode,
711 struct drm_display_mode *adjusted_mode, 790 struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
727 else { 806 else {
728 if (radeon_crtc->crtc_id == 0) 807 if (radeon_crtc->crtc_id == 0)
729 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 808 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
730 radeon_crtc_set_base(crtc, x, y, old_fb); 809 atombios_crtc_set_base(crtc, x, y, old_fb);
731 radeon_legacy_atom_set_surface(crtc); 810 radeon_legacy_atom_fixup(crtc);
732 } 811 }
733 atombios_overscan_setup(crtc, mode, adjusted_mode); 812 atombios_overscan_setup(crtc, mode, adjusted_mode);
734 atombios_scaler_setup(crtc); 813 atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
746 825
747static void atombios_crtc_prepare(struct drm_crtc *crtc) 826static void atombios_crtc_prepare(struct drm_crtc *crtc)
748{ 827{
749 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
750 atombios_lock_crtc(crtc, 1); 828 atombios_lock_crtc(crtc, 1);
829 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
751} 830}
752 831
753static void atombios_crtc_commit(struct drm_crtc *crtc) 832static void atombios_crtc_commit(struct drm_crtc *crtc)
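
The atombios_crtc.c refactor above pulls the DCE3 pixel-clock adjustment out into atombios_adjust_pll(), stores the PLL constraint flags on the radeon_pll itself, and switches the SetPixelClock argument buffer to a union of the per-revision parameter structs, selected by the (frev, crev) pair that atom_parse_cmd_header() reports. The union-per-revision idiom is the interesting part; here is a condensed model with invented parameter structs standing in for the AtomBIOS ones:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct params_v1 { uint16_t pixel_clock; uint8_t ppll; };
    struct params_v2 { uint16_t pixel_clock; uint8_t ppll; uint8_t misc_info; };

    union set_pixel_clock {
        struct params_v1 v1;
        struct params_v2 v2;
    };

    static int fill_args(union set_pixel_clock *args, int frev, int crev,
                         uint32_t clock_khz, int crtc_id)
    {
        memset(args, 0, sizeof(*args));

        switch (frev) {
        case 1:
            switch (crev) {
            case 1:
                args->v1.pixel_clock = (uint16_t)(clock_khz / 10);
                args->v1.ppll = crtc_id ? 2 : 1;
                return 0;
            case 2:
                args->v2.pixel_clock = (uint16_t)(clock_khz / 10);
                args->v2.ppll = crtc_id ? 2 : 1;
                args->v2.misc_info = (uint8_t)(crtc_id << 2);
                return 0;
            }
            break;
        }
        fprintf(stderr, "Unknown table version %d %d\n", frev, crev);
        return -1;
    }

    int main(void)
    {
        union set_pixel_clock args;

        if (fill_args(&args, 1, 2, 154000, 1) == 0)
            printf("usPixelClock = %u (10 kHz units)\n", args.v2.pixel_clock);
        return 0;
    }

Because every revision shares one buffer, the single atom_execute_table() call at the end does not care which member was filled in, and supporting a future crev only means adding another case.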
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8760d66e058a..11c9a3fe6810 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1504,6 +1504,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1504 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1504 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1505 return -EINVAL; 1505 return -EINVAL;
1506 } 1506 }
1507 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1507 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1); 1508 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1508 track->immd_dwords = pkt->count - 1; 1509 track->immd_dwords = pkt->count - 1;
1509 r = r100_cs_track_check(p->rdev, track); 1510 r = r100_cs_track_check(p->rdev, track);
@@ -3399,9 +3400,7 @@ int r100_mc_init(struct radeon_device *rdev)
3399 if (rdev->flags & RADEON_IS_AGP) { 3400 if (rdev->flags & RADEON_IS_AGP) {
3400 r = radeon_agp_init(rdev); 3401 r = radeon_agp_init(rdev);
3401 if (r) { 3402 if (r) {
3402 printk(KERN_WARNING "[drm] Disabling AGP\n"); 3403 radeon_agp_disable(rdev);
3403 rdev->flags &= ~RADEON_IS_AGP;
3404 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3405 } else { 3404 } else {
3406 rdev->mc.gtt_location = rdev->mc.agp_base; 3405 rdev->mc.gtt_location = rdev->mc.agp_base;
3407 } 3406 }
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 20942127c46b..ff1e0cd608bf 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
371 case 5: 371 case 5:
372 case 6: 372 case 6:
373 case 7: 373 case 7:
374 /* 1D/2D */
374 track->textures[i].tex_coord_type = 0; 375 track->textures[i].tex_coord_type = 0;
375 break; 376 break;
376 case 1: 377 case 1:
377 track->textures[i].tex_coord_type = 1; 378 /* CUBE */
379 track->textures[i].tex_coord_type = 2;
378 break; 380 break;
379 case 2: 381 case 2:
380 track->textures[i].tex_coord_type = 2; 382 /* 3D */
383 track->textures[i].tex_coord_type = 1;
381 break; 384 break;
382 } 385 }
383 break; 386 break;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 053404e71a9d..4526faaacca8 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -50,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
50 if (rdev->flags & RADEON_IS_AGP) { 50 if (rdev->flags & RADEON_IS_AGP) {
51 r = radeon_agp_init(rdev); 51 r = radeon_agp_init(rdev);
52 if (r) { 52 if (r) {
53 printk(KERN_WARNING "[drm] Disabling AGP\n"); 53 radeon_agp_disable(rdev);
54 rdev->flags &= ~RADEON_IS_AGP;
55 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
56 } else { 54 } else {
57 rdev->mc.gtt_location = rdev->mc.agp_base; 55 rdev->mc.gtt_location = rdev->mc.agp_base;
58 } 56 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f5ff3490929f..da9aa3c31bcf 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -624,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
624 fixed20_12 a; 624 fixed20_12 a;
625 u32 tmp; 625 u32 tmp;
626 int chansize, numchan; 626 int chansize, numchan;
627 int r;
628 627
629 /* Get VRAM informations */ 628 /* Get VRAM informations */
630 rdev->mc.vram_is_ddr = true; 629 rdev->mc.vram_is_ddr = true;
@@ -667,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
667 rdev->mc.real_vram_size = rdev->mc.aper_size; 666 rdev->mc.real_vram_size = rdev->mc.aper_size;
668 667
669 if (rdev->flags & RADEON_IS_AGP) { 668 if (rdev->flags & RADEON_IS_AGP) {
670 r = radeon_agp_init(rdev);
671 if (r)
672 return r;
673 /* gtt_size is setup by radeon_agp_init */ 669 /* gtt_size is setup by radeon_agp_init */
674 rdev->mc.gtt_location = rdev->mc.agp_base; 670 rdev->mc.gtt_location = rdev->mc.agp_base;
675 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 671 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -1958,14 +1954,17 @@ int r600_suspend(struct radeon_device *rdev)
1958 /* FIXME: we should wait for ring to be empty */ 1954 /* FIXME: we should wait for ring to be empty */
1959 r600_cp_stop(rdev); 1955 r600_cp_stop(rdev);
1960 rdev->cp.ready = false; 1956 rdev->cp.ready = false;
1957 r600_irq_suspend(rdev);
1961 r600_wb_disable(rdev); 1958 r600_wb_disable(rdev);
1962 r600_pcie_gart_disable(rdev); 1959 r600_pcie_gart_disable(rdev);
1963 /* unpin shaders bo */ 1960 /* unpin shaders bo */
1964 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1961 if (rdev->r600_blit.shader_obj) {
1965 if (unlikely(r != 0)) 1962 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1966 return r; 1963 if (!r) {
1967 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1964 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1968 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1965 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1966 }
1967 }
1969 return 0; 1968 return 0;
1970} 1969}
1971 1970
@@ -2026,6 +2025,11 @@ int r600_init(struct radeon_device *rdev)
2026 r = radeon_fence_driver_init(rdev); 2025 r = radeon_fence_driver_init(rdev);
2027 if (r) 2026 if (r)
2028 return r; 2027 return r;
2028 if (rdev->flags & RADEON_IS_AGP) {
2029 r = radeon_agp_init(rdev);
2030 if (r)
2031 radeon_agp_disable(rdev);
2032 }
2029 r = r600_mc_init(rdev); 2033 r = r600_mc_init(rdev);
2030 if (r) 2034 if (r)
2031 return r; 2035 return r;
@@ -2060,13 +2064,14 @@ int r600_init(struct radeon_device *rdev)
2060 if (rdev->accel_working) { 2064 if (rdev->accel_working) {
2061 r = radeon_ib_pool_init(rdev); 2065 r = radeon_ib_pool_init(rdev);
2062 if (r) { 2066 if (r) {
2063 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 2067 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2064 rdev->accel_working = false;
2065 }
2066 r = r600_ib_test(rdev);
2067 if (r) {
2068 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2069 rdev->accel_working = false; 2068 rdev->accel_working = false;
2069 } else {
2070 r = r600_ib_test(rdev);
2071 if (r) {
2072 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2073 rdev->accel_working = false;
2074 }
2070 } 2075 }
2071 } 2076 }
2072 2077
@@ -2197,14 +2202,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2197 rb_bufsz = drm_order(ring_size / 4); 2202 rb_bufsz = drm_order(ring_size / 4);
2198 ring_size = (1 << rb_bufsz) * 4; 2203 ring_size = (1 << rb_bufsz) * 4;
2199 rdev->ih.ring_size = ring_size; 2204 rdev->ih.ring_size = ring_size;
2200 rdev->ih.align_mask = 4 - 1; 2205 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2206 rdev->ih.rptr = 0;
2201} 2207}
2202 2208
2203static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) 2209static int r600_ih_ring_alloc(struct radeon_device *rdev)
2204{ 2210{
2205 int r; 2211 int r;
2206 2212
2207 rdev->ih.ring_size = ring_size;
2208 /* Allocate ring buffer */ 2213 /* Allocate ring buffer */
2209 if (rdev->ih.ring_obj == NULL) { 2214 if (rdev->ih.ring_obj == NULL) {
2210 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, 2215 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2234,9 +2239,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2234 return r; 2239 return r;
2235 } 2240 }
2236 } 2241 }
2237 rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
2238 rdev->ih.rptr = 0;
2239
2240 return 0; 2242 return 0;
2241} 2243}
2242 2244
@@ -2386,7 +2388,7 @@ int r600_irq_init(struct radeon_device *rdev)
2386 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 2388 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2387 2389
2388 /* allocate ring */ 2390 /* allocate ring */
2389 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); 2391 ret = r600_ih_ring_alloc(rdev);
2390 if (ret) 2392 if (ret)
2391 return ret; 2393 return ret;
2392 2394
@@ -2449,10 +2451,15 @@ int r600_irq_init(struct radeon_device *rdev)
2449 return ret; 2451 return ret;
2450} 2452}
2451 2453
2452void r600_irq_fini(struct radeon_device *rdev) 2454void r600_irq_suspend(struct radeon_device *rdev)
2453{ 2455{
2454 r600_disable_interrupts(rdev); 2456 r600_disable_interrupts(rdev);
2455 r600_rlc_stop(rdev); 2457 r600_rlc_stop(rdev);
2458}
2459
2460void r600_irq_fini(struct radeon_device *rdev)
2461{
2462 r600_irq_suspend(rdev);
2456 r600_ih_ring_fini(rdev); 2463 r600_ih_ring_fini(rdev);
2457} 2464}
2458 2465
@@ -2467,8 +2474,12 @@ int r600_irq_set(struct radeon_device *rdev)
2467 return -EINVAL; 2474 return -EINVAL;
2468 } 2475 }
2469 /* don't enable anything if the ih is disabled */ 2476 /* don't enable anything if the ih is disabled */
2470 if (!rdev->ih.enabled) 2477 if (!rdev->ih.enabled) {
2478 r600_disable_interrupts(rdev);
2479 /* force the active interrupt state to all disabled */
2480 r600_disable_interrupt_state(rdev);
2471 return 0; 2481 return 0;
2482 }
2472 2483
2473 if (ASIC_IS_DCE3(rdev)) { 2484 if (ASIC_IS_DCE3(rdev)) {
2474 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 2485 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2638,16 +2649,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2638 wptr = RREG32(IH_RB_WPTR); 2649 wptr = RREG32(IH_RB_WPTR);
2639 2650
2640 if (wptr & RB_OVERFLOW) { 2651 if (wptr & RB_OVERFLOW) {
2641 WARN_ON(1); 2652 /* When a ring buffer overflow happens, start parsing interrupts
2642 /* XXX deal with overflow */ 2653 * from the last vector that was not overwritten (wptr + 16).
2643 DRM_ERROR("IH RB overflow\n"); 2654 * Hopefully this should allow us to catch up.
2655 */
2656 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2657 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
2658 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2644 tmp = RREG32(IH_RB_CNTL); 2659 tmp = RREG32(IH_RB_CNTL);
2645 tmp |= IH_WPTR_OVERFLOW_CLEAR; 2660 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2646 WREG32(IH_RB_CNTL, tmp); 2661 WREG32(IH_RB_CNTL, tmp);
2647 } 2662 }
2648 wptr = wptr & WPTR_OFFSET_MASK; 2663 return (wptr & rdev->ih.ptr_mask);
2649
2650 return wptr;
2651} 2664}
2652 2665
2653/* r600 IV Ring 2666/* r600 IV Ring
@@ -2683,12 +2696,13 @@ int r600_irq_process(struct radeon_device *rdev)
2683 u32 wptr = r600_get_ih_wptr(rdev); 2696 u32 wptr = r600_get_ih_wptr(rdev);
2684 u32 rptr = rdev->ih.rptr; 2697 u32 rptr = rdev->ih.rptr;
2685 u32 src_id, src_data; 2698 u32 src_id, src_data;
2686 u32 last_entry = rdev->ih.ring_size - 16;
2687 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 2699 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2688 unsigned long flags; 2700 unsigned long flags;
2689 bool queue_hotplug = false; 2701 bool queue_hotplug = false;
2690 2702
2691 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2703 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2704 if (!rdev->ih.enabled)
2705 return IRQ_NONE;
2692 2706
2693 spin_lock_irqsave(&rdev->ih.lock, flags); 2707 spin_lock_irqsave(&rdev->ih.lock, flags);
2694 2708
@@ -2817,10 +2831,8 @@ restart_ih:
2817 } 2831 }
2818 2832
2819 /* wptr/rptr are in bytes! */ 2833 /* wptr/rptr are in bytes! */
2820 if (rptr == last_entry) 2834 rptr += 16;
2821 rptr = 0; 2835 rptr &= rdev->ih.ptr_mask;
2822 else
2823 rptr += 16;
2824 } 2836 }
2825 /* make sure wptr hasn't changed while processing */ 2837 /* make sure wptr hasn't changed while processing */
2826 wptr = r600_get_ih_wptr(rdev); 2838 wptr = r600_get_ih_wptr(rdev);
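
The r600.c interrupt-ring changes above standardize the bookkeeping: the IH ring size is a power of two, ptr_mask is set to size - 1 in r600_ih_ring_init() (rather than being derived from the CP ring size as before), the read pointer advances one 16-byte vector at a time and wraps with the mask, and an overflow is recovered by resuming at (wptr + 16) & ptr_mask, the oldest vector that has not been overwritten. A small host-side model of that arithmetic, with no hardware access:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 4096u                /* power of two, in bytes */
    #define PTR_MASK  (RING_SIZE - 1)
    #define VEC_SIZE  16u                  /* one IV ring entry */

    static uint32_t handle_overflow(uint32_t wptr)
    {
        /* the slot just past wptr is the oldest entry still intact */
        return (wptr + VEC_SIZE) & PTR_MASK;
    }

    int main(void)
    {
        uint32_t rptr = 0xff0, wptr = 0x020;   /* writer has already wrapped */

        /* normal processing: consume vectors until we catch the writer */
        while (rptr != wptr) {
            printf("process vector at 0x%03x\n", rptr);
            rptr = (rptr + VEC_SIZE) & PTR_MASK;
        }

        /* overflow reported by RB_OVERFLOW: resynchronise the reader */
        rptr = handle_overflow(wptr);
        printf("after overflow, resume at 0x%03x\n", rptr);
        return 0;
    }

The masking also removes the old special case "if (rptr == last_entry) rptr = 0;", since (rptr + 16) & ptr_mask wraps to zero by itself at the end of the ring.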
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 8787ea89dc6e..2bedce477a97 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,14 +512,16 @@ void r600_blit_fini(struct radeon_device *rdev)
512{ 512{
513 int r; 513 int r;
514 514
515 if (rdev->r600_blit.shader_obj == NULL)
516 return;
517 /* If we can't reserve the bo, unref should be enough to destroy
518 * it when it becomes idle.
519 */
515 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 520 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
516 if (unlikely(r != 0)) { 521 if (!r) {
517 dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); 522 radeon_bo_unpin(rdev->r600_blit.shader_obj);
518 goto out_unref; 523 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
519 } 524 }
520 radeon_bo_unpin(rdev->r600_blit.shader_obj);
521 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
522out_unref:
523 radeon_bo_unref(&rdev->r600_blit.shader_obj); 525 radeon_bo_unref(&rdev->r600_blit.shader_obj);
524} 526}
525 527
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 44060b92d9e6..e4c45ec16507 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
38 38
39struct r600_cs_track {
40 u32 cb_color0_base_last;
41};
42
39/** 43/**
40 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
41 * @parser: parser structure holding parsing context. 45 * @parser: parser structure holding parsing context.
@@ -177,6 +181,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
177} 181}
178 182
179/** 183/**
184 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
185 * @parser: parser structure holding parsing context.
186 *
 187 * Check whether the next packet is a packet3 NOP, which is the packet
 188 * type userspace uses to carry relocation information.
189 **/
190static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
191{
192 struct radeon_cs_packet p3reloc;
193 int r;
194
195 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
196 if (r) {
197 return 0;
198 }
199 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
200 return 0;
201 }
202 return 1;
203}
204
205/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet 206 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context. 207 * @parser: parser structure holding parsing context.
182 * 208 *
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
337 struct radeon_cs_packet *pkt) 363 struct radeon_cs_packet *pkt)
338{ 364{
339 struct radeon_cs_reloc *reloc; 365 struct radeon_cs_reloc *reloc;
366 struct r600_cs_track *track;
340 volatile u32 *ib; 367 volatile u32 *ib;
341 unsigned idx; 368 unsigned idx;
342 unsigned i; 369 unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
344 int r; 371 int r;
345 u32 idx_value; 372 u32 idx_value;
346 373
374 track = (struct r600_cs_track *)p->track;
347 ib = p->ib->ptr; 375 ib = p->ib->ptr;
348 idx = pkt->idx + 1; 376 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx); 377 idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
503 for (i = 0; i < pkt->count; i++) { 531 for (i = 0; i < pkt->count; i++) {
504 reg = start_reg + (4 * i); 532 reg = start_reg + (4 * i);
505 switch (reg) { 533 switch (reg) {
 534 /* These registers were added late; there is userspace
 535 * which does provide a relocation for them but sets a
 536 * 0 offset. In order to avoid breaking old userspace we
 537 * detect this case and set the address to point to the
 538 * last CB_COLOR0_BASE. Note that if userspace doesn't set
 539 * CB_COLOR0_BASE before these registers we will report an
 540 * error. Old userspace always sets CB_COLOR0_BASE
 541 * before any of this.
 542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
565 printk_once(KERN_WARNING "radeon: You have old & broken userspace "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
506 case DB_DEPTH_BASE: 576 case DB_DEPTH_BASE:
507 case DB_HTILE_DATA_BASE: 577 case DB_HTILE_DATA_BASE:
508 case CB_COLOR0_BASE: 578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
509 case CB_COLOR1_BASE: 588 case CB_COLOR1_BASE:
510 case CB_COLOR2_BASE: 589 case CB_COLOR2_BASE:
511 case CB_COLOR3_BASE: 590 case CB_COLOR3_BASE:
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
678int r600_cs_parse(struct radeon_cs_parser *p) 757int r600_cs_parse(struct radeon_cs_parser *p)
679{ 758{
680 struct radeon_cs_packet pkt; 759 struct radeon_cs_packet pkt;
760 struct r600_cs_track *track;
681 int r; 761 int r;
682 762
763 track = kzalloc(sizeof(*track), GFP_KERNEL);
764 p->track = track;
683 do { 765 do {
684 r = r600_cs_packet_parse(p, &pkt, p->idx); 766 r = r600_cs_packet_parse(p, &pkt, p->idx);
685 if (r) { 767 if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
757 /* initialize parser */ 839 /* initialize parser */
758 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 840 memset(&parser, 0, sizeof(struct radeon_cs_parser));
759 parser.filp = filp; 841 parser.filp = filp;
842 parser.dev = &dev->pdev->dev;
760 parser.rdev = NULL; 843 parser.rdev = NULL;
761 parser.family = family; 844 parser.family = family;
762 parser.ib = &fake_ib; 845 parser.ib = &fake_ib;
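
The r600_cs.c hunk above introduces a tiny per-stream tracker so the checker can cope with old userspace: every relocated CB_COLOR0_BASE is remembered in cb_color0_base_last, and when one of the late-added FRAG/TILE base registers shows up without a relocation NOP behind it, the checker patches in that remembered address instead of rejecting the stream. A standalone sketch of the decision (the packet layout below follows the usual radeon type-3 encoding but is only an illustration):

    #include <stdint.h>
    #include <stdio.h>

    struct cs_track { uint32_t cb_color0_base_last; };

    /* 1 if the next dword is a type-3 NOP, the packet that carries a reloc */
    static int next_is_reloc_nop(const uint32_t *stream, int idx, int len)
    {
        if (idx >= len)
            return 0;
        return (stream[idx] >> 30) == 3 && ((stream[idx] >> 8) & 0xff) == 0x10;
    }

    static int check_frag_base(struct cs_track *t, uint32_t *reg_val,
                               const uint32_t *stream, int idx, int len)
    {
        if (next_is_reloc_nop(stream, idx, len))
            return 0;                        /* new userspace: the reloc fixes it up */
        if (!t->cb_color0_base_last) {
            fprintf(stderr, "no CB_COLOR0_BASE seen before FRAG/TILE write\n");
            return -1;
        }
        *reg_val = t->cb_color0_base_last;   /* reuse the last known base */
        return 0;
    }

    int main(void)
    {
        struct cs_track track = { .cb_color0_base_last = 0x00400000 >> 8 };
        uint32_t stream[1] = { 0 };          /* no reloc NOP follows */
        uint32_t val = 0;

        if (check_frag_base(&track, &val, stream, 1, 1) == 0)
            printf("patched FRAG base to 0x%08x\n", (unsigned)val);
        return 0;
    }

The real checker also warns once (printk_once) so users of old Mesa know they should upgrade; the fallback keeps their setups working in the meantime.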
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894edadab4..30480881aed1 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
889#define C_0280E0_BASE_256B 0x00000000
890#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
891#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
892#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
893#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
894#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
895#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
896#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
897#define R_0280C0_CB_COLOR0_TILE 0x0280C0
898#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
899#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
900#define C_0280C0_BASE_256B 0x00000000
901#define R_0280C4_CB_COLOR1_TILE 0x0280C4
902#define R_0280C8_CB_COLOR2_TILE 0x0280C8
903#define R_0280CC_CB_COLOR3_TILE 0x0280CC
904#define R_0280D0_CB_COLOR4_TILE 0x0280D0
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908
909
885#endif 910#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index eb5f99b9469d..f7df1a7e4413 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -410,7 +410,6 @@ struct r600_ih {
410 unsigned wptr_old; 410 unsigned wptr_old;
411 unsigned ring_size; 411 unsigned ring_size;
412 uint64_t gpu_addr; 412 uint64_t gpu_addr;
413 uint32_t align_mask;
414 uint32_t ptr_mask; 413 uint32_t ptr_mask;
415 spinlock_t lock; 414 spinlock_t lock;
416 bool enabled; 415 bool enabled;
@@ -465,6 +464,7 @@ struct radeon_cs_chunk {
465}; 464};
466 465
467struct radeon_cs_parser { 466struct radeon_cs_parser {
467 struct device *dev;
468 struct radeon_device *rdev; 468 struct radeon_device *rdev;
469 struct drm_file *filp; 469 struct drm_file *filp;
470 /* chunks */ 470 /* chunks */
@@ -847,7 +847,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
847 847
848static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 848static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
849{ 849{
850 if (reg < 0x10000) 850 if (reg < rdev->rmmio_size)
851 return readl(((void __iomem *)rdev->rmmio) + reg); 851 return readl(((void __iomem *)rdev->rmmio) + reg);
852 else { 852 else {
853 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 853 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -857,7 +857,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
857 857
858static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 858static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
859{ 859{
860 if (reg < 0x10000) 860 if (reg < rdev->rmmio_size)
861 writel(v, ((void __iomem *)rdev->rmmio) + reg); 861 writel(v, ((void __iomem *)rdev->rmmio) + reg);
862 else { 862 else {
863 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 863 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -1017,6 +1017,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1017#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) 1017#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
1018 1018
1019/* Common functions */ 1019/* Common functions */
1020/* AGP */
1021extern void radeon_agp_disable(struct radeon_device *rdev);
1020extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1022extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1021extern int radeon_modeset_init(struct radeon_device *rdev); 1023extern int radeon_modeset_init(struct radeon_device *rdev);
1022extern void radeon_modeset_fini(struct radeon_device *rdev); 1024extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1160,7 +1162,8 @@ extern int r600_irq_init(struct radeon_device *rdev);
1160extern void r600_irq_fini(struct radeon_device *rdev); 1162extern void r600_irq_fini(struct radeon_device *rdev);
1161extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1163extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1162extern int r600_irq_set(struct radeon_device *rdev); 1164extern int r600_irq_set(struct radeon_device *rdev);
1163 1165extern void r600_irq_suspend(struct radeon_device *rdev);
1166/* r600 audio */
1164extern int r600_audio_init(struct radeon_device *rdev); 1167extern int r600_audio_init(struct radeon_device *rdev);
1165extern int r600_audio_tmds_index(struct drm_encoder *encoder); 1168extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1166extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1169extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
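r100_mm_rreg()/r100_mm_wreg() above now bound direct readl()/writel() access by the size of the mapped register BAR (rdev->rmmio_size) rather than a hard-coded 0x10000; offsets beyond the mapping go through the indirect RADEON_MM_INDEX window, as the else branches show. A minimal sketch of that indirect path, assuming RADEON_MM_DATA names the companion data register (the helper itself is illustrative, not driver code):

static uint32_t r100_mm_rreg_indirect(struct radeon_device *rdev, uint32_t reg)
{
	/* select the out-of-range register, then read its value back */
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}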
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 220f454ea9fa..c9ad7f5cc1ac 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -133,6 +133,13 @@ int radeon_agp_init(struct radeon_device *rdev)
133 bool is_v3; 133 bool is_v3;
134 int ret; 134 int ret;
135 135
136 if (rdev->ddev->agp->agp_info.aper_size < 32) {
137 dev_warn(rdev->dev, "AGP aperture to small (%dM) "
138 "need at least 32M, disabling AGP\n",
139 rdev->ddev->agp->agp_info.aper_size);
140 return -EINVAL;
141 }
142
136 /* Acquire AGP. */ 143 /* Acquire AGP. */
137 if (!rdev->ddev->agp->acquired) { 144 if (!rdev->ddev->agp->acquired) {
138 ret = drm_agp_acquire(rdev->ddev); 145 ret = drm_agp_acquire(rdev->ddev);
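The check added above makes radeon_agp_init() fail early with -EINVAL when the AGP aperture is smaller than 32 MB, and radeon.h now exports radeon_agp_disable() so callers can fall back to the PCI(E) GART instead of aborting; the rv770.c hunk later in this series uses exactly that pattern. Condensed, the intended call flow looks like this (sketch, mirroring the rv770 hunk below):

if (rdev->flags & RADEON_IS_AGP) {
	r = radeon_agp_init(rdev);		/* may now reject undersized apertures */
	if (r)
		radeon_agp_disable(rdev);	/* PCI GART callbacks + default gtt_size */
}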
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 812f24dbc2a8..73c4405bf42f 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
56 else if (post_div == 3) 56 else if (post_div == 3)
57 sclk >>= 2; 57 sclk >>= 2;
58 else if (post_div == 4) 58 else if (post_div == 4)
59 sclk >>= 4; 59 sclk >>= 3;
60 60
61 return sclk; 61 return sclk;
62} 62}
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
86 else if (post_div == 3) 86 else if (post_div == 3)
87 mclk >>= 2; 87 mclk >>= 2;
88 else if (post_div == 4) 88 else if (post_div == 4)
89 mclk >>= 4; 89 mclk >>= 3;
90 90
91 return mclk; 91 return mclk;
92} 92}
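The two one-line fixes above correct the decode of the legacy PLL post-divider field: judging from the surrounding cases (not all of which are visible in this hunk), an encoded value of 3 means divide by 4 and a value of 4 means divide by 8, so the shift must be 3, not 4 (which would divide by 16). The implied mapping, written out as a sketch:

/* illustrative only -- encoded post_div field vs. shift applied to the clock */
static unsigned legacy_post_div_to_shift(unsigned post_div)
{
	switch (post_div) {
	case 2:  return 1;	/* /2 */
	case 3:  return 2;	/* /4 */
	case 4:  return 3;	/* /8, previously shifted by 4 (/16) by mistake */
	default: return 0;	/* /1 */
	}
}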
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0f1d93..1496cb8658ef 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -231,6 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
231 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 231 memset(&parser, 0, sizeof(struct radeon_cs_parser));
232 parser.filp = filp; 232 parser.filp = filp;
233 parser.rdev = rdev; 233 parser.rdev = rdev;
234 parser.dev = rdev->dev;
234 r = radeon_cs_parser_init(&parser, data); 235 r = radeon_cs_parser_init(&parser, data);
235 if (r) { 236 if (r) {
236 DRM_ERROR("Failed to initialize parser !\n"); 237 DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0c51f8e46613..768b1509fa03 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -544,6 +544,7 @@ void radeon_agp_disable(struct radeon_device *rdev)
544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; 544 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
545 rdev->asic->gart_set_page = &r100_pci_gart_set_page; 545 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
546 } 546 }
547 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
547} 548}
548 549
549void radeon_check_arguments(struct radeon_device *rdev) 550void radeon_check_arguments(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0ec491ead2ff..6a92f994cc26 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -357,7 +357,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
357 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 357 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
358 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 358 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
359 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 359 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
360 if (dig->dp_i2c_bus) 360 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
361 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
361 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 362 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
362 } 363 }
363 if (!radeon_connector->ddc_bus) 364 if (!radeon_connector->ddc_bus)
@@ -410,11 +411,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
410 uint32_t *fb_div_p, 411 uint32_t *fb_div_p,
411 uint32_t *frac_fb_div_p, 412 uint32_t *frac_fb_div_p,
412 uint32_t *ref_div_p, 413 uint32_t *ref_div_p,
413 uint32_t *post_div_p, 414 uint32_t *post_div_p)
414 int flags)
415{ 415{
416 uint32_t min_ref_div = pll->min_ref_div; 416 uint32_t min_ref_div = pll->min_ref_div;
417 uint32_t max_ref_div = pll->max_ref_div; 417 uint32_t max_ref_div = pll->max_ref_div;
418 uint32_t min_post_div = pll->min_post_div;
419 uint32_t max_post_div = pll->max_post_div;
418 uint32_t min_fractional_feed_div = 0; 420 uint32_t min_fractional_feed_div = 0;
419 uint32_t max_fractional_feed_div = 0; 421 uint32_t max_fractional_feed_div = 0;
420 uint32_t best_vco = pll->best_vco; 422 uint32_t best_vco = pll->best_vco;
@@ -430,7 +432,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
430 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 432 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
431 freq = freq * 1000; 433 freq = freq * 1000;
432 434
433 if (flags & RADEON_PLL_USE_REF_DIV) 435 if (pll->flags & RADEON_PLL_USE_REF_DIV)
434 min_ref_div = max_ref_div = pll->reference_div; 436 min_ref_div = max_ref_div = pll->reference_div;
435 else { 437 else {
436 while (min_ref_div < max_ref_div-1) { 438 while (min_ref_div < max_ref_div-1) {
@@ -445,19 +447,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
445 } 447 }
446 } 448 }
447 449
448 if (flags & RADEON_PLL_USE_FRAC_FB_DIV) { 450 if (pll->flags & RADEON_PLL_USE_POST_DIV)
451 min_post_div = max_post_div = pll->post_div;
452
453 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
449 min_fractional_feed_div = pll->min_frac_feedback_div; 454 min_fractional_feed_div = pll->min_frac_feedback_div;
450 max_fractional_feed_div = pll->max_frac_feedback_div; 455 max_fractional_feed_div = pll->max_frac_feedback_div;
451 } 456 }
452 457
453 for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) { 458 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
454 uint32_t ref_div; 459 uint32_t ref_div;
455 460
456 if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 461 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
457 continue; 462 continue;
458 463
459 /* legacy radeons only have a few post_divs */ 464 /* legacy radeons only have a few post_divs */
460 if (flags & RADEON_PLL_LEGACY) { 465 if (pll->flags & RADEON_PLL_LEGACY) {
461 if ((post_div == 5) || 466 if ((post_div == 5) ||
462 (post_div == 7) || 467 (post_div == 7) ||
463 (post_div == 9) || 468 (post_div == 9) ||
@@ -504,7 +509,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
504 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; 509 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
505 current_freq = radeon_div(tmp, ref_div * post_div); 510 current_freq = radeon_div(tmp, ref_div * post_div);
506 511
507 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { 512 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
508 error = freq - current_freq; 513 error = freq - current_freq;
509 error = error < 0 ? 0xffffffff : error; 514 error = error < 0 ? 0xffffffff : error;
510 } else 515 } else
@@ -531,12 +536,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
531 best_freq = current_freq; 536 best_freq = current_freq;
532 best_error = error; 537 best_error = error;
533 best_vco_diff = vco_diff; 538 best_vco_diff = vco_diff;
534 } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) || 539 } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
535 ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) || 540 ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
536 ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) || 541 ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
537 ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) || 542 ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
538 ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) || 543 ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
539 ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) { 544 ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
540 best_post_div = post_div; 545 best_post_div = post_div;
541 best_ref_div = ref_div; 546 best_ref_div = ref_div;
542 best_feedback_div = feedback_div; 547 best_feedback_div = feedback_div;
@@ -572,8 +577,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
572 uint32_t *fb_div_p, 577 uint32_t *fb_div_p,
573 uint32_t *frac_fb_div_p, 578 uint32_t *frac_fb_div_p,
574 uint32_t *ref_div_p, 579 uint32_t *ref_div_p,
575 uint32_t *post_div_p, 580 uint32_t *post_div_p)
576 int flags)
577{ 581{
578 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 582 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
579 fixed20_12 pll_out_max, pll_out_min; 583 fixed20_12 pll_out_max, pll_out_min;
@@ -667,7 +671,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
667 radeonfb_remove(dev, fb); 671 radeonfb_remove(dev, fb);
668 672
669 if (radeon_fb->obj) { 673 if (radeon_fb->obj) {
670 radeon_gem_object_unpin(radeon_fb->obj);
671 mutex_lock(&dev->struct_mutex); 674 mutex_lock(&dev->struct_mutex);
672 drm_gem_object_unreference(radeon_fb->obj); 675 drm_gem_object_unreference(radeon_fb->obj);
673 mutex_unlock(&dev->struct_mutex); 676 mutex_unlock(&dev->struct_mutex);
@@ -715,7 +718,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
715 struct drm_gem_object *obj; 718 struct drm_gem_object *obj;
716 719
717 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); 720 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
718 721 if (obj == NULL) {
722 dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
723 "can't create framebuffer\n", mode_cmd->handle);
724 return NULL;
725 }
719 return radeon_framebuffer_create(dev, mode_cmd, obj); 726 return radeon_framebuffer_create(dev, mode_cmd, obj);
720} 727}
721 728
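radeon_compute_pll() and radeon_compute_pll_avivo() lose their separate flags argument here: the flags move into struct radeon_pll, and the new RADEON_PLL_USE_POST_DIV bit pins the search to a fixed post divider by clamping min_post_div = max_post_div = pll->post_div. The matching caller and struct changes are in the radeon_legacy_crtc.c and radeon_mode.h hunks below. A minimal sketch of the new calling convention (the flag and divider choices are placeholders):

pll->flags = RADEON_PLL_LEGACY | RADEON_PLL_USE_POST_DIV;
pll->post_div = 2;			/* caller-fixed post divider */

radeon_compute_pll(pll, mode->clock,
		   &freq, &feedback_div, &frac_fb_div,
		   &reference_div, &post_divider);
/* post_divider comes back as 2: USE_POST_DIV pinned the search range */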
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cc27485a07ad..b6d8081e1246 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
339 } 339 }
340} 340}
341 341
342/* properly set crtc bpp when using atombios */
343void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
344{
345 struct drm_device *dev = crtc->dev;
346 struct radeon_device *rdev = dev->dev_private;
347 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
348 int format;
349 uint32_t crtc_gen_cntl;
350 uint32_t disp_merge_cntl;
351 uint32_t crtc_pitch;
352
353 switch (crtc->fb->bits_per_pixel) {
354 case 8:
355 format = 2;
356 break;
357 case 15: /* 555 */
358 format = 3;
359 break;
360 case 16: /* 565 */
361 format = 4;
362 break;
363 case 24: /* RGB */
364 format = 5;
365 break;
366 case 32: /* xRGB */
367 format = 6;
368 break;
369 default:
370 return;
371 }
372
373 crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
374 ((crtc->fb->bits_per_pixel * 8) - 1)) /
375 (crtc->fb->bits_per_pixel * 8));
376 crtc_pitch |= crtc_pitch << 16;
377
378 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
379
380 switch (radeon_crtc->crtc_id) {
381 case 0:
382 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
383 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
384 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
385
386 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
387 crtc_gen_cntl |= (format << 8);
388 crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
389 WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
390 break;
391 case 1:
392 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
393 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
394 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
395
396 crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
397 crtc_gen_cntl |= (format << 8);
398 WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
399 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
400 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
401 break;
402 }
403}
404
405int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 342int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
406 struct drm_framebuffer *old_fb) 343 struct drm_framebuffer *old_fb)
407{ 344{
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
755 uint32_t post_divider = 0; 692 uint32_t post_divider = 0;
756 uint32_t freq = 0; 693 uint32_t freq = 0;
757 uint8_t pll_gain; 694 uint8_t pll_gain;
758 int pll_flags = RADEON_PLL_LEGACY;
759 bool use_bios_divs = false; 695 bool use_bios_divs = false;
760 /* PLL registers */ 696 /* PLL registers */
761 uint32_t pll_ref_div = 0; 697 uint32_t pll_ref_div = 0;
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
789 else 725 else
790 pll = &rdev->clock.p1pll; 726 pll = &rdev->clock.p1pll;
791 727
728 pll->flags = RADEON_PLL_LEGACY;
729
792 if (mode->clock > 200000) /* range limits??? */ 730 if (mode->clock > 200000) /* range limits??? */
793 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 731 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
794 else 732 else
795 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 733 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
796 734
797 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 735 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
798 if (encoder->crtc == crtc) { 736 if (encoder->crtc == crtc) {
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
804 } 742 }
805 743
806 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 744 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
807 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 745 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
808 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 746 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
809 if (!rdev->is_atom_bios) { 747 if (!rdev->is_atom_bios) {
810 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 748 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
819 } 757 }
820 } 758 }
821 } 759 }
822 pll_flags |= RADEON_PLL_USE_REF_DIV; 760 pll->flags |= RADEON_PLL_USE_REF_DIV;
823 } 761 }
824 } 762 }
825 } 763 }
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
829 if (!use_bios_divs) { 767 if (!use_bios_divs) {
830 radeon_compute_pll(pll, mode->clock, 768 radeon_compute_pll(pll, mode->clock,
831 &freq, &feedback_div, &frac_fb_div, 769 &freq, &feedback_div, &frac_fb_div,
832 &reference_div, &post_divider, 770 &reference_div, &post_divider);
833 pll_flags);
834 771
835 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 772 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
836 if (post_div->divider == post_divider) 773 if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 91cb041cb40d..96b851f92f4c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -125,16 +125,24 @@ struct radeon_tmds_pll {
125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
128#define RADEON_PLL_USE_POST_DIV (1 << 12)
128 129
129struct radeon_pll { 130struct radeon_pll {
130 uint16_t reference_freq; 131 /* reference frequency */
131 uint16_t reference_div; 132 uint32_t reference_freq;
133
134 /* fixed dividers */
135 uint32_t reference_div;
136 uint32_t post_div;
137
138 /* pll in/out limits */
132 uint32_t pll_in_min; 139 uint32_t pll_in_min;
133 uint32_t pll_in_max; 140 uint32_t pll_in_max;
134 uint32_t pll_out_min; 141 uint32_t pll_out_min;
135 uint32_t pll_out_max; 142 uint32_t pll_out_max;
136 uint16_t xclk; 143 uint32_t best_vco;
137 144
145 /* divider limits */
138 uint32_t min_ref_div; 146 uint32_t min_ref_div;
139 uint32_t max_ref_div; 147 uint32_t max_ref_div;
140 uint32_t min_post_div; 148 uint32_t min_post_div;
@@ -143,7 +151,12 @@ struct radeon_pll {
143 uint32_t max_feedback_div; 151 uint32_t max_feedback_div;
144 uint32_t min_frac_feedback_div; 152 uint32_t min_frac_feedback_div;
145 uint32_t max_frac_feedback_div; 153 uint32_t max_frac_feedback_div;
146 uint32_t best_vco; 154
155 /* flags for the current clock */
156 uint32_t flags;
157
158 /* pll id */
159 uint32_t id;
147}; 160};
148 161
149struct radeon_i2c_chan { 162struct radeon_i2c_chan {
@@ -417,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
417 uint32_t *fb_div_p, 430 uint32_t *fb_div_p,
418 uint32_t *frac_fb_div_p, 431 uint32_t *frac_fb_div_p,
419 uint32_t *ref_div_p, 432 uint32_t *ref_div_p,
420 uint32_t *post_div_p, 433 uint32_t *post_div_p);
421 int flags);
422 434
423extern void radeon_compute_pll_avivo(struct radeon_pll *pll, 435extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
424 uint64_t freq, 436 uint64_t freq,
@@ -426,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
426 uint32_t *fb_div_p, 438 uint32_t *fb_div_p,
427 uint32_t *frac_fb_div_p, 439 uint32_t *frac_fb_div_p,
428 uint32_t *ref_div_p, 440 uint32_t *ref_div_p,
429 uint32_t *post_div_p, 441 uint32_t *post_div_p);
430 int flags);
431 442
432extern void radeon_setup_encoder_clones(struct drm_device *dev); 443extern void radeon_setup_encoder_clones(struct drm_device *dev);
433 444
@@ -453,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
453 464
454extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 465extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
455 struct drm_framebuffer *old_fb); 466 struct drm_framebuffer *old_fb);
456extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
457 467
458extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, 468extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
459 struct drm_file *file_priv, 469 struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4e636de877b2..d72a71bff218 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -220,7 +220,8 @@ int radeon_bo_unpin(struct radeon_bo *bo)
220 220
221int radeon_bo_evict_vram(struct radeon_device *rdev) 221int radeon_bo_evict_vram(struct radeon_device *rdev)
222{ 222{
 223	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
224 if (0 && (rdev->flags & RADEON_IS_IGP)) {
224 if (rdev->mc.igp_sideport_enabled == false) 225 if (rdev->mc.igp_sideport_enabled == false)
225 /* Useless to evict on IGP chips */ 226 /* Useless to evict on IGP chips */
226 return 0; 227 return 0;
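The `0 &&` prepended above is a compile-time kill switch: the IGP special case in radeon_bo_evict_vram() still builds and type-checks, but the branch is dead until the pm-ops based fix mentioned in the comment lands. The idiom in isolation, for reference only:

if (0 && (rdev->flags & RADEON_IS_IGP)) {
	/* kept compilable, never executed until the real fix is in place */
}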
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c8849a16..c29ac434ac9c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
910x22b8 SE_TCL_TEX_CYL_WRAP_CTL 910x22b8 SE_TCL_TEX_CYL_WRAP_CTL
920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL 920x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
930x22c4 SE_TCL_POINT_SPRITE_CNTL 930x22c4 SE_TCL_POINT_SPRITE_CNTL
940x22d0 SE_PVS_CNTL
950x22d4 SE_PVS_CONST_CNTL
940x2648 RE_POINTSIZE 960x2648 RE_POINTSIZE
950x26c0 RE_TOP_LEFT 970x26c0 RE_TOP_LEFT
960x26c4 RE_MISC 980x26c4 RE_MISC
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 59c71245fb91..55f6ffc4e58b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -779,7 +779,6 @@ int rv770_mc_init(struct radeon_device *rdev)
779 fixed20_12 a; 779 fixed20_12 a;
780 u32 tmp; 780 u32 tmp;
781 int chansize, numchan; 781 int chansize, numchan;
782 int r;
783 782
 784	/* Get VRAM information */	 783	/* Get VRAM information */
785 rdev->mc.vram_is_ddr = true; 784 rdev->mc.vram_is_ddr = true;
@@ -822,9 +821,6 @@ int rv770_mc_init(struct radeon_device *rdev)
822 rdev->mc.real_vram_size = rdev->mc.aper_size; 821 rdev->mc.real_vram_size = rdev->mc.aper_size;
823 822
824 if (rdev->flags & RADEON_IS_AGP) { 823 if (rdev->flags & RADEON_IS_AGP) {
825 r = radeon_agp_init(rdev);
826 if (r)
827 return r;
828 /* gtt_size is setup by radeon_agp_init */ 824 /* gtt_size is setup by radeon_agp_init */
829 rdev->mc.gtt_location = rdev->mc.agp_base; 825 rdev->mc.gtt_location = rdev->mc.agp_base;
830 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; 826 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -972,13 +968,16 @@ int rv770_suspend(struct radeon_device *rdev)
972 /* FIXME: we should wait for ring to be empty */ 968 /* FIXME: we should wait for ring to be empty */
973 r700_cp_stop(rdev); 969 r700_cp_stop(rdev);
974 rdev->cp.ready = false; 970 rdev->cp.ready = false;
971 r600_irq_suspend(rdev);
975 r600_wb_disable(rdev); 972 r600_wb_disable(rdev);
976 rv770_pcie_gart_disable(rdev); 973 rv770_pcie_gart_disable(rdev);
977 /* unpin shaders bo */ 974 /* unpin shaders bo */
978 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 975 if (rdev->r600_blit.shader_obj) {
979 if (likely(r == 0)) { 976 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
980 radeon_bo_unpin(rdev->r600_blit.shader_obj); 977 if (likely(r == 0)) {
981 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 978 radeon_bo_unpin(rdev->r600_blit.shader_obj);
979 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
980 }
982 } 981 }
983 return 0; 982 return 0;
984} 983}
@@ -1037,6 +1036,11 @@ int rv770_init(struct radeon_device *rdev)
1037 r = radeon_fence_driver_init(rdev); 1036 r = radeon_fence_driver_init(rdev);
1038 if (r) 1037 if (r)
1039 return r; 1038 return r;
1039 if (rdev->flags & RADEON_IS_AGP) {
1040 r = radeon_agp_init(rdev);
1041 if (r)
1042 radeon_agp_disable(rdev);
1043 }
1040 r = rv770_mc_init(rdev); 1044 r = rv770_mc_init(rdev);
1041 if (r) 1045 if (r)
1042 return r; 1046 return r;
@@ -1071,13 +1075,14 @@ int rv770_init(struct radeon_device *rdev)
1071 if (rdev->accel_working) { 1075 if (rdev->accel_working) {
1072 r = radeon_ib_pool_init(rdev); 1076 r = radeon_ib_pool_init(rdev);
1073 if (r) { 1077 if (r) {
1074 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 1078 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1075 rdev->accel_working = false;
1076 }
1077 r = r600_ib_test(rdev);
1078 if (r) {
1079 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1080 rdev->accel_working = false; 1079 rdev->accel_working = false;
1080 } else {
1081 r = r600_ib_test(rdev);
1082 if (r) {
1083 dev_err(rdev->dev, "IB test failed (%d).\n", r);
1084 rdev->accel_working = false;
1085 }
1081 } 1086 }
1082 } 1087 }
1083 return 0; 1088 return 0;
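Three related cleanups show up in rv770.c above: AGP setup moves out of rv770_mc_init() into rv770_init(), where a failure now falls back to radeon_agp_disable() instead of aborting the whole init; the blit shader BO unpin in rv770_suspend() is guarded against the object not existing yet; and r600_ib_test() only runs when radeon_ib_pool_init() succeeded, with both errors reported through dev_err() so the messages carry the device prefix. The suspend-side guard, as a standalone sketch using only names from the hunk:

if (rdev->r600_blit.shader_obj) {		/* suspend can run before accel init */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
}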
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2920f9a279e1..1a3e909b7bba 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -426,7 +426,8 @@ moved:
426 bdev->man[bo->mem.mem_type].gpu_offset; 426 bdev->man[bo->mem.mem_type].gpu_offset;
427 bo->cur_placement = bo->mem.placement; 427 bo->cur_placement = bo->mem.placement;
428 spin_unlock(&bo->lock); 428 spin_unlock(&bo->lock);
429 } 429 } else
430 bo->offset = 0;
430 431
431 return 0; 432 return 0;
432 433
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
523static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 524static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
524{ 525{
525 struct ttm_bo_global *glob = bdev->glob; 526 struct ttm_bo_global *glob = bdev->glob;
526 struct ttm_buffer_object *entry, *nentry; 527 struct ttm_buffer_object *entry = NULL;
527 struct list_head *list, *next; 528 int ret = 0;
528 int ret;
529 529
530 spin_lock(&glob->lru_lock); 530 spin_lock(&glob->lru_lock);
531 list_for_each_safe(list, next, &bdev->ddestroy) { 531 if (list_empty(&bdev->ddestroy))
532 entry = list_entry(list, struct ttm_buffer_object, ddestroy); 532 goto out_unlock;
533 nentry = NULL;
534 533
535 /* 534 entry = list_first_entry(&bdev->ddestroy,
536 * Protect the next list entry from destruction while we 535 struct ttm_buffer_object, ddestroy);
537 * unlock the lru_lock. 536 kref_get(&entry->list_kref);
538 */
539 537
540 if (next != &bdev->ddestroy) { 538 for (;;) {
541 nentry = list_entry(next, struct ttm_buffer_object, 539 struct ttm_buffer_object *nentry = NULL;
542 ddestroy); 540
541 if (entry->ddestroy.next != &bdev->ddestroy) {
542 nentry = list_first_entry(&entry->ddestroy,
543 struct ttm_buffer_object, ddestroy);
543 kref_get(&nentry->list_kref); 544 kref_get(&nentry->list_kref);
544 } 545 }
545 kref_get(&entry->list_kref);
546 546
547 spin_unlock(&glob->lru_lock); 547 spin_unlock(&glob->lru_lock);
548 ret = ttm_bo_cleanup_refs(entry, remove_all); 548 ret = ttm_bo_cleanup_refs(entry, remove_all);
549 kref_put(&entry->list_kref, ttm_bo_release_list); 549 kref_put(&entry->list_kref, ttm_bo_release_list);
550 entry = nentry;
551
552 if (ret || !entry)
553 goto out;
550 554
551 spin_lock(&glob->lru_lock); 555 spin_lock(&glob->lru_lock);
552 if (nentry) { 556 if (list_empty(&entry->ddestroy))
553 bool next_onlist = !list_empty(next);
554 spin_unlock(&glob->lru_lock);
555 kref_put(&nentry->list_kref, ttm_bo_release_list);
556 spin_lock(&glob->lru_lock);
557 /*
558 * Someone might have raced us and removed the
559 * next entry from the list. We don't bother restarting
560 * list traversal.
561 */
562
563 if (!next_onlist)
564 break;
565 }
566 if (ret)
567 break; 557 break;
568 } 558 }
569 ret = !list_empty(&bdev->ddestroy);
570 spin_unlock(&glob->lru_lock);
571 559
560out_unlock:
561 spin_unlock(&glob->lru_lock);
562out:
563 if (entry)
564 kref_put(&entry->list_kref, ttm_bo_release_list);
572 return ret; 565 return ret;
573} 566}
574 567
@@ -950,6 +943,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
950 ttm_flag_masked(&cur_flags, placement->busy_placement[i], 943 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
951 ~TTM_PL_MASK_MEMTYPE); 944 ~TTM_PL_MASK_MEMTYPE);
952 945
946
947 if (mem_type == TTM_PL_SYSTEM) {
948 mem->mem_type = mem_type;
949 mem->placement = cur_flags;
950 mem->mm_node = NULL;
951 return 0;
952 }
953
953 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 954 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
954 interruptible, no_wait); 955 interruptible, no_wait);
955 if (ret == 0 && mem->mm_node) { 956 if (ret == 0 && mem->mm_node) {
@@ -1844,6 +1845,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1844 * anyone tries to access a ttm page. 1845 * anyone tries to access a ttm page.
1845 */ 1846 */
1846 1847
1848 if (bo->bdev->driver->swap_notify)
1849 bo->bdev->driver->swap_notify(bo);
1850
1847 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); 1851 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1848out: 1852out:
1849 1853
@@ -1864,3 +1868,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1864 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1868 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1865 ; 1869 ;
1866} 1870}
1871EXPORT_SYMBOL(ttm_bo_swapout_all);
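Two behavioural changes land in ttm_bo.c above: ttm_bo_delayed_delete() is rewritten so the buffer object being processed, and its successor, are pinned via list_kref before lru_lock is dropped, replacing the old "protect the next list entry" juggling; and ttm_bo_swapout() now calls an optional driver swap_notify() hook before swapping a ttm out (vmwgfx wires this up below). One iteration of the new walk, reduced to the pinning idiom and using only names visible in the hunk:

/* lru_lock held on entry; entry already holds a list_kref reference */
if (entry->ddestroy.next != &bdev->ddestroy) {
	nentry = list_first_entry(&entry->ddestroy,
				  struct ttm_buffer_object, ddestroy);
	kref_get(&nentry->list_kref);		/* pin the successor too */
}
spin_unlock(&glob->lru_lock);			/* safe: both objects are pinned */

ret = ttm_bo_cleanup_refs(entry, remove_all);	/* may block */
kref_put(&entry->list_kref, ttm_bo_release_list);
entry = nentry;					/* continue from the pinned successor */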
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4ec..3d172ef04ee1 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
288 wake_up_all(&lock->queue); 288 wake_up_all(&lock->queue);
289 spin_unlock(&lock->lock); 289 spin_unlock(&lock->lock);
290} 290}
291EXPORT_SYMBOL(ttm_suspend_unlock);
291 292
292static bool __ttm_suspend_lock(struct ttm_lock *lock) 293static bool __ttm_suspend_lock(struct ttm_lock *lock)
293{ 294{
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
309{ 310{
310 wait_event(lock->queue, __ttm_suspend_lock(lock)); 311 wait_event(lock->queue, __ttm_suspend_lock(lock));
311} 312}
313EXPORT_SYMBOL(ttm_suspend_lock);
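Exporting ttm_suspend_lock()/ttm_suspend_unlock() (together with ttm_bo_swapout_all() above) lets a driver quiesce TTM around suspend and hibernation from a PM notifier; the vmwgfx hunks below do exactly that. The expected bracketing, reduced to its core (names taken from the vmwgfx code further down):

/* PM_SUSPEND_PREPARE / PM_HIBERNATION_PREPARE */
ttm_suspend_lock(&vmaster->lock);	/* block new validations */
ttm_bo_swapout_all(&dev_priv->bdev);	/* empty VRAM, move contents to swappable memory */

/* PM_POST_SUSPEND / PM_POST_HIBERNATION */
ttm_suspend_unlock(&vmaster->lock);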
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index d6f2d2b882e9..825ebe3d89d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -48,6 +48,15 @@ struct ttm_placement vmw_vram_placement = {
48 .busy_placement = &vram_placement_flags 48 .busy_placement = &vram_placement_flags
49}; 49};
50 50
51struct ttm_placement vmw_vram_sys_placement = {
52 .fpfn = 0,
53 .lpfn = 0,
54 .num_placement = 1,
55 .placement = &vram_placement_flags,
56 .num_busy_placement = 1,
57 .busy_placement = &sys_placement_flags
58};
59
51struct ttm_placement vmw_vram_ne_placement = { 60struct ttm_placement vmw_vram_ne_placement = {
52 .fpfn = 0, 61 .fpfn = 0,
53 .lpfn = 0, 62 .lpfn = 0,
@@ -172,6 +181,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
172 return 0; 181 return 0;
173} 182}
174 183
184static void vmw_move_notify(struct ttm_buffer_object *bo,
185 struct ttm_mem_reg *new_mem)
186{
187 if (new_mem->mem_type != TTM_PL_SYSTEM)
188 vmw_dmabuf_gmr_unbind(bo);
189}
190
191static void vmw_swap_notify(struct ttm_buffer_object *bo)
192{
193 vmw_dmabuf_gmr_unbind(bo);
194}
195
175/** 196/**
176 * FIXME: We're using the old vmware polling method to sync. 197 * FIXME: We're using the old vmware polling method to sync.
177 * Do this with fences instead. 198 * Do this with fences instead.
@@ -225,5 +246,7 @@ struct ttm_bo_driver vmw_bo_driver = {
225 .sync_obj_wait = vmw_sync_obj_wait, 246 .sync_obj_wait = vmw_sync_obj_wait,
226 .sync_obj_flush = vmw_sync_obj_flush, 247 .sync_obj_flush = vmw_sync_obj_flush,
227 .sync_obj_unref = vmw_sync_obj_unref, 248 .sync_obj_unref = vmw_sync_obj_unref,
228 .sync_obj_ref = vmw_sync_obj_ref 249 .sync_obj_ref = vmw_sync_obj_ref,
250 .move_notify = vmw_move_notify,
251 .swap_notify = vmw_swap_notify
229}; 252};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1db1ef30be2b..dedd121d8fe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -147,6 +147,8 @@ static char *vmw_devname = "vmwgfx";
147 147
148static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 148static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
149static void vmw_master_init(struct vmw_master *); 149static void vmw_master_init(struct vmw_master *);
150static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
151 void *ptr);
150 152
151static void vmw_print_capabilities(uint32_t capabilities) 153static void vmw_print_capabilities(uint32_t capabilities)
152{ 154{
@@ -217,6 +219,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
217 219
218 dev_priv->dev = dev; 220 dev_priv->dev = dev;
219 dev_priv->vmw_chipset = chipset; 221 dev_priv->vmw_chipset = chipset;
222 dev_priv->last_read_sequence = (uint32_t) -100;
220 mutex_init(&dev_priv->hw_mutex); 223 mutex_init(&dev_priv->hw_mutex);
221 mutex_init(&dev_priv->cmdbuf_mutex); 224 mutex_init(&dev_priv->cmdbuf_mutex);
222 rwlock_init(&dev_priv->resource_lock); 225 rwlock_init(&dev_priv->resource_lock);
@@ -351,6 +354,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
351 vmw_fb_init(dev_priv); 354 vmw_fb_init(dev_priv);
352 } 355 }
353 356
357 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
358 register_pm_notifier(&dev_priv->pm_nb);
359
354 return 0; 360 return 0;
355 361
356out_no_device: 362out_no_device:
@@ -385,6 +391,8 @@ static int vmw_driver_unload(struct drm_device *dev)
385 391
386 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n"); 392 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
387 393
394 unregister_pm_notifier(&dev_priv->pm_nb);
395
388 if (!dev_priv->stealth) { 396 if (!dev_priv->stealth) {
389 vmw_fb_close(dev_priv); 397 vmw_fb_close(dev_priv);
390 vmw_kms_close(dev_priv); 398 vmw_kms_close(dev_priv);
@@ -650,6 +658,57 @@ static void vmw_remove(struct pci_dev *pdev)
650 drm_put_dev(dev); 658 drm_put_dev(dev);
651} 659}
652 660
661static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
662 void *ptr)
663{
664 struct vmw_private *dev_priv =
665 container_of(nb, struct vmw_private, pm_nb);
666 struct vmw_master *vmaster = dev_priv->active_master;
667
668 switch (val) {
669 case PM_HIBERNATION_PREPARE:
670 case PM_SUSPEND_PREPARE:
671 ttm_suspend_lock(&vmaster->lock);
672
673 /**
674 * This empties VRAM and unbinds all GMR bindings.
675 * Buffer contents is moved to swappable memory.
676 */
677 ttm_bo_swapout_all(&dev_priv->bdev);
678 break;
679 case PM_POST_HIBERNATION:
680 case PM_POST_SUSPEND:
681 ttm_suspend_unlock(&vmaster->lock);
682 break;
683 case PM_RESTORE_PREPARE:
684 break;
685 case PM_POST_RESTORE:
686 break;
687 default:
688 break;
689 }
690 return 0;
691}
692
693/**
694 * These might not be needed with the virtual SVGA device.
695 */
696
697int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
698{
699 pci_save_state(pdev);
700 pci_disable_device(pdev);
701 pci_set_power_state(pdev, PCI_D3hot);
702 return 0;
703}
704
705int vmw_pci_resume(struct pci_dev *pdev)
706{
707 pci_set_power_state(pdev, PCI_D0);
708 pci_restore_state(pdev);
709 return pci_enable_device(pdev);
710}
711
653static struct drm_driver driver = { 712static struct drm_driver driver = {
654 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 713 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
655 DRIVER_MODESET, 714 DRIVER_MODESET,
@@ -689,7 +748,9 @@ static struct drm_driver driver = {
689 .name = VMWGFX_DRIVER_NAME, 748 .name = VMWGFX_DRIVER_NAME,
690 .id_table = vmw_pci_id_list, 749 .id_table = vmw_pci_id_list,
691 .probe = vmw_probe, 750 .probe = vmw_probe,
692 .remove = vmw_remove 751 .remove = vmw_remove,
752 .suspend = vmw_pci_suspend,
753 .resume = vmw_pci_resume
693 }, 754 },
694 .name = VMWGFX_DRIVER_NAME, 755 .name = VMWGFX_DRIVER_NAME,
695 .desc = VMWGFX_DRIVER_DESC, 756 .desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e61bd85b6975..50529a7f06fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "vmwgfx_drm.h" 33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h" 34#include "drm_hashtab.h"
35#include "linux/suspend.h"
35#include "ttm/ttm_bo_driver.h" 36#include "ttm/ttm_bo_driver.h"
36#include "ttm/ttm_object.h" 37#include "ttm/ttm_object.h"
37#include "ttm/ttm_lock.h" 38#include "ttm/ttm_lock.h"
@@ -258,6 +259,7 @@ struct vmw_private {
258 259
259 struct vmw_master *active_master; 260 struct vmw_master *active_master;
260 struct vmw_master fbdev_master; 261 struct vmw_master fbdev_master;
262 struct notifier_block pm_nb;
261}; 263};
262 264
263static inline struct vmw_private *vmw_priv(struct drm_device *dev) 265static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -353,6 +355,7 @@ extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
353 struct vmw_dma_buffer *bo); 355 struct vmw_dma_buffer *bo);
354extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, 356extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
355 struct vmw_dma_buffer *bo); 357 struct vmw_dma_buffer *bo);
358extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
356extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 359extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
357 struct drm_file *file_priv); 360 struct drm_file *file_priv);
358extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, 361extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -401,6 +404,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
401 404
402extern struct ttm_placement vmw_vram_placement; 405extern struct ttm_placement vmw_vram_placement;
403extern struct ttm_placement vmw_vram_ne_placement; 406extern struct ttm_placement vmw_vram_ne_placement;
407extern struct ttm_placement vmw_vram_sys_placement;
404extern struct ttm_placement vmw_sys_placement; 408extern struct ttm_placement vmw_sys_placement;
405extern struct ttm_bo_driver vmw_bo_driver; 409extern struct ttm_bo_driver vmw_bo_driver;
406extern int vmw_dma_quiescent(struct drm_device *dev); 410extern int vmw_dma_quiescent(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e92da567403..d69caf92ffe7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -490,10 +490,29 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) 490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
491 return 0; 491 return 0;
492 492
493 /**
494 * Put BO in VRAM, only if there is space.
495 */
496
497 ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
498 if (unlikely(ret == -ERESTARTSYS))
499 return ret;
500
501 /**
502 * Otherwise, set it up as GMR.
503 */
504
505 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
506 return 0;
507
493 ret = vmw_gmr_bind(dev_priv, bo); 508 ret = vmw_gmr_bind(dev_priv, bo);
494 if (likely(ret == 0 || ret == -ERESTARTSYS)) 509 if (likely(ret == 0 || ret == -ERESTARTSYS))
495 return ret; 510 return ret;
496 511
512 /**
513 * If that failed, try VRAM again, this time evicting
514 * previous contents.
515 */
497 516
498 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); 517 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
499 return ret; 518 return ret;
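The comments added above describe a three-step placement strategy for each buffer validated during execbuf: first validate against vmw_vram_sys_placement (preferred placement VRAM, busy placement system memory, per the vmwgfx_buffer.c hunk earlier), so the buffer only lands in VRAM when there is room; if it is still not GMR-bound, try vmw_gmr_bind(); and only if that also fails, force it into VRAM with vmw_vram_placement, evicting other buffers. Condensed into one flow (error handling elided, sketch only):

ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); /* VRAM if it fits */
if (vmw_dmabuf_gmr(bo) == SVGA_GMR_NULL) {
	ret = vmw_gmr_bind(dev_priv, bo);			 /* otherwise try a GMR */
	if (ret != 0 && ret != -ERESTARTSYS)
		ret = ttm_bo_validate(bo, &vmw_vram_placement,	 /* last resort: evict */
				      true, false);
}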
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 641dde76ada1..4f4f6432be8b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -649,14 +649,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
649 if (unlikely(ret != 0)) 649 if (unlikely(ret != 0))
650 goto err_unlock; 650 goto err_unlock;
651 651
652 if (vmw_bo->gmr_bound) {
653 vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
654 spin_lock(&bo->glob->lru_lock);
655 ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
656 spin_unlock(&bo->glob->lru_lock);
657 vmw_bo->gmr_bound = NULL;
658 }
659
660 ret = ttm_bo_validate(bo, &ne_placement, false, false); 652 ret = ttm_bo_validate(bo, &ne_placement, false, false);
661 ttm_bo_unreserve(bo); 653 ttm_bo_unreserve(bo);
662err_unlock: 654err_unlock:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 01feb48af333..f7d5f70b52dd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -98,8 +98,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
98 (unsigned int) min, 98 (unsigned int) min,
99 (unsigned int) fifo->capabilities); 99 (unsigned int) fifo->capabilities);
100 100
101 dev_priv->fence_seq = (uint32_t) -100; 101 dev_priv->fence_seq = dev_priv->last_read_sequence;
102 dev_priv->last_read_sequence = (uint32_t) -100;
103 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 102 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
104 103
105 return vmw_fifo_send_fence(dev_priv, &dummy); 104 return vmw_fifo_send_fence(dev_priv, &dummy);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b1af76e371c3..686692de209a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -553,9 +553,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
553 } *cmd; 553 } *cmd;
554 int i, increment = 1; 554 int i, increment = 1;
555 555
556 if (!num_clips || 556 if (!num_clips) {
557 !(dev_priv->fifo.capabilities &
558 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
559 num_clips = 1; 557 num_clips = 1;
560 clips = &norect; 558 clips = &norect;
561 norect.x1 = norect.y1 = 0; 559 norect.x1 = norect.y1 = 0;
@@ -574,10 +572,10 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
574 572
575 for (i = 0; i < num_clips; i++, clips += increment) { 573 for (i = 0; i < num_clips; i++, clips += increment) {
576 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); 574 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
577 cmd[i].body.x = cpu_to_le32(clips[i].x1); 575 cmd[i].body.x = cpu_to_le32(clips->x1);
578 cmd[i].body.y = cpu_to_le32(clips[i].y1); 576 cmd[i].body.y = cpu_to_le32(clips->y1);
579 cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1); 577 cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
580 cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1); 578 cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
581 } 579 }
582 580
583 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); 581 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index bb6e6a096d25..5b6eabeb7f51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -104,7 +104,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
104 bool pin, bool interruptible) 104 bool pin, bool interruptible)
105{ 105{
106 struct ttm_buffer_object *bo = &buf->base; 106 struct ttm_buffer_object *bo = &buf->base;
107 struct ttm_bo_global *glob = bo->glob;
108 struct ttm_placement *overlay_placement = &vmw_vram_placement; 107 struct ttm_placement *overlay_placement = &vmw_vram_placement;
109 int ret; 108 int ret;
110 109
@@ -116,14 +115,6 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
116 if (unlikely(ret != 0)) 115 if (unlikely(ret != 0))
117 goto err; 116 goto err;
118 117
119 if (buf->gmr_bound) {
120 vmw_gmr_unbind(dev_priv, buf->gmr_id);
121 spin_lock(&glob->lru_lock);
122 ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
123 spin_unlock(&glob->lru_lock);
124 buf->gmr_bound = NULL;
125 }
126
127 if (pin) 118 if (pin)
128 overlay_placement = &vmw_vram_ne_placement; 119 overlay_placement = &vmw_vram_ne_placement;
129 120
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c012d5927f65..e01db120efff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -599,6 +599,27 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
599 if (unlikely(ret != 0)) 599 if (unlikely(ret != 0))
600 goto out_err1; 600 goto out_err1;
601 601
602
603 if (srf->flags & (1 << 9) &&
604 srf->num_sizes == 1 &&
605 srf->sizes[0].width == 64 &&
606 srf->sizes[0].height == 64 &&
607 srf->format == SVGA3D_A8R8G8B8) {
608
609 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
610 /* clear the image */
611 if (srf->snooper.image) {
612 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
613 } else {
614 DRM_ERROR("Failed to allocate cursor_image\n");
615 ret = -ENOMEM;
616 goto out_err1;
617 }
618 } else {
619 srf->snooper.image = NULL;
620 }
621 srf->snooper.crtc = NULL;
622
602 user_srf->base.shareable = false; 623 user_srf->base.shareable = false;
603 user_srf->base.tfile = NULL; 624 user_srf->base.tfile = NULL;
604 625
@@ -622,24 +643,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
622 return ret; 643 return ret;
623 } 644 }
624 645
625 if (srf->flags & (1 << 9) &&
626 srf->num_sizes == 1 &&
627 srf->sizes[0].width == 64 &&
628 srf->sizes[0].height == 64 &&
629 srf->format == SVGA3D_A8R8G8B8) {
630
631 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
632 /* clear the image */
633 if (srf->snooper.image)
634 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
635 else
636 DRM_ERROR("Failed to allocate cursor_image\n");
637
638 } else {
639 srf->snooper.image = NULL;
640 }
641 srf->snooper.crtc = NULL;
642
643 rep->sid = user_srf->base.hash.key; 646 rep->sid = user_srf->base.hash.key;
644 if (rep->sid == SVGA3D_INVALID_ID) 647 if (rep->sid == SVGA3D_INVALID_ID)
645 DRM_ERROR("Created bad Surface ID.\n"); 648 DRM_ERROR("Created bad Surface ID.\n");
@@ -754,20 +757,29 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
754 return bo_user_size + page_array_size; 757 return bo_user_size + page_array_size;
755} 758}
756 759
757void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 760void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
758{ 761{
759 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 762 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
760 struct ttm_bo_global *glob = bo->glob; 763 struct ttm_bo_global *glob = bo->glob;
761 struct vmw_private *dev_priv = 764 struct vmw_private *dev_priv =
762 container_of(bo->bdev, struct vmw_private, bdev); 765 container_of(bo->bdev, struct vmw_private, bdev);
763 766
764 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
765 if (vmw_bo->gmr_bound) { 767 if (vmw_bo->gmr_bound) {
766 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); 768 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
767 spin_lock(&glob->lru_lock); 769 spin_lock(&glob->lru_lock);
768 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); 770 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
769 spin_unlock(&glob->lru_lock); 771 spin_unlock(&glob->lru_lock);
772 vmw_bo->gmr_bound = false;
770 } 773 }
774}
775
776void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
777{
778 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
779 struct ttm_bo_global *glob = bo->glob;
780
781 vmw_dmabuf_gmr_unbind(bo);
782 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
771 kfree(vmw_bo); 783 kfree(vmw_bo);
772} 784}
773 785
@@ -813,18 +825,10 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
813static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 825static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
814{ 826{
815 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 827 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
816 struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
817 struct ttm_bo_global *glob = bo->glob; 828 struct ttm_bo_global *glob = bo->glob;
818 struct vmw_private *dev_priv =
819 container_of(bo->bdev, struct vmw_private, bdev);
820 829
830 vmw_dmabuf_gmr_unbind(bo);
821 ttm_mem_global_free(glob->mem_glob, bo->acc_size); 831 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
822 if (vmw_bo->gmr_bound) {
823 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
824 spin_lock(&glob->lru_lock);
825 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
826 spin_unlock(&glob->lru_lock);
827 }
828 kfree(vmw_user_bo); 832 kfree(vmw_user_bo);
829} 833}
830 834
@@ -868,7 +872,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
868 } 872 }
869 873
870 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, 874 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
871 &vmw_vram_placement, true, 875 &vmw_vram_sys_placement, true,
872 &vmw_user_dmabuf_destroy); 876 &vmw_user_dmabuf_destroy);
873 if (unlikely(ret != 0)) 877 if (unlikely(ret != 0))
874 return ret; 878 return ret;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 102ade134165..fee6eee7ae5b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
286 MCC_WRB_SGE_CNT_SHIFT; 286 MCC_WRB_SGE_CNT_SHIFT;
287 wrb->payload_length = payload_len; 287 wrb->payload_length = payload_len;
288 wrb->tag0 = opcode; 288 wrb->tag0 = opcode;
289 be_dws_cpu_to_le(wrb, 20); 289 be_dws_cpu_to_le(wrb, 8);
290} 290}
291 291
292/* Don't touch the hdr after it's prepared */ 292/* Don't touch the hdr after it's prepared */
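The be_wrb_hdr_prepare() fix above shrinks the cpu-to-LE conversion from 20 bytes to 8, i.e. only the first two dwords actually built here (the embedded/SGE-count word and payload_length); converting 20 bytes also byte-swapped the tag words, which the driver appears to consume in CPU order. be_dws_cpu_to_le() is assumed to be a dword-wise swap helper along these lines (illustrative, not the driver's implementation):

static void dws_cpu_to_le(void *wrb, u32 len)
{
	u32 *dw = wrb;

	while (len) {			/* len is in bytes, a multiple of 4 */
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	}
}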
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3a1f7902c16d..33ab8c7f14fe 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size)
910static void be_post_rx_frags(struct be_adapter *adapter) 910static void be_post_rx_frags(struct be_adapter *adapter)
911{ 911{
912 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; 912 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
913 struct be_rx_page_info *page_info = NULL; 913 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
914 struct be_queue_info *rxq = &adapter->rx_obj.q; 914 struct be_queue_info *rxq = &adapter->rx_obj.q;
915 struct page *pagep = NULL; 915 struct page *pagep = NULL;
916 struct be_eth_rx_d *rxd; 916 struct be_eth_rx_d *rxd;
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
941 rxd = queue_head_node(rxq); 941 rxd = queue_head_node(rxq);
942 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); 942 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
943 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); 943 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
944 queue_head_inc(rxq);
945 944
946 /* Any space left in the current big page for another frag? */ 945 /* Any space left in the current big page for another frag? */
947 if ((page_offset + rx_frag_size + rx_frag_size) > 946 if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
949 pagep = NULL; 948 pagep = NULL;
950 page_info->last_page_user = true; 949 page_info->last_page_user = true;
951 } 950 }
951
952 prev_page_info = page_info;
953 queue_head_inc(rxq);
952 page_info = &page_info_tbl[rxq->head]; 954 page_info = &page_info_tbl[rxq->head];
953 } 955 }
954 if (pagep) 956 if (pagep)
955 page_info->last_page_user = true; 957 prev_page_info->last_page_user = true;
956 958
957 if (posted) { 959 if (posted) {
958 atomic_add(posted, &rxq->used); 960 atomic_add(posted, &rxq->used);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 8ffea3990d07..0b23bc4f56c6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
33#include <asm/dma.h> 33#include <asm/dma.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35 35
36#include <asm/dpmc.h>
36#include <asm/blackfin.h> 37#include <asm/blackfin.h>
37#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
38#include <asm/portmux.h> 39#include <asm/portmux.h>
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev)
386 u32 sclk, mdc_div; 387 u32 sclk, mdc_div;
387 388
388 /* Enable PHY output early */ 389 /* Enable PHY output early */
389 if (!(bfin_read_VR_CTL() & PHYCLKOE)) 390 if (!(bfin_read_VR_CTL() & CLKBUFOE))
390 bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); 391 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
391 392
392 sclk = get_sclk(); 393 sclk = get_sclk();
393 mdc_div = ((sclk / MDC_CLK) / 2) - 1; 394 mdc_div = ((sclk / MDC_CLK) / 2) - 1;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2a567df3ea71..e8932db7ee77 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -326,6 +326,8 @@ struct e1000_adapter {
326 /* for ioport free */ 326 /* for ioport free */
327 int bars; 327 int bars;
328 int need_ioport; 328 int need_ioport;
329
330 bool discarding;
329}; 331};
330 332
331enum e1000_state_t { 333enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7e855f9bbd97..d29bb532eccf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1698 rctl &= ~E1000_RCTL_SZ_4096; 1698 rctl &= ~E1000_RCTL_SZ_4096;
1699 rctl |= E1000_RCTL_BSEX; 1699 rctl |= E1000_RCTL_BSEX;
1700 switch (adapter->rx_buffer_len) { 1700 switch (adapter->rx_buffer_len) {
1701 case E1000_RXBUFFER_256:
1702 rctl |= E1000_RCTL_SZ_256;
1703 rctl &= ~E1000_RCTL_BSEX;
1704 break;
1705 case E1000_RXBUFFER_512:
1706 rctl |= E1000_RCTL_SZ_512;
1707 rctl &= ~E1000_RCTL_BSEX;
1708 break;
1709 case E1000_RXBUFFER_1024:
1710 rctl |= E1000_RCTL_SZ_1024;
1711 rctl &= ~E1000_RCTL_BSEX;
1712 break;
1713 case E1000_RXBUFFER_2048: 1701 case E1000_RXBUFFER_2048:
1714 default: 1702 default:
1715 rctl |= E1000_RCTL_SZ_2048; 1703 rctl |= E1000_RCTL_SZ_2048;
@@ -2802,13 +2790,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2802dma_error: 2790dma_error:
2803 dev_err(&pdev->dev, "TX DMA map failed\n"); 2791 dev_err(&pdev->dev, "TX DMA map failed\n");
2804 buffer_info->dma = 0; 2792 buffer_info->dma = 0;
2805 count--; 2793 if (count)
2806
2807 while (count >= 0) {
2808 count--; 2794 count--;
2809 i--; 2795
2810 if (i < 0) 2796 while (count--) {
2797 if (i==0)
2811 i += tx_ring->count; 2798 i += tx_ring->count;
2799 i--;
2812 buffer_info = &tx_ring->buffer_info[i]; 2800 buffer_info = &tx_ring->buffer_info[i];
2813 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2801 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2814 } 2802 }
@@ -3176,13 +3164,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3176 * however with the new *_jumbo_rx* routines, jumbo receives will use 3164 * however with the new *_jumbo_rx* routines, jumbo receives will use
3177 * fragmented skbs */ 3165 * fragmented skbs */
3178 3166
3179 if (max_frame <= E1000_RXBUFFER_256) 3167 if (max_frame <= E1000_RXBUFFER_2048)
3180 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3181 else if (max_frame <= E1000_RXBUFFER_512)
3182 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3183 else if (max_frame <= E1000_RXBUFFER_1024)
3184 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3185 else if (max_frame <= E1000_RXBUFFER_2048)
3186 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3168 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3187 else 3169 else
3188#if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3170#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@@ -3850,13 +3832,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3850 3832
3851 length = le16_to_cpu(rx_desc->length); 3833 length = le16_to_cpu(rx_desc->length);
3852 /* !EOP means multiple descriptors were used to store a single 3834 /* !EOP means multiple descriptors were used to store a single
3853 * packet, also make sure the frame isn't just CRC only */ 3835 * packet, if thats the case we need to toss it. In fact, we
3854 if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { 3836 * need to toss every packet with the EOP bit clear and the next
3837 * frame that _does_ have the EOP bit set, as it is by
3838 * definition only a frame fragment
3839 */
3840 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
3841 adapter->discarding = true;
3842
3843 if (adapter->discarding) {
3855 /* All receives must fit into a single buffer */ 3844 /* All receives must fit into a single buffer */
3856 E1000_DBG("%s: Receive packet consumed multiple" 3845 E1000_DBG("%s: Receive packet consumed multiple"
3857 " buffers\n", netdev->name); 3846 " buffers\n", netdev->name);
3858 /* recycle */ 3847 /* recycle */
3859 buffer_info->skb = skb; 3848 buffer_info->skb = skb;
3849 if (status & E1000_RXD_STAT_EOP)
3850 adapter->discarding = false;
3860 goto next_desc; 3851 goto next_desc;
3861 } 3852 }
3862 3853
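Taken in isolation, the receive-side change in these e1000 hunks is a small two-state discard machine: once a descriptor arrives with the EOP bit clear, every buffer up to and including the next descriptor that does carry EOP is recycled instead of being handed to the stack. A minimal sketch, with field names as in the hunks above and the ring bookkeeping omitted:

	if (unlikely(!(status & E1000_RXD_STAT_EOP)))
		adapter->discarding = true;		/* start of a fragmented frame */

	if (adapter->discarding) {
		buffer_info->skb = skb;			/* recycle the buffer */
		if (status & E1000_RXD_STAT_EOP)
			adapter->discarding = false;	/* fragment run is over */
		goto next_desc;
	}

The e1000e hunks below implement the same state machine with a FLAG2_IS_DISCARDING bit instead of a dedicated bool.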
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d6ee28f6ea08..d236efaf7478 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
421/* CRC Stripping defines */ 421/* CRC Stripping defines */
422#define FLAG2_CRC_STRIPPING (1 << 0) 422#define FLAG2_CRC_STRIPPING (1 << 0)
423#define FLAG2_HAS_PHY_WAKEUP (1 << 1) 423#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
424#define FLAG2_IS_DISCARDING (1 << 2)
424 425
425#define E1000_RX_DESC_PS(R, i) \ 426#define E1000_RX_DESC_PS(R, i) \
426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 427 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c45965a256b6..57f149b75fbe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
450 450
451 length = le16_to_cpu(rx_desc->length); 451 length = le16_to_cpu(rx_desc->length);
452 452
453 /* !EOP means multiple descriptors were used to store a single 453 /*
454 * packet, also make sure the frame isn't just CRC only */ 454 * !EOP means multiple descriptors were used to store a single
455 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 455 * packet, if that's the case we need to toss it. In fact, we
456 * need to toss every packet with the EOP bit clear and the
457 * next frame that _does_ have the EOP bit set, as it is by
458 * definition only a frame fragment
459 */
460 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
461 adapter->flags2 |= FLAG2_IS_DISCARDING;
462
463 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
456 /* All receives must fit into a single buffer */ 464 /* All receives must fit into a single buffer */
457 e_dbg("Receive packet consumed multiple buffers\n"); 465 e_dbg("Receive packet consumed multiple buffers\n");
458 /* recycle */ 466 /* recycle */
459 buffer_info->skb = skb; 467 buffer_info->skb = skb;
468 if (status & E1000_RXD_STAT_EOP)
469 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
460 goto next_desc; 470 goto next_desc;
461 } 471 }
462 472
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
745 PCI_DMA_FROMDEVICE); 755 PCI_DMA_FROMDEVICE);
746 buffer_info->dma = 0; 756 buffer_info->dma = 0;
747 757
748 if (!(staterr & E1000_RXD_STAT_EOP)) { 758 /* see !EOP comment in other rx routine */
759 if (!(staterr & E1000_RXD_STAT_EOP))
760 adapter->flags2 |= FLAG2_IS_DISCARDING;
761
762 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
749 e_dbg("Packet Split buffers didn't pick up the full " 763 e_dbg("Packet Split buffers didn't pick up the full "
750 "packet\n"); 764 "packet\n");
751 dev_kfree_skb_irq(skb); 765 dev_kfree_skb_irq(skb);
766 if (staterr & E1000_RXD_STAT_EOP)
767 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
752 goto next_desc; 768 goto next_desc;
753 } 769 }
754 770
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1118 1134
1119 rx_ring->next_to_clean = 0; 1135 rx_ring->next_to_clean = 0;
1120 rx_ring->next_to_use = 0; 1136 rx_ring->next_to_use = 0;
1137 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1121 1138
1122 writel(0, adapter->hw.hw_addr + rx_ring->head); 1139 writel(0, adapter->hw.hw_addr + rx_ring->head);
1123 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1140 writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2333 rctl &= ~E1000_RCTL_SZ_4096; 2350 rctl &= ~E1000_RCTL_SZ_4096;
2334 rctl |= E1000_RCTL_BSEX; 2351 rctl |= E1000_RCTL_BSEX;
2335 switch (adapter->rx_buffer_len) { 2352 switch (adapter->rx_buffer_len) {
2336 case 256:
2337 rctl |= E1000_RCTL_SZ_256;
2338 rctl &= ~E1000_RCTL_BSEX;
2339 break;
2340 case 512:
2341 rctl |= E1000_RCTL_SZ_512;
2342 rctl &= ~E1000_RCTL_BSEX;
2343 break;
2344 case 1024:
2345 rctl |= E1000_RCTL_SZ_1024;
2346 rctl &= ~E1000_RCTL_BSEX;
2347 break;
2348 case 2048: 2353 case 2048:
2349 default: 2354 default:
2350 rctl |= E1000_RCTL_SZ_2048; 2355 rctl |= E1000_RCTL_SZ_2048;
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
3781 0, IPPROTO_TCP, 0); 3786 0, IPPROTO_TCP, 0);
3782 cmd_length = E1000_TXD_CMD_IP; 3787 cmd_length = E1000_TXD_CMD_IP;
3783 ipcse = skb_transport_offset(skb) - 1; 3788 ipcse = skb_transport_offset(skb) - 1;
3784 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3789 } else if (skb_is_gso_v6(skb)) {
3785 ipv6_hdr(skb)->payload_len = 0; 3790 ipv6_hdr(skb)->payload_len = 0;
3786 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3791 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3787 &ipv6_hdr(skb)->daddr, 3792 &ipv6_hdr(skb)->daddr,
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3962dma_error: 3967dma_error:
3963 dev_err(&pdev->dev, "TX DMA map failed\n"); 3968 dev_err(&pdev->dev, "TX DMA map failed\n");
3964 buffer_info->dma = 0; 3969 buffer_info->dma = 0;
3965 count--; 3970 if (count)
3966
3967 while (count >= 0) {
3968 count--; 3971 count--;
3969 i--; 3972
3970 if (i < 0) 3973 while (count--) {
3974 if (i==0)
3971 i += tx_ring->count; 3975 i += tx_ring->count;
3976 i--;
3972 buffer_info = &tx_ring->buffer_info[i]; 3977 buffer_info = &tx_ring->buffer_info[i];
3973 e1000_put_txbuf(adapter, buffer_info);; 3978 e1000_put_txbuf(adapter, buffer_info);;
3974 } 3979 }
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4317 * fragmented skbs 4322 * fragmented skbs
4318 */ 4323 */
4319 4324
4320 if (max_frame <= 256) 4325 if (max_frame <= 2048)
4321 adapter->rx_buffer_len = 256;
4322 else if (max_frame <= 512)
4323 adapter->rx_buffer_len = 512;
4324 else if (max_frame <= 1024)
4325 adapter->rx_buffer_len = 1024;
4326 else if (max_frame <= 2048)
4327 adapter->rx_buffer_len = 2048; 4326 adapter->rx_buffer_len = 2048;
4328 else 4327 else
4329 adapter->rx_buffer_len = 4096; 4328 adapter->rx_buffer_len = 4096;
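The dma_error rollback rewritten above (and repeated in e1000, igbvf, ixgb and ixgbe in this series) drops relational tests on counters that appear to be unsigned here: with unsigned types, the old "while (count >= 0)" can never terminate on its own and "if (i < 0)" can never fire. Pulled out of the diff, the new shape is roughly the following sketch, where ring and unmap_and_free() stand in for each driver's own ring structure and free helper:

	/* back out the descriptors mapped so far, newest first; the explicit
	 * wrap at index 0 replaces the old signed "i < 0" test */
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += ring->count;
		i--;
		unmap_and_free(&ring->buffer_info[i]);
	}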
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 933c64ff2465..997124d2992a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3422,7 +3422,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
3422 iph->daddr, 0, 3422 iph->daddr, 0,
3423 IPPROTO_TCP, 3423 IPPROTO_TCP,
3424 0); 3424 0);
3425 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3425 } else if (skb_is_gso_v6(skb)) {
3426 ipv6_hdr(skb)->payload_len = 0; 3426 ipv6_hdr(skb)->payload_len = 0;
3427 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3427 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3428 &ipv6_hdr(skb)->daddr, 3428 &ipv6_hdr(skb)->daddr,
@@ -3584,6 +3584,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3584 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 3584 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3585 struct skb_frag_struct *frag; 3585 struct skb_frag_struct *frag;
3586 3586
3587 count++;
3587 i++; 3588 i++;
3588 if (i == tx_ring->count) 3589 if (i == tx_ring->count)
3589 i = 0; 3590 i = 0;
@@ -3605,7 +3606,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3605 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 3606 if (pci_dma_mapping_error(pdev, buffer_info->dma))
3606 goto dma_error; 3607 goto dma_error;
3607 3608
3608 count++;
3609 } 3609 }
3610 3610
3611 tx_ring->buffer_info[i].skb = skb; 3611 tx_ring->buffer_info[i].skb = skb;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 0dbd0320023a..297a5ddd77f0 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1963 iph->daddr, 0, 1963 iph->daddr, 0,
1964 IPPROTO_TCP, 1964 IPPROTO_TCP,
1965 0); 1965 0);
1966 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 1966 } else if (skb_is_gso_v6(skb)) {
1967 ipv6_hdr(skb)->payload_len = 0; 1967 ipv6_hdr(skb)->payload_len = 0;
1968 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1968 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1969 &ipv6_hdr(skb)->daddr, 1969 &ipv6_hdr(skb)->daddr,
@@ -2126,6 +2126,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2126 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 2126 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2127 struct skb_frag_struct *frag; 2127 struct skb_frag_struct *frag;
2128 2128
2129 count++;
2129 i++; 2130 i++;
2130 if (i == tx_ring->count) 2131 if (i == tx_ring->count)
2131 i = 0; 2132 i = 0;
@@ -2146,7 +2147,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2146 PCI_DMA_TODEVICE); 2147 PCI_DMA_TODEVICE);
2147 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2148 if (pci_dma_mapping_error(pdev, buffer_info->dma))
2148 goto dma_error; 2149 goto dma_error;
2149 count++;
2150 } 2150 }
2151 2151
2152 tx_ring->buffer_info[i].skb = skb; 2152 tx_ring->buffer_info[i].skb = skb;
@@ -2163,14 +2163,14 @@ dma_error:
2163 buffer_info->length = 0; 2163 buffer_info->length = 0;
2164 buffer_info->next_to_watch = 0; 2164 buffer_info->next_to_watch = 0;
2165 buffer_info->mapped_as_page = false; 2165 buffer_info->mapped_as_page = false;
2166 count--; 2166 if (count)
2167 count--;
2167 2168
2168 /* clear timestamp and dma mappings for remaining portion of packet */ 2169 /* clear timestamp and dma mappings for remaining portion of packet */
2169 while (count >= 0) { 2170 while (count--) {
2170 count--; 2171 if (i==0)
2171 i--;
2172 if (i < 0)
2173 i += tx_ring->count; 2172 i += tx_ring->count;
2173 i--;
2174 buffer_info = &tx_ring->buffer_info[i]; 2174 buffer_info = &tx_ring->buffer_info[i];
2175 igbvf_put_txbuf(adapter, buffer_info); 2175 igbvf_put_txbuf(adapter, buffer_info);
2176 } 2176 }
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index bcd0f01d5feb..593d1a4f217c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1363dma_error: 1363dma_error:
1364 dev_err(&pdev->dev, "TX DMA map failed\n"); 1364 dev_err(&pdev->dev, "TX DMA map failed\n");
1365 buffer_info->dma = 0; 1365 buffer_info->dma = 0;
1366 count--; 1366 if (count)
1367
1368 while (count >= 0) {
1369 count--; 1367 count--;
1370 i--; 1368
1371 if (i < 0) 1369 while (count--) {
1370 if (i==0)
1372 i += tx_ring->count; 1371 i += tx_ring->count;
1372 i--;
1373 buffer_info = &tx_ring->buffer_info[i]; 1373 buffer_info = &tx_ring->buffer_info[i];
1374 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 1374 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1375 } 1375 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9c9202f40b10..b5f64ad67975 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -4928,7 +4928,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
4928 iph->daddr, 0, 4928 iph->daddr, 0,
4929 IPPROTO_TCP, 4929 IPPROTO_TCP,
4930 0); 4930 0);
4931 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 4931 } else if (skb_is_gso_v6(skb)) {
4932 ipv6_hdr(skb)->payload_len = 0; 4932 ipv6_hdr(skb)->payload_len = 0;
4933 tcp_hdr(skb)->check = 4933 tcp_hdr(skb)->check =
4934 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4934 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -5167,14 +5167,14 @@ dma_error:
5167 tx_buffer_info->dma = 0; 5167 tx_buffer_info->dma = 0;
5168 tx_buffer_info->time_stamp = 0; 5168 tx_buffer_info->time_stamp = 0;
5169 tx_buffer_info->next_to_watch = 0; 5169 tx_buffer_info->next_to_watch = 0;
5170 count--; 5170 if (count)
5171 count--;
5171 5172
5172 /* clear timestamp and dma mappings for remaining portion of packet */ 5173 /* clear timestamp and dma mappings for remaining portion of packet */
5173 while (count >= 0) { 5174 while (count--) {
5174 count--; 5175 if (i==0)
5175 i--;
5176 if (i < 0)
5177 i += tx_ring->count; 5176 i += tx_ring->count;
5177 i--;
5178 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5178 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5180 } 5180 }
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 813aca3fc433..7b17404d0858 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
717 PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), 717 PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
718 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), 718 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
719 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 719 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
720 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
720 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 721 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
721 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 722 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
722 PCMCIA_DEVICE_NULL, 723 PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b0e9f9c51721..0295097d6c44 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg);
410 410
411 411
412static void phy_change(struct work_struct *work); 412static void phy_change(struct work_struct *work);
413static void phy_state_machine(struct work_struct *work);
414 413
415/** 414/**
416 * phy_start_machine - start PHY state machine tracking 415 * phy_start_machine - start PHY state machine tracking
@@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev,
430{ 429{
431 phydev->adjust_state = handler; 430 phydev->adjust_state = handler;
432 431
433 INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
434 schedule_delayed_work(&phydev->state_queue, HZ); 432 schedule_delayed_work(&phydev->state_queue, HZ);
435} 433}
436 434
@@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start);
761 * phy_state_machine - Handle the state machine 759 * phy_state_machine - Handle the state machine
762 * @work: work_struct that describes the work to be done 760 * @work: work_struct that describes the work to be done
763 */ 761 */
764static void phy_state_machine(struct work_struct *work) 762void phy_state_machine(struct work_struct *work)
765{ 763{
766 struct delayed_work *dwork = to_delayed_work(work); 764 struct delayed_work *dwork = to_delayed_work(work);
767 struct phy_device *phydev = 765 struct phy_device *phydev =
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8212b2b93422..adbc0fded130 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
177 dev->state = PHY_DOWN; 177 dev->state = PHY_DOWN;
178 178
179 mutex_init(&dev->lock); 179 mutex_init(&dev->lock);
180 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
180 181
181 return dev; 182 return dev;
182} 183}
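The net effect of the two phylib hunks above is to set up the state-machine work item once, when the phy_device is created, rather than every time phy_start_machine() runs; to make that possible phy_state_machine() loses its static qualifier and is declared in linux/phy.h further down. Roughly, the resulting call sites look like this (sketch of the changed lines only):

	/* phy_device_create(): the delayed work is initialised up front */
	mutex_init(&dev->lock);
	INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);

	/* phy_start_machine(): nothing left to initialise, just schedule */
	phydev->adjust_state = handler;
	schedule_delayed_work(&phydev->state_queue, HZ);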
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 707b391afa02..894a7c84faef 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -4119,7 +4119,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4119 err = pcie_set_readrq(pdev, 4096); 4119 err = pcie_set_readrq(pdev, 4096);
4120 if (err) { 4120 if (err) {
4121 dev_err(&pdev->dev, "Set readrq failed.\n"); 4121 dev_err(&pdev->dev, "Set readrq failed.\n");
4122 goto err_out; 4122 goto err_out1;
4123 } 4123 }
4124 4124
4125 err = pci_request_regions(pdev, DRV_NAME); 4125 err = pci_request_regions(pdev, DRV_NAME);
@@ -4140,7 +4140,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4140 4140
4141 if (err) { 4141 if (err) {
4142 dev_err(&pdev->dev, "No usable DMA configuration.\n"); 4142 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4143 goto err_out; 4143 goto err_out2;
4144 } 4144 }
4145 4145
4146 /* Set PCIe reset type for EEH to fundamental. */ 4146 /* Set PCIe reset type for EEH to fundamental. */
@@ -4152,7 +4152,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4152 if (!qdev->reg_base) { 4152 if (!qdev->reg_base) {
4153 dev_err(&pdev->dev, "Register mapping failed.\n"); 4153 dev_err(&pdev->dev, "Register mapping failed.\n");
4154 err = -ENOMEM; 4154 err = -ENOMEM;
4155 goto err_out; 4155 goto err_out2;
4156 } 4156 }
4157 4157
4158 qdev->doorbell_area_size = pci_resource_len(pdev, 3); 4158 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@@ -4162,14 +4162,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4162 if (!qdev->doorbell_area) { 4162 if (!qdev->doorbell_area) {
4163 dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); 4163 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4164 err = -ENOMEM; 4164 err = -ENOMEM;
4165 goto err_out; 4165 goto err_out2;
4166 } 4166 }
4167 4167
4168 err = ql_get_board_info(qdev); 4168 err = ql_get_board_info(qdev);
4169 if (err) { 4169 if (err) {
4170 dev_err(&pdev->dev, "Register access failed.\n"); 4170 dev_err(&pdev->dev, "Register access failed.\n");
4171 err = -EIO; 4171 err = -EIO;
4172 goto err_out; 4172 goto err_out2;
4173 } 4173 }
4174 qdev->msg_enable = netif_msg_init(debug, default_msg); 4174 qdev->msg_enable = netif_msg_init(debug, default_msg);
4175 spin_lock_init(&qdev->hw_lock); 4175 spin_lock_init(&qdev->hw_lock);
@@ -4179,7 +4179,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4179 err = qdev->nic_ops->get_flash(qdev); 4179 err = qdev->nic_ops->get_flash(qdev);
4180 if (err) { 4180 if (err) {
4181 dev_err(&pdev->dev, "Invalid FLASH.\n"); 4181 dev_err(&pdev->dev, "Invalid FLASH.\n");
4182 goto err_out; 4182 goto err_out2;
4183 } 4183 }
4184 4184
4185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 4185 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
@@ -4212,8 +4212,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4212 DRV_NAME, DRV_VERSION); 4212 DRV_NAME, DRV_VERSION);
4213 } 4213 }
4214 return 0; 4214 return 0;
4215err_out: 4215err_out2:
4216 ql_release_all(pdev); 4216 ql_release_all(pdev);
4217err_out1:
4217 pci_disable_device(pdev); 4218 pci_disable_device(pdev);
4218 return err; 4219 return err;
4219} 4220}
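The qlge relabelling splits the probe error path in two, so that a failure before any PCI resources have been claimed no longer calls ql_release_all(). Schematically (only the structure is sketched; the intermediate initialisation steps between the two labels are elided here):

	err = pcie_set_readrq(pdev, 4096);
	if (err)
		goto err_out1;			/* nothing acquired yet */

	/* ... later failures, once regions are requested and mapped,
	 * jump to err_out2 instead ... */

	return 0;

err_out2:
	ql_release_all(pdev);			/* falls through */
err_out1:
	pci_disable_device(pdev);
	return err;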
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cc4218667cba..3c4836d0898f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3421 break; 3421 break;
3422 } 3422 }
3423 } else { 3423 } else {
3424 if (!(val64 & busy_bit)) { 3424 if (val64 & busy_bit) {
3425 ret = SUCCESS; 3425 ret = SUCCESS;
3426 break; 3426 break;
3427 } 3427 }
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 0d4eba7266ec..9f035b9f0350 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -804,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
804 loff_t offset, u8 *buffer, size_t length) 804 loff_t offset, u8 *buffer, size_t length)
805{ 805{
806 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; 806 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
807 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)]; 807 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
808 size_t outlen; 808 size_t outlen;
809 int rc; 809 int rc;
810 810
@@ -828,7 +828,7 @@ fail:
828int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 828int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
829 loff_t offset, const u8 *buffer, size_t length) 829 loff_t offset, const u8 *buffer, size_t length)
830{ 830{
831 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)]; 831 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
832 int rc; 832 int rc;
833 833
834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); 834 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
@@ -838,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
838 838
839 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); 839 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
840 840
841 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf), 841 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
842 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
842 NULL, 0, NULL); 843 NULL, 0, NULL);
843 if (rc) 844 if (rc)
844 goto fail; 845 goto fail;
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index de916728c2e3..10ce98f4c0fb 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -111,6 +111,7 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
111extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 111extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
112 loff_t offset, const u8 *buffer, 112 loff_t offset, const u8 *buffer,
113 size_t length); 113 size_t length);
114#define EFX_MCDI_NVRAM_LEN_MAX 128
114extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, 115extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
115 loff_t offset, size_t length); 116 loff_t offset, size_t length);
116extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, 117extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 2a85360a46f0..73e71f420624 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1090,8 +1090,10 @@
1090#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57 1090#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
1091#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58 1091#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
1092#define MC_CMD_MAC_RX_MATCH_FAULT 59 1092#define MC_CMD_MAC_RX_MATCH_FAULT 59
1093#define MC_CMD_GMAC_DMABUF_START 64
1094#define MC_CMD_GMAC_DMABUF_END 95
1093/* Insert new members here. */ 1095/* Insert new members here. */
1094#define MC_CMD_MAC_GENERATION_END 60 1096#define MC_CMD_MAC_GENERATION_END 96
1095#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1) 1097#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
1096 1098
1097/* MC_CMD_MAC_STATS: 1099/* MC_CMD_MAC_STATS:
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 3a464529a46b..407bbaddfea6 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -23,7 +23,6 @@
23#include "mcdi_pcol.h" 23#include "mcdi_pcol.h"
24 24
25#define EFX_SPI_VERIFY_BUF_LEN 16 25#define EFX_SPI_VERIFY_BUF_LEN 16
26#define EFX_MCDI_CHUNK_LEN 128
27 26
28struct efx_mtd_partition { 27struct efx_mtd_partition {
29 struct mtd_info mtd; 28 struct mtd_info mtd;
@@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
428 int rc = 0; 427 int rc = 0;
429 428
430 while (offset < end) { 429 while (offset < end) {
431 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 430 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
432 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, 431 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
433 buffer, chunk); 432 buffer, chunk);
434 if (rc) 433 if (rc)
@@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
491 } 490 }
492 491
493 while (offset < end) { 492 while (offset < end) {
494 chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 493 chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
495 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, 494 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
496 buffer, chunk); 495 buffer, chunk);
497 if (rc) 496 if (rc)
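Taken together, the sfc changes above replace variable-length on-stack MCDI buffers with buffers sized for a fixed per-transfer maximum (EFX_MCDI_NVRAM_LEN_MAX, 128 bytes), and the MTD code now chunks its reads and writes by that same constant instead of its private EFX_MCDI_CHUNK_LEN. The write path still tells the MC only the real payload length, padded to a 4-byte boundary. In outline:

	/* request buffer sized for the worst case ... */
	u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];

	/* ... but the RPC carries only the actual payload, 4-byte aligned */
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);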
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index ff8f0a417fa3..e0d13a451019 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -318,12 +318,6 @@ static int qt202x_reset_phy(struct efx_nic *efx)
318 /* Wait 250ms for the PHY to complete bootup */ 318 /* Wait 250ms for the PHY to complete bootup */
319 msleep(250); 319 msleep(250);
320 320
321 /* Check that all the MMDs we expect are present and responding. We
322 * expect faults on some if the link is down, but not on the PHY XS */
323 rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
324 if (rc < 0)
325 goto fail;
326
327 falcon_board(efx)->type->init_phy(efx); 321 falcon_board(efx)->type->init_phy(efx);
328 322
329 return rc; 323 return rc;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 37f486b65f63..d760650c5c04 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -644,6 +644,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
644{ 644{
645 u32 reg1; 645 u32 reg1;
646 646
647 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
647 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 648 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
648 reg1 &= ~phy_power[port]; 649 reg1 &= ~phy_power[port];
649 650
@@ -651,6 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
651 reg1 |= coma_mode[port]; 652 reg1 |= coma_mode[port];
652 653
653 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 654 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
655 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
654 sky2_pci_read32(hw, PCI_DEV_REG1); 656 sky2_pci_read32(hw, PCI_DEV_REG1);
655 657
656 if (hw->chip_id == CHIP_ID_YUKON_FE) 658 if (hw->chip_id == CHIP_ID_YUKON_FE)
@@ -707,9 +709,11 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
707 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); 709 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
708 } 710 }
709 711
712 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
710 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 713 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
711 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ 714 reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
712 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 715 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
716 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
713} 717}
714 718
715/* Force a renegotiation */ 719/* Force a renegotiation */
@@ -2149,7 +2153,9 @@ static void sky2_qlink_intr(struct sky2_hw *hw)
2149 2153
2150 /* reset PHY Link Detect */ 2154 /* reset PHY Link Detect */
2151 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); 2155 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2156 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2152 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); 2157 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2158 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2153 2159
2154 sky2_link_up(sky2); 2160 sky2_link_up(sky2);
2155} 2161}
@@ -2640,6 +2646,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2640 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 2646 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2641 u16 pci_err; 2647 u16 pci_err;
2642 2648
2649 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2643 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2650 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2644 if (net_ratelimit()) 2651 if (net_ratelimit())
2645 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", 2652 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2647,12 +2654,14 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2647 2654
2648 sky2_pci_write16(hw, PCI_STATUS, 2655 sky2_pci_write16(hw, PCI_STATUS,
2649 pci_err | PCI_STATUS_ERROR_BITS); 2656 pci_err | PCI_STATUS_ERROR_BITS);
2657 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2650 } 2658 }
2651 2659
2652 if (status & Y2_IS_PCI_EXP) { 2660 if (status & Y2_IS_PCI_EXP) {
2653 /* PCI-Express uncorrectable Error occurred */ 2661 /* PCI-Express uncorrectable Error occurred */
2654 u32 err; 2662 u32 err;
2655 2663
2664 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2656 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2665 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2657 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 2666 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2658 0xfffffffful); 2667 0xfffffffful);
@@ -2660,6 +2669,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2660 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); 2669 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2661 2670
2662 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2671 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2672 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2663 } 2673 }
2664 2674
2665 if (status & Y2_HWE_L1_MASK) 2675 if (status & Y2_HWE_L1_MASK)
@@ -3038,6 +3048,7 @@ static void sky2_reset(struct sky2_hw *hw)
3038 } 3048 }
3039 3049
3040 sky2_power_on(hw); 3050 sky2_power_on(hw);
3051 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3041 3052
3042 for (i = 0; i < hw->ports; i++) { 3053 for (i = 0; i < hw->ports; i++) {
3043 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 3054 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -3074,6 +3085,7 @@ static void sky2_reset(struct sky2_hw *hw)
3074 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; 3085 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3075 3086
3076 /* reset PHY Link Detect */ 3087 /* reset PHY Link Detect */
3088 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3077 sky2_pci_write16(hw, PSM_CONFIG_REG4, 3089 sky2_pci_write16(hw, PSM_CONFIG_REG4,
3078 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT); 3090 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
3079 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); 3091 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
@@ -3091,6 +3103,7 @@ static void sky2_reset(struct sky2_hw *hw)
3091 /* restore the PCIe Link Control register */ 3103 /* restore the PCIe Link Control register */
3092 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg); 3104 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3093 } 3105 }
3106 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3094 3107
3095 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ 3108 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3096 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); 3109 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
@@ -3228,6 +3241,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3228 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0; 3241 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3229} 3242}
3230 3243
3244static void sky2_hw_set_wol(struct sky2_hw *hw)
3245{
3246 int wol = 0;
3247 int i;
3248
3249 for (i = 0; i < hw->ports; i++) {
3250 struct net_device *dev = hw->dev[i];
3251 struct sky2_port *sky2 = netdev_priv(dev);
3252
3253 if (sky2->wol)
3254 wol = 1;
3255 }
3256
3257 if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3258 hw->chip_id == CHIP_ID_YUKON_EX ||
3259 hw->chip_id == CHIP_ID_YUKON_FE_P)
3260 sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3261
3262 device_set_wakeup_enable(&hw->pdev->dev, wol);
3263}
3264
3231static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 3265static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3232{ 3266{
3233 const struct sky2_port *sky2 = netdev_priv(dev); 3267 const struct sky2_port *sky2 = netdev_priv(dev);
@@ -3247,13 +3281,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3247 3281
3248 sky2->wol = wol->wolopts; 3282 sky2->wol = wol->wolopts;
3249 3283
3250 if (hw->chip_id == CHIP_ID_YUKON_EC_U || 3284 sky2_hw_set_wol(hw);
3251 hw->chip_id == CHIP_ID_YUKON_EX ||
3252 hw->chip_id == CHIP_ID_YUKON_FE_P)
3253 sky2_write32(hw, B0_CTST, sky2->wol
3254 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3255
3256 device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
3257 3285
3258 if (!netif_running(dev)) 3286 if (!netif_running(dev))
3259 sky2_wol_init(sky2); 3287 sky2_wol_init(sky2);
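A pattern worth noting in the sky2 hunks above: each write to the adapter's PCI configuration registers through the memory-mapped window is now bracketed by turning config-write access on and off again via B2_TST_CTRL1, since the hardware otherwise appears to ignore such writes. The shape of the pattern, using the PHY power-up hunk as the example (the conditional coma-mode handling is omitted):

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);	/* allow config writes */
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	reg1 &= ~phy_power[port];
	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);	/* lock them again */

The new sky2_hw_set_wol() helper simply centralises the per-chip WOL enable that sky2_set_wol() previously did inline, scanning both ports so that either one can keep hardware WOL armed.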
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 595777dcadb1..20696b5d60a5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -249,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = {
249 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 249 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ 250 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
251 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ 251 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
252 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
252 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 253 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
253 { } /* terminate list */ 254 { } /* terminate list */
254}; 255};
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 96bdc0b43889..eb8fe7e16c6c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3279,13 +3279,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3279 /* Handle the transmitted buffer and release */ 3279 /* Handle the transmitted buffer and release */
3280 /* the BD to be used with the current frame */ 3280 /* the BD to be used with the current frame */
3281 3281
3282 if (bd == ugeth->txBd[txQ]) /* queue empty? */ 3282 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3283 if (!skb)
3283 break; 3284 break;
3284 3285
3285 dev->stats.tx_packets++; 3286 dev->stats.tx_packets++;
3286 3287
3287 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3288
3289 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3288 if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
3290 skb_recycle_check(skb, 3289 skb_recycle_check(skb,
3291 ugeth->ug_info->uf_info.max_rx_buf_length + 3290 ugeth->ug_info->uf_info.max_rx_buf_length +
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c708ecc3cb2e..9ead30bd00c4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -395,8 +395,7 @@ static void refill_work(struct work_struct *work)
395 395
396 vi = container_of(work, struct virtnet_info, refill.work); 396 vi = container_of(work, struct virtnet_info, refill.work);
397 napi_disable(&vi->napi); 397 napi_disable(&vi->napi);
398 try_fill_recv(vi, GFP_KERNEL); 398 still_empty = !try_fill_recv(vi, GFP_KERNEL);
399 still_empty = (vi->num == 0);
400 napi_enable(&vi->napi); 399 napi_enable(&vi->napi);
401 400
402 /* In theory, this can happen: if we don't get any buffers in 401 /* In theory, this can happen: if we don't get any buffers in
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 5cc0f279417e..2d7c96d7e865 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -151,6 +151,7 @@ enum {
151 151
152 /* Device IDs */ 152 /* Device IDs */
153 USB_DEVICE_ID_I6050 = 0x0186, 153 USB_DEVICE_ID_I6050 = 0x0186,
154 USB_DEVICE_ID_I6050_2 = 0x0188,
154}; 155};
155 156
156 157
@@ -234,6 +235,7 @@ struct i2400mu {
234 u8 rx_size_auto_shrink; 235 u8 rx_size_auto_shrink;
235 236
236 struct dentry *debugfs_dentry; 237 struct dentry *debugfs_dentry;
238 unsigned i6050:1; /* 1 if this is a 6050 based SKU */
237}; 239};
238 240
239 241
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 3b48681f8a0d..98f4f8c5fb68 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface,
478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; 478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
479 i2400m->bus_bm_mac_addr_impaired = 0; 479 i2400m->bus_bm_mac_addr_impaired = 0;
480 480
481 if (id->idProduct == USB_DEVICE_ID_I6050) { 481 switch (id->idProduct) {
482 case USB_DEVICE_ID_I6050:
483 case USB_DEVICE_ID_I6050_2:
484 i2400mu->i6050 = 1;
485 break;
486 default:
487 break;
488 }
489
490 if (i2400mu->i6050) {
482 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; 491 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
483 i2400mu->endpoint_cfg.bulk_out = 0; 492 i2400mu->endpoint_cfg.bulk_out = 0;
484 i2400mu->endpoint_cfg.notification = 3; 493 i2400mu->endpoint_cfg.notification = 3;
@@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface)
719static 728static
720struct usb_device_id i2400mu_id_table[] = { 729struct usb_device_id i2400mu_id_table[] = {
721 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, 730 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
731 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
722 { USB_DEVICE(0x8086, 0x0181) }, 732 { USB_DEVICE(0x8086, 0x0181) },
723 { USB_DEVICE(0x8086, 0x1403) }, 733 { USB_DEVICE(0x8086, 0x1403) },
724 { USB_DEVICE(0x8086, 0x1405) }, 734 { USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 33a5866538e7..de45f308b744 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1598,6 +1598,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
1598 .use_bsm = false, 1598 .use_bsm = false,
1599 .ht_greenfield_support = true, 1599 .ht_greenfield_support = true,
1600 .led_compensation = 51, 1600 .led_compensation = 51,
1601 .use_rts_for_ht = true, /* use rts/cts protection */
1601 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1602 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1602 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1603 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1603}; 1604};
@@ -1622,6 +1623,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
1622 .use_bsm = false, 1623 .use_bsm = false,
1623 .ht_greenfield_support = true, 1624 .ht_greenfield_support = true,
1624 .led_compensation = 51, 1625 .led_compensation = 51,
1626 .use_rts_for_ht = true, /* use rts/cts protection */
1625 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1627 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1626}; 1628};
1627 1629
@@ -1667,6 +1669,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
1667 .use_bsm = false, 1669 .use_bsm = false,
1668 .ht_greenfield_support = true, 1670 .ht_greenfield_support = true,
1669 .led_compensation = 51, 1671 .led_compensation = 51,
1672 .use_rts_for_ht = true, /* use rts/cts protection */
1670 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1673 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1671 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1674 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1672}; 1675};
@@ -1691,6 +1694,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
1691 .use_bsm = false, 1694 .use_bsm = false,
1692 .ht_greenfield_support = true, 1695 .ht_greenfield_support = true,
1693 .led_compensation = 51, 1696 .led_compensation = 51,
1697 .use_rts_for_ht = true, /* use rts/cts protection */
1694 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1698 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1695 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1699 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1696}; 1700};
@@ -1715,6 +1719,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
1715 .use_bsm = false, 1719 .use_bsm = false,
1716 .ht_greenfield_support = true, 1720 .ht_greenfield_support = true,
1717 .led_compensation = 51, 1721 .led_compensation = 51,
1722 .use_rts_for_ht = true, /* use rts/cts protection */
1718 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 1723 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1719 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED, 1724 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1720}; 1725};
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index e7d88d1da15d..83cc4e500a96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,3 +1,29 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
1#include <linux/module.h> 27#include <linux/module.h>
2 28
3/* sparse doesn't like tracepoint macros */ 29/* sparse doesn't like tracepoint macros */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 21361968ab7e..d9c7363b1bbb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,3 +1,29 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
1#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) 27#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
2#define __IWLWIFI_DEVICE_TRACE 28#define __IWLWIFI_DEVICE_TRACE
3 29
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 777584d76a88..1e41ad0fcad5 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm,
973 973
974 memset(&update, 0, sizeof(struct iwm_umac_pmkid_update)); 974 memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
975 975
976 update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
977 update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
978 sizeof(struct iwm_umac_wifi_if));
979
976 update.command = cpu_to_le32(command); 980 update.command = cpu_to_le32(command);
977 if (pmksa->bssid) 981 if (pmksa->bssid)
978 memcpy(&update.bssid, pmksa->bssid, ETH_ALEN); 982 memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 06af0552cd75..3dfd9f0e9003 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx {
463#define IWM_CMD_PMKID_FLUSH 3 463#define IWM_CMD_PMKID_FLUSH 3
464 464
465struct iwm_umac_pmkid_update { 465struct iwm_umac_pmkid_update {
466 struct iwm_umac_wifi_if hdr;
466 __le32 command; 467 __le32 command;
467 u8 bssid[ETH_ALEN]; 468 u8 bssid[ETH_ALEN];
468 __le16 reserved; 469 __le16 reserved;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a15962a19b2a..a72f7c2577de 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -197,6 +197,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
197 i %= ring_limit; 197 i %= ring_limit;
198 continue; 198 continue;
199 } 199 }
200
201 if (unlikely(len > priv->common.rx_mtu)) {
202 if (net_ratelimit())
203 dev_err(&priv->pdev->dev, "rx'd frame size "
204 "exceeds length threshold.\n");
205
206 len = priv->common.rx_mtu;
207 }
200 skb_put(skb, len); 208 skb_put(skb, len);
201 209
202 if (p54_rx(dev, skb)) { 210 if (p54_rx(dev, skb)) {
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ac19ecd19cfe..72d3e437e190 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
62 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 62 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
63 /* ZD1211B */ 63 /* ZD1211B */
64 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, 64 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
65 { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, 66 { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
66 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B }, 67 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
67 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 68 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index df854401af2d..95421fa3b304 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -758,6 +758,7 @@ static struct pcmcia_device_id serial_ids[] = {
758 PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), 758 PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
759 PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), 759 PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
760 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 760 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
761 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
761 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 762 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
762 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 763 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
763 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 764 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index ff7664e0c3cd..4c4e0f8375b3 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -353,6 +353,11 @@ struct ttm_bo_driver {
353 /* notify the driver we are taking a fault on this BO 353 /* notify the driver we are taking a fault on this BO
354 * and have reserved it */ 354 * and have reserved it */
355 void (*fault_reserve_notify)(struct ttm_buffer_object *bo); 355 void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
356
357 /**
358 * notify the driver that we're about to swap out this bo
359 */
360 void (*swap_notify) (struct ttm_buffer_object *bo);
356}; 361};
357 362
358/** 363/**
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 7968defd2fa7..6a7eb402165d 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -485,6 +485,7 @@ void phy_driver_unregister(struct phy_driver *drv);
485int phy_driver_register(struct phy_driver *new_driver); 485int phy_driver_register(struct phy_driver *new_driver);
486void phy_prepare_link(struct phy_device *phydev, 486void phy_prepare_link(struct phy_device *phydev,
487 void (*adjust_link)(struct net_device *)); 487 void (*adjust_link)(struct net_device *));
488void phy_state_machine(struct work_struct *work);
488void phy_start_machine(struct phy_device *phydev, 489void phy_start_machine(struct phy_device *phydev,
489 void (*handler)(struct net_device *)); 490 void (*handler)(struct net_device *));
490void phy_stop_machine(struct phy_device *phydev); 491void phy_stop_machine(struct phy_device *phydev);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 56f8e5585df7..74f119a2829a 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -5,6 +5,7 @@
5#include <linux/wait.h> 5#include <linux/wait.h>
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <linux/xfrm.h> 7#include <linux/xfrm.h>
8#include <net/dst_ops.h>
8 9
9struct ctl_table_header; 10struct ctl_table_header;
10 11
@@ -42,6 +43,11 @@ struct netns_xfrm {
42 unsigned int policy_count[XFRM_POLICY_MAX * 2]; 43 unsigned int policy_count[XFRM_POLICY_MAX * 2];
43 struct work_struct policy_hash_work; 44 struct work_struct policy_hash_work;
44 45
46 struct dst_ops xfrm4_dst_ops;
47#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
48 struct dst_ops xfrm6_dst_ops;
49#endif
50
45 struct sock *nlsk; 51 struct sock *nlsk;
46 struct sock *nlsk_stash; 52 struct sock *nlsk_stash;
47 53
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 15696b1fd30f..ab170a60e7d3 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -132,6 +132,8 @@ static __inline__ void nr_node_put(struct nr_node *nr_node)
132static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh) 132static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
133{ 133{
134 if (atomic_dec_and_test(&nr_neigh->refcount)) { 134 if (atomic_dec_and_test(&nr_neigh->refcount)) {
135 if (nr_neigh->ax25)
136 ax25_cb_put(nr_neigh->ax25);
135 kfree(nr_neigh->digipeat); 137 kfree(nr_neigh->digipeat);
136 kfree(nr_neigh); 138 kfree(nr_neigh);
137 } 139 }
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 6d85861ab990..60c27706e7b9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1367,8 +1367,8 @@ struct xfrmk_spdinfo {
1367extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq); 1367extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq);
1368extern int xfrm_state_delete(struct xfrm_state *x); 1368extern int xfrm_state_delete(struct xfrm_state *x);
1369extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info); 1369extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
1370extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si); 1370extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1371extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si); 1371extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1372extern int xfrm_replay_check(struct xfrm_state *x, 1372extern int xfrm_replay_check(struct xfrm_state *x,
1373 struct sk_buff *skb, __be32 seq); 1373 struct sk_buff *skb, __be32 seq);
1374extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); 1374extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b7889782047e..c1b92cab46c7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -163,7 +163,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
163 goto err_unlock; 163 goto err_unlock;
164 } 164 }
165 165
166 rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, 166 rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
167 smp_processor_id()); 167 smp_processor_id());
168 rx_stats->rx_packets++; 168 rx_stats->rx_packets++;
169 rx_stats->rx_bytes += skb->len; 169 rx_stats->rx_bytes += skb->len;
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 9d4adfd22757..f2b3b56aa779 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -819,7 +819,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
819 ma = &ifa->address; 819 ma = &ifa->address;
820 else { /* We need to make a copy of the entry. */ 820 else { /* We need to make a copy of the entry. */
821 da.s_node = sa.s_node; 821 da.s_node = sa.s_node;
822 da.s_net = da.s_net; 822 da.s_net = sa.s_net;
823 ma = &da; 823 ma = &da;
824 } 824 }
825 825
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index bf706f83a5c9..14912600ec57 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
92#endif 92#endif
93 } 93 }
94 94
95 /*
96 * There is one ref for the state machine; a caller needs
97 * one more to put it back, just like with the existing one.
98 */
99 ax25_cb_hold(ax25);
100
95 ax25_cb_add(ax25); 101 ax25_cb_add(ax25);
96 102
97 ax25->state = AX25_STATE_1; 103 ax25->state = AX25_STATE_1;
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index f3e9ba1cfd01..57dfb9c8c4f2 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -77,34 +77,24 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
77 return err; 77 return err;
78} 78}
79 79
80static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) 80static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
81{ 81{
82 struct kmem_cache *slab; 82 struct kmem_cache *slab;
83 char slab_name_fmt[32], *slab_name;
84 va_list args; 83 va_list args;
85 84
86 va_start(args, fmt); 85 va_start(args, fmt);
87 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); 86 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
88 va_end(args); 87 va_end(args);
89 88
90 slab_name = kstrdup(slab_name_fmt, GFP_KERNEL); 89 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
91 if (slab_name == NULL)
92 return NULL;
93 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
94 SLAB_HWCACHE_ALIGN, NULL); 90 SLAB_HWCACHE_ALIGN, NULL);
95 if (slab == NULL)
96 kfree(slab_name);
97 return slab; 91 return slab;
98} 92}
99 93
100static void ccid_kmem_cache_destroy(struct kmem_cache *slab) 94static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
101{ 95{
102 if (slab != NULL) { 96 if (slab != NULL)
103 const char *name = kmem_cache_name(slab);
104
105 kmem_cache_destroy(slab); 97 kmem_cache_destroy(slab);
106 kfree(name);
107 }
108} 98}
109 99
110static int ccid_activate(struct ccid_operations *ccid_ops) 100static int ccid_activate(struct ccid_operations *ccid_ops)
@@ -113,6 +103,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
113 103
114 ccid_ops->ccid_hc_rx_slab = 104 ccid_ops->ccid_hc_rx_slab =
115 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, 105 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
106 ccid_ops->ccid_hc_rx_slab_name,
116 "ccid%u_hc_rx_sock", 107 "ccid%u_hc_rx_sock",
117 ccid_ops->ccid_id); 108 ccid_ops->ccid_id);
118 if (ccid_ops->ccid_hc_rx_slab == NULL) 109 if (ccid_ops->ccid_hc_rx_slab == NULL)
@@ -120,6 +111,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
120 111
121 ccid_ops->ccid_hc_tx_slab = 112 ccid_ops->ccid_hc_tx_slab =
122 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, 113 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
114 ccid_ops->ccid_hc_tx_slab_name,
123 "ccid%u_hc_tx_sock", 115 "ccid%u_hc_tx_sock",
124 ccid_ops->ccid_id); 116 ccid_ops->ccid_id);
125 if (ccid_ops->ccid_hc_tx_slab == NULL) 117 if (ccid_ops->ccid_hc_tx_slab == NULL)
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index facedd20b531..269958bf7fe9 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -49,6 +49,8 @@ struct ccid_operations {
49 const char *ccid_name; 49 const char *ccid_name;
50 struct kmem_cache *ccid_hc_rx_slab, 50 struct kmem_cache *ccid_hc_rx_slab,
51 *ccid_hc_tx_slab; 51 *ccid_hc_tx_slab;
52 char ccid_hc_rx_slab_name[32];
53 char ccid_hc_tx_slab_name[32];
52 __u32 ccid_hc_rx_obj_size, 54 __u32 ccid_hc_rx_obj_size,
53 ccid_hc_tx_obj_size; 55 ccid_hc_tx_obj_size;
54 /* Interface Routines */ 56 /* Interface Routines */
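Note on the ccid.c/ccid.h change: instead of kstrdup()ing a slab name and freeing it again with kmem_cache_name()/kfree() at destroy time, the name is now formatted straight into a 32-byte buffer embedded in struct ccid_operations, whose lifetime covers the cache. A hedged userspace sketch of the same idea follows; cache_desc and format_name are invented names, and the real code of course hands the buffer to kmem_cache_create() rather than printing it.

/* Sketch: format a cache name into storage owned by the descriptor,
 * so no separately allocated string has to be tracked and freed. */
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

struct cache_desc {
	char rx_name[32];	/* analogue of ccid_hc_rx_slab_name[32] */
	char tx_name[32];	/* analogue of ccid_hc_tx_slab_name[32] */
};

static void format_name(char *buf, size_t len, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, len, fmt, args);
	va_end(args);
}

int main(void)
{
	struct cache_desc d;

	format_name(d.rx_name, sizeof(d.rx_name), "ccid%u_hc_rx_sock", 2u);
	format_name(d.tx_name, sizeof(d.tx_name), "ccid%u_hc_tx_sock", 2u);
	printf("%s / %s\n", d.rx_name, d.tx_name);
	return 0;
}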
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index a1362dc8abb0..bace1d8cbcfd 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -161,7 +161,8 @@ static __init int dccpprobe_init(void)
161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
162 goto err0; 162 goto err0;
163 163
164 ret = register_jprobe(&dccp_send_probe); 164 ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
165 "dccp");
165 if (ret) 166 if (ret)
166 goto err1; 167 goto err1;
167 168
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index bdb78dd180ce..1aaa8110d84b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -368,7 +368,7 @@ static int inet_diag_bc_run(const void *bc, int len,
368 yes = entry->sport >= op[1].no; 368 yes = entry->sport >= op[1].no;
369 break; 369 break;
370 case INET_DIAG_BC_S_LE: 370 case INET_DIAG_BC_S_LE:
371 yes = entry->dport <= op[1].no; 371 yes = entry->sport <= op[1].no;
372 break; 372 break;
373 case INET_DIAG_BC_D_GE: 373 case INET_DIAG_BC_D_GE:
374 yes = entry->dport >= op[1].no; 374 yes = entry->dport >= op[1].no;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e446496f564f..d62b05d33384 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -586,7 +586,9 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
586{ 586{
587 remove_proc_entry("rt_cache", net->proc_net_stat); 587 remove_proc_entry("rt_cache", net->proc_net_stat);
588 remove_proc_entry("rt_cache", net->proc_net); 588 remove_proc_entry("rt_cache", net->proc_net);
589#ifdef CONFIG_NET_CLS_ROUTE
589 remove_proc_entry("rt_acct", net->proc_net); 590 remove_proc_entry("rt_acct", net->proc_net);
591#endif
590} 592}
591 593
592static struct pernet_operations ip_rt_proc_ops __net_initdata = { 594static struct pernet_operations ip_rt_proc_ops __net_initdata = {
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index bb110c5ce1d2..9bc805df95d2 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -39,9 +39,9 @@ static int port __read_mostly = 0;
39MODULE_PARM_DESC(port, "Port to match (0=all)"); 39MODULE_PARM_DESC(port, "Port to match (0=all)");
40module_param(port, int, 0); 40module_param(port, int, 0);
41 41
42static int bufsize __read_mostly = 4096; 42static unsigned int bufsize __read_mostly = 4096;
43MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); 43MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
44module_param(bufsize, int, 0); 44module_param(bufsize, uint, 0);
45 45
46static int full __read_mostly; 46static int full __read_mostly;
47MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); 47MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
@@ -75,12 +75,12 @@ static struct {
75 75
76static inline int tcp_probe_used(void) 76static inline int tcp_probe_used(void)
77{ 77{
78 return (tcp_probe.head - tcp_probe.tail) % bufsize; 78 return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
79} 79}
80 80
81static inline int tcp_probe_avail(void) 81static inline int tcp_probe_avail(void)
82{ 82{
83 return bufsize - tcp_probe_used(); 83 return bufsize - tcp_probe_used() - 1;
84} 84}
85 85
86/* 86/*
@@ -116,7 +116,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
116 p->ssthresh = tcp_current_ssthresh(sk); 116 p->ssthresh = tcp_current_ssthresh(sk);
117 p->srtt = tp->srtt >> 3; 117 p->srtt = tp->srtt >> 3;
118 118
119 tcp_probe.head = (tcp_probe.head + 1) % bufsize; 119 tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
120 } 120 }
121 tcp_probe.lastcwnd = tp->snd_cwnd; 121 tcp_probe.lastcwnd = tp->snd_cwnd;
122 spin_unlock(&tcp_probe.lock); 122 spin_unlock(&tcp_probe.lock);
@@ -149,7 +149,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file)
149static int tcpprobe_sprint(char *tbuf, int n) 149static int tcpprobe_sprint(char *tbuf, int n)
150{ 150{
151 const struct tcp_log *p 151 const struct tcp_log *p
152 = tcp_probe.log + tcp_probe.tail % bufsize; 152 = tcp_probe.log + tcp_probe.tail;
153 struct timespec tv 153 struct timespec tv
154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
155 155
@@ -192,7 +192,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
192 width = tcpprobe_sprint(tbuf, sizeof(tbuf)); 192 width = tcpprobe_sprint(tbuf, sizeof(tbuf));
193 193
194 if (cnt + width < len) 194 if (cnt + width < len)
195 tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; 195 tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
196 196
197 spin_unlock_bh(&tcp_probe.lock); 197 spin_unlock_bh(&tcp_probe.lock);
198 198
@@ -222,9 +222,10 @@ static __init int tcpprobe_init(void)
222 init_waitqueue_head(&tcp_probe.wait); 222 init_waitqueue_head(&tcp_probe.wait);
223 spin_lock_init(&tcp_probe.lock); 223 spin_lock_init(&tcp_probe.lock);
224 224
225 if (bufsize < 0) 225 if (bufsize == 0)
226 return -EINVAL; 226 return -EINVAL;
227 227
228 bufsize = roundup_pow_of_two(bufsize);
228 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); 229 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
229 if (!tcp_probe.log) 230 if (!tcp_probe.log)
230 goto err0; 231 goto err0;
@@ -236,7 +237,7 @@ static __init int tcpprobe_init(void)
236 if (ret) 237 if (ret)
237 goto err1; 238 goto err1;
238 239
239 pr_info("TCP probe registered (port=%d)\n", port); 240 pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize);
240 return 0; 241 return 0;
241 err1: 242 err1:
242 proc_net_remove(&init_net, procname); 243 proc_net_remove(&init_net, procname);
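Note on the tcp_probe.c hunks: bufsize is rounded up to a power of two so that `% bufsize` can become `& (bufsize - 1)`, and tcp_probe_avail() now keeps one slot unused so that head == tail unambiguously means "empty". A minimal userspace ring buffer using the same indexing is sketched below; BUFSIZE, ring_used and ring_avail are illustrative names.

/* Sketch of power-of-two ring indexing as used after the tcp_probe change. */
#include <stdio.h>

#define BUFSIZE 8			/* must be a power of two */

static unsigned int head, tail;

static unsigned int ring_used(void)  { return (head - tail) & (BUFSIZE - 1); }
static unsigned int ring_avail(void) { return BUFSIZE - ring_used() - 1; }	/* one slot kept free */

int main(void)
{
	unsigned int i;

	for (i = 0; i < 5; i++)			/* produce 5 entries */
		head = (head + 1) & (BUFSIZE - 1);
	printf("used=%u avail=%u\n", ring_used(), ring_avail());	/* used=5 avail=2 */

	tail = (tail + 1) & (BUFSIZE - 1);	/* consume one */
	printf("used=%u avail=%u\n", ring_used(), ring_avail());	/* used=4 avail=3 */
	return 0;
}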
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 8c08a28d8f83..67107d63c1cd 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,7 +15,6 @@
15#include <net/xfrm.h> 15#include <net/xfrm.h>
16#include <net/ip.h> 16#include <net/ip.h>
17 17
18static struct dst_ops xfrm4_dst_ops;
19static struct xfrm_policy_afinfo xfrm4_policy_afinfo; 18static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
20 19
21static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, 20static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
@@ -190,8 +189,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
190 189
191static inline int xfrm4_garbage_collect(struct dst_ops *ops) 190static inline int xfrm4_garbage_collect(struct dst_ops *ops)
192{ 191{
193 xfrm4_policy_afinfo.garbage_collect(&init_net); 192 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
194 return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); 193
194 xfrm4_policy_afinfo.garbage_collect(net);
195 return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
195} 196}
196 197
197static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) 198static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -268,7 +269,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
268static struct ctl_table xfrm4_policy_table[] = { 269static struct ctl_table xfrm4_policy_table[] = {
269 { 270 {
270 .procname = "xfrm4_gc_thresh", 271 .procname = "xfrm4_gc_thresh",
271 .data = &xfrm4_dst_ops.gc_thresh, 272 .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
272 .maxlen = sizeof(int), 273 .maxlen = sizeof(int),
273 .mode = 0644, 274 .mode = 0644,
274 .proc_handler = proc_dointvec, 275 .proc_handler = proc_dointvec,
@@ -295,8 +296,6 @@ static void __exit xfrm4_policy_fini(void)
295 296
296void __init xfrm4_init(int rt_max_size) 297void __init xfrm4_init(int rt_max_size)
297{ 298{
298 xfrm4_state_init();
299 xfrm4_policy_init();
300 /* 299 /*
301 * Select a default value for the gc_thresh based on the main route 300 * Select a default value for the gc_thresh based on the main route
302 * table hash size. It seems to me the worst case scenario is when 301 * table hash size. It seems to me the worst case scenario is when
@@ -308,6 +307,9 @@ void __init xfrm4_init(int rt_max_size)
308 * and start cleaning when were 1/2 full 307 * and start cleaning when were 1/2 full
309 */ 308 */
310 xfrm4_dst_ops.gc_thresh = rt_max_size/2; 309 xfrm4_dst_ops.gc_thresh = rt_max_size/2;
310
311 xfrm4_state_init();
312 xfrm4_policy_init();
311#ifdef CONFIG_SYSCTL 313#ifdef CONFIG_SYSCTL
312 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, 314 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
313 xfrm4_policy_table); 315 xfrm4_policy_table);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7254e3f899a7..dbdc696f5fc5 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -24,7 +24,6 @@
24#include <net/mip6.h> 24#include <net/mip6.h>
25#endif 25#endif
26 26
27static struct dst_ops xfrm6_dst_ops;
28static struct xfrm_policy_afinfo xfrm6_policy_afinfo; 27static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
29 28
30static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, 29static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
@@ -224,8 +223,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
224 223
225static inline int xfrm6_garbage_collect(struct dst_ops *ops) 224static inline int xfrm6_garbage_collect(struct dst_ops *ops)
226{ 225{
227 xfrm6_policy_afinfo.garbage_collect(&init_net); 226 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
228 return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2); 227
228 xfrm6_policy_afinfo.garbage_collect(net);
229 return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
229} 230}
230 231
231static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) 232static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -310,7 +311,7 @@ static void xfrm6_policy_fini(void)
310static struct ctl_table xfrm6_policy_table[] = { 311static struct ctl_table xfrm6_policy_table[] = {
311 { 312 {
312 .procname = "xfrm6_gc_thresh", 313 .procname = "xfrm6_gc_thresh",
313 .data = &xfrm6_dst_ops.gc_thresh, 314 .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh,
314 .maxlen = sizeof(int), 315 .maxlen = sizeof(int),
315 .mode = 0644, 316 .mode = 0644,
316 .proc_handler = proc_dointvec, 317 .proc_handler = proc_dointvec,
@@ -326,13 +327,6 @@ int __init xfrm6_init(void)
326 int ret; 327 int ret;
327 unsigned int gc_thresh; 328 unsigned int gc_thresh;
328 329
329 ret = xfrm6_policy_init();
330 if (ret)
331 goto out;
332
333 ret = xfrm6_state_init();
334 if (ret)
335 goto out_policy;
336 /* 330 /*
337 * We need a good default value for the xfrm6 gc threshold. 331 * We need a good default value for the xfrm6 gc threshold.
338 * In ipv4 we set it to the route hash table size * 8, which 332 * In ipv4 we set it to the route hash table size * 8, which
@@ -346,6 +340,15 @@ int __init xfrm6_init(void)
346 */ 340 */
347 gc_thresh = FIB6_TABLE_HASHSZ * 8; 341 gc_thresh = FIB6_TABLE_HASHSZ * 8;
348 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; 342 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
343
344 ret = xfrm6_policy_init();
345 if (ret)
346 goto out;
347
348 ret = xfrm6_state_init();
349 if (ret)
350 goto out_policy;
351
349#ifdef CONFIG_SYSCTL 352#ifdef CONFIG_SYSCTL
350 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, 353 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
351 xfrm6_policy_table); 354 xfrm6_policy_table);
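Note on the xfrm4_policy.c and xfrm6_policy.c garbage collectors: both now recover the owning struct net from the dst_ops embedded in it via container_of(), instead of reading a file-static ops structure tied to init_net. The fragment below shows just the container_of() idiom in userspace C; struct ops and struct netns are stand-ins, and the macro is a simplified re-definition for illustration.

/* Userspace illustration of recovering the containing object from a
 * pointer to one of its members (the container_of() idiom). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ops { int gc_thresh; };

struct netns {				/* stand-in for struct net */
	int id;
	struct ops v4_ops;		/* stand-in for xfrm.xfrm4_dst_ops */
};

static int garbage_collect(struct ops *ops)
{
	struct netns *ns = container_of(ops, struct netns, v4_ops);

	printf("collecting in netns %d (gc_thresh=%d)\n", ns->id, ops->gc_thresh);
	return 0;
}

int main(void)
{
	struct netns ns = { .id = 7, .v4_ops = { .gc_thresh = 1024 } };

	return garbage_collect(&ns.v4_ops);
}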
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6dc3579c0ac5..9ae1a4760b58 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1331,6 +1331,9 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1331 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1331 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1332 struct ieee80211_conf *conf = &local->hw.conf; 1332 struct ieee80211_conf *conf = &local->hw.conf;
1333 1333
1334 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1335 return -EOPNOTSUPP;
1336
1334 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 1337 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
1335 return -EOPNOTSUPP; 1338 return -EOPNOTSUPP;
1336 1339
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 699d3ed869c4..29bc4c516238 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -190,7 +190,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
190 rate_control_pid_normalize(pinfo, sband->n_bitrates); 190 rate_control_pid_normalize(pinfo, sband->n_bitrates);
191 191
192 /* Compute the proportional, integral and derivative errors. */ 192 /* Compute the proportional, integral and derivative errors. */
193 err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; 193 err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT;
194 194
195 err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; 195 err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift;
196 spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop; 196 spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
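Note on the rc80211_pid_algo.c one-liner: shifting only the target before subtracting pf mixes a scaled value with an unscaled one, whereas `(target - pf) << RC_PID_ARITH_SHIFT` scales the whole error. A tiny worked example, assuming an illustrative shift of 8 rather than the driver's actual constant:

/* Worked example of the fixed-point ordering fix (shift value is arbitrary). */
#include <stdio.h>

int main(void)
{
	int target = 14, pf = 9, shift = 8;

	printf("old: %d\n", (target << shift) - pf);	/* 3584 - 9 = 3575: pf never scaled */
	printf("new: %d\n", (target - pf) << shift);	/* 5 * 256  = 1280: whole error scaled */
	return 0;
}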
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index aacba76070fc..e2e2d33cafdf 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -843,12 +843,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
843 dptr = skb_push(skb, 1); 843 dptr = skb_push(skb, 1);
844 *dptr = AX25_P_NETROM; 844 *dptr = AX25_P_NETROM;
845 845
846 ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); 846 ax25s = nr_neigh->ax25;
847 if (nr_neigh->ax25 && ax25s) { 847 nr_neigh->ax25 = ax25_send_frame(skb, 256,
848 /* We were already holding this ax25_cb */ 848 (ax25_address *)dev->dev_addr,
849 &nr_neigh->callsign,
850 nr_neigh->digipeat, nr_neigh->dev);
851 if (ax25s)
849 ax25_cb_put(ax25s); 852 ax25_cb_put(ax25s);
850 }
851 nr_neigh->ax25 = ax25s;
852 853
853 dev_put(dev); 854 dev_put(dev);
854 ret = (nr_neigh->ax25 != NULL); 855 ret = (nr_neigh->ax25 != NULL);
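Note on the netrom (and the rose hunks that follow): before an owning pointer such as nr_neigh->ax25 is overwritten with a freshly returned ax25_cb, the old value is saved and ax25_cb_put() is called on it, so the replaced control block is not leaked. Below is a self-contained userspace sketch of that replace-and-release pattern; obj, obj_put and slot_replace are invented names.

/* Sketch: drop the old reference when replacing an owning pointer. */
#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_int refcnt; };

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (o)
		atomic_init(&o->refcnt, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

/* Replace *slot with a fresh object, releasing whatever was there before. */
static void slot_replace(struct obj **slot)
{
	struct obj *old = *slot;	/* save the old owner, like "ax25s = nr_neigh->ax25" */

	*slot = obj_new();
	obj_put(old);			/* put the saved reference, not the new one */
}

int main(void)
{
	struct obj *slot = NULL;

	slot_replace(&slot);		/* nothing to release the first time */
	slot_replace(&slot);		/* the first object is released here */
	obj_put(slot);
	return 0;
}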
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index bd86a63960ce..5ef5f6988a2e 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param)
101static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) 101static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
102{ 102{
103 ax25_address *rose_call; 103 ax25_address *rose_call;
104 ax25_cb *ax25s;
104 105
105 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 106 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
106 rose_call = (ax25_address *)neigh->dev->dev_addr; 107 rose_call = (ax25_address *)neigh->dev->dev_addr;
107 else 108 else
108 rose_call = &rose_callsign; 109 rose_call = &rose_callsign;
109 110
111 ax25s = neigh->ax25;
110 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 112 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
113 if (ax25s)
114 ax25_cb_put(ax25s);
111 115
112 return (neigh->ax25 != NULL); 116 return (neigh->ax25 != NULL);
113} 117}
@@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
120static int rose_link_up(struct rose_neigh *neigh) 124static int rose_link_up(struct rose_neigh *neigh)
121{ 125{
122 ax25_address *rose_call; 126 ax25_address *rose_call;
127 ax25_cb *ax25s;
123 128
124 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 129 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
125 rose_call = (ax25_address *)neigh->dev->dev_addr; 130 rose_call = (ax25_address *)neigh->dev->dev_addr;
126 else 131 else
127 rose_call = &rose_callsign; 132 rose_call = &rose_callsign;
128 133
134 ax25s = neigh->ax25;
129 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 135 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
136 if (ax25s)
137 ax25_cb_put(ax25s);
130 138
131 return (neigh->ax25 != NULL); 139 return (neigh->ax25 != NULL);
132} 140}
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 795c4b025e31..70a0b3b4b4d2 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -235,6 +235,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
235 235
236 if ((s = rose_neigh_list) == rose_neigh) { 236 if ((s = rose_neigh_list) == rose_neigh) {
237 rose_neigh_list = rose_neigh->next; 237 rose_neigh_list = rose_neigh->next;
238 if (rose_neigh->ax25)
239 ax25_cb_put(rose_neigh->ax25);
238 kfree(rose_neigh->digipeat); 240 kfree(rose_neigh->digipeat);
239 kfree(rose_neigh); 241 kfree(rose_neigh);
240 return; 242 return;
@@ -243,6 +245,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
243 while (s != NULL && s->next != NULL) { 245 while (s != NULL && s->next != NULL) {
244 if (s->next == rose_neigh) { 246 if (s->next == rose_neigh) {
245 s->next = rose_neigh->next; 247 s->next = rose_neigh->next;
248 if (rose_neigh->ax25)
249 ax25_cb_put(rose_neigh->ax25);
246 kfree(rose_neigh->digipeat); 250 kfree(rose_neigh->digipeat);
247 kfree(rose_neigh); 251 kfree(rose_neigh);
248 return; 252 return;
@@ -812,6 +816,7 @@ void rose_link_failed(ax25_cb *ax25, int reason)
812 816
813 if (rose_neigh != NULL) { 817 if (rose_neigh != NULL) {
814 rose_neigh->ax25 = NULL; 818 rose_neigh->ax25 = NULL;
819 ax25_cb_put(ax25);
815 820
816 rose_del_route_by_neigh(rose_neigh); 821 rose_del_route_by_neigh(rose_neigh);
817 rose_kill_by_neigh(rose_neigh); 822 rose_kill_by_neigh(rose_neigh);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 2333d78187e4..dc0fc4989d54 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -655,6 +655,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
655 memset(&wrqu, 0, sizeof(wrqu)); 655 memset(&wrqu, 0, sizeof(wrqu));
656 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 656 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
657 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 657 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
658 wdev->wext.connect.ssid_len = 0;
658#endif 659#endif
659} 660}
660 661
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4725a549ad4d..0ecb16a9a883 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -469,16 +469,16 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total)
469 return 0; 469 return 0;
470} 470}
471 471
472void xfrm_spd_getinfo(struct xfrmk_spdinfo *si) 472void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
473{ 473{
474 read_lock_bh(&xfrm_policy_lock); 474 read_lock_bh(&xfrm_policy_lock);
475 si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN]; 475 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
476 si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT]; 476 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
477 si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD]; 477 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
478 si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 478 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
479 si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 479 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
480 si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 480 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
481 si->spdhcnt = init_net.xfrm.policy_idx_hmask; 481 si->spdhcnt = net->xfrm.policy_idx_hmask;
482 si->spdhmcnt = xfrm_policy_hashmax; 482 si->spdhmcnt = xfrm_policy_hashmax;
483 read_unlock_bh(&xfrm_policy_lock); 483 read_unlock_bh(&xfrm_policy_lock);
484} 484}
@@ -1309,15 +1309,28 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
1309 return tos; 1309 return tos;
1310} 1310}
1311 1311
1312static inline struct xfrm_dst *xfrm_alloc_dst(int family) 1312static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1313{ 1313{
1314 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1314 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1315 struct dst_ops *dst_ops;
1315 struct xfrm_dst *xdst; 1316 struct xfrm_dst *xdst;
1316 1317
1317 if (!afinfo) 1318 if (!afinfo)
1318 return ERR_PTR(-EINVAL); 1319 return ERR_PTR(-EINVAL);
1319 1320
1320 xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS); 1321 switch (family) {
1322 case AF_INET:
1323 dst_ops = &net->xfrm.xfrm4_dst_ops;
1324 break;
1325#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1326 case AF_INET6:
1327 dst_ops = &net->xfrm.xfrm6_dst_ops;
1328 break;
1329#endif
1330 default:
1331 BUG();
1332 }
1333 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
1321 1334
1322 xfrm_policy_put_afinfo(afinfo); 1335 xfrm_policy_put_afinfo(afinfo);
1323 1336
@@ -1366,6 +1379,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1366 struct flowi *fl, 1379 struct flowi *fl,
1367 struct dst_entry *dst) 1380 struct dst_entry *dst)
1368{ 1381{
1382 struct net *net = xp_net(policy);
1369 unsigned long now = jiffies; 1383 unsigned long now = jiffies;
1370 struct net_device *dev; 1384 struct net_device *dev;
1371 struct dst_entry *dst_prev = NULL; 1385 struct dst_entry *dst_prev = NULL;
@@ -1389,7 +1403,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1389 dst_hold(dst); 1403 dst_hold(dst);
1390 1404
1391 for (; i < nx; i++) { 1405 for (; i < nx; i++) {
1392 struct xfrm_dst *xdst = xfrm_alloc_dst(family); 1406 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1393 struct dst_entry *dst1 = &xdst->u.dst; 1407 struct dst_entry *dst1 = &xdst->u.dst;
1394 1408
1395 err = PTR_ERR(xdst); 1409 err = PTR_ERR(xdst);
@@ -2279,6 +2293,7 @@ EXPORT_SYMBOL(xfrm_bundle_ok);
2279 2293
2280int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2294int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2281{ 2295{
2296 struct net *net;
2282 int err = 0; 2297 int err = 0;
2283 if (unlikely(afinfo == NULL)) 2298 if (unlikely(afinfo == NULL))
2284 return -EINVAL; 2299 return -EINVAL;
@@ -2302,6 +2317,27 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2302 xfrm_policy_afinfo[afinfo->family] = afinfo; 2317 xfrm_policy_afinfo[afinfo->family] = afinfo;
2303 } 2318 }
2304 write_unlock_bh(&xfrm_policy_afinfo_lock); 2319 write_unlock_bh(&xfrm_policy_afinfo_lock);
2320
2321 rtnl_lock();
2322 for_each_net(net) {
2323 struct dst_ops *xfrm_dst_ops;
2324
2325 switch (afinfo->family) {
2326 case AF_INET:
2327 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2328 break;
2329#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2330 case AF_INET6:
2331 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2332 break;
2333#endif
2334 default:
2335 BUG();
2336 }
2337 *xfrm_dst_ops = *afinfo->dst_ops;
2338 }
2339 rtnl_unlock();
2340
2305 return err; 2341 return err;
2306} 2342}
2307EXPORT_SYMBOL(xfrm_policy_register_afinfo); 2343EXPORT_SYMBOL(xfrm_policy_register_afinfo);
@@ -2332,6 +2368,22 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2332} 2368}
2333EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2369EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2334 2370
2371static void __net_init xfrm_dst_ops_init(struct net *net)
2372{
2373 struct xfrm_policy_afinfo *afinfo;
2374
2375 read_lock_bh(&xfrm_policy_afinfo_lock);
2376 afinfo = xfrm_policy_afinfo[AF_INET];
2377 if (afinfo)
2378 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2379#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2380 afinfo = xfrm_policy_afinfo[AF_INET6];
2381 if (afinfo)
2382 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2383#endif
2384 read_unlock_bh(&xfrm_policy_afinfo_lock);
2385}
2386
2335static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2387static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2336{ 2388{
2337 struct xfrm_policy_afinfo *afinfo; 2389 struct xfrm_policy_afinfo *afinfo;
@@ -2494,6 +2546,7 @@ static int __net_init xfrm_net_init(struct net *net)
2494 rv = xfrm_policy_init(net); 2546 rv = xfrm_policy_init(net);
2495 if (rv < 0) 2547 if (rv < 0)
2496 goto out_policy; 2548 goto out_policy;
2549 xfrm_dst_ops_init(net);
2497 rv = xfrm_sysctl_init(net); 2550 rv = xfrm_sysctl_init(net);
2498 if (rv < 0) 2551 if (rv < 0)
2499 goto out_sysctl; 2552 goto out_sysctl;
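Note on the xfrm_policy.c changes: they continue the network-namespace conversion. Accessors that used to read init_net implicitly now take a struct net * (xfrm_spd_getinfo) or derive it from the policy, and every namespace gets its own copy of the per-family dst_ops, initialised from the registered afinfo when the namespace is created. The sketch below shows only the general shape of such a conversion in userspace C; struct ctx and the function names are invented for illustration.

/* Sketch of converting a global-state accessor into a per-context one. */
#include <stdio.h>

struct ctx { int policy_count; };	/* stand-in for per-namespace xfrm state */

static struct ctx init_ctx = { .policy_count = 3 };

/* Before: implicitly reads the single global context. */
static int count_policies_global(void)
{
	return init_ctx.policy_count;
}

/* After: the caller says which context it means. */
static int count_policies(const struct ctx *ctx)
{
	return ctx->policy_count;
}

int main(void)
{
	struct ctx other = { .policy_count = 11 };

	printf("%d %d %d\n", count_policies_global(),
	       count_policies(&init_ctx), count_policies(&other));
	return 0;
}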
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d847f1a52b44..b36cc344474b 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -641,11 +641,11 @@ out:
641} 641}
642EXPORT_SYMBOL(xfrm_state_flush); 642EXPORT_SYMBOL(xfrm_state_flush);
643 643
644void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) 644void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
645{ 645{
646 spin_lock_bh(&xfrm_state_lock); 646 spin_lock_bh(&xfrm_state_lock);
647 si->sadcnt = init_net.xfrm.state_num; 647 si->sadcnt = net->xfrm.state_num;
648 si->sadhcnt = init_net.xfrm.state_hmask; 648 si->sadhcnt = net->xfrm.state_hmask;
649 si->sadhmcnt = xfrm_state_hashmax; 649 si->sadhmcnt = xfrm_state_hashmax;
650 spin_unlock_bh(&xfrm_state_lock); 650 spin_unlock_bh(&xfrm_state_lock);
651} 651}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1ada6186933c..d5a712976004 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -781,7 +781,8 @@ static inline size_t xfrm_spdinfo_msgsize(void)
781 + nla_total_size(sizeof(struct xfrmu_spdhinfo)); 781 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
782} 782}
783 783
784static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 784static int build_spdinfo(struct sk_buff *skb, struct net *net,
785 u32 pid, u32 seq, u32 flags)
785{ 786{
786 struct xfrmk_spdinfo si; 787 struct xfrmk_spdinfo si;
787 struct xfrmu_spdinfo spc; 788 struct xfrmu_spdinfo spc;
@@ -795,7 +796,7 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
795 796
796 f = nlmsg_data(nlh); 797 f = nlmsg_data(nlh);
797 *f = flags; 798 *f = flags;
798 xfrm_spd_getinfo(&si); 799 xfrm_spd_getinfo(net, &si);
799 spc.incnt = si.incnt; 800 spc.incnt = si.incnt;
800 spc.outcnt = si.outcnt; 801 spc.outcnt = si.outcnt;
801 spc.fwdcnt = si.fwdcnt; 802 spc.fwdcnt = si.fwdcnt;
@@ -828,7 +829,7 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
828 if (r_skb == NULL) 829 if (r_skb == NULL)
829 return -ENOMEM; 830 return -ENOMEM;
830 831
831 if (build_spdinfo(r_skb, spid, seq, *flags) < 0) 832 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
832 BUG(); 833 BUG();
833 834
834 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 835 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
@@ -841,7 +842,8 @@ static inline size_t xfrm_sadinfo_msgsize(void)
841 + nla_total_size(4); /* XFRMA_SAD_CNT */ 842 + nla_total_size(4); /* XFRMA_SAD_CNT */
842} 843}
843 844
844static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 845static int build_sadinfo(struct sk_buff *skb, struct net *net,
846 u32 pid, u32 seq, u32 flags)
845{ 847{
846 struct xfrmk_sadinfo si; 848 struct xfrmk_sadinfo si;
847 struct xfrmu_sadhinfo sh; 849 struct xfrmu_sadhinfo sh;
@@ -854,7 +856,7 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
854 856
855 f = nlmsg_data(nlh); 857 f = nlmsg_data(nlh);
856 *f = flags; 858 *f = flags;
857 xfrm_sad_getinfo(&si); 859 xfrm_sad_getinfo(net, &si);
858 860
859 sh.sadhmcnt = si.sadhmcnt; 861 sh.sadhmcnt = si.sadhmcnt;
860 sh.sadhcnt = si.sadhcnt; 862 sh.sadhcnt = si.sadhcnt;
@@ -882,7 +884,7 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
882 if (r_skb == NULL) 884 if (r_skb == NULL)
883 return -ENOMEM; 885 return -ENOMEM;
884 886
885 if (build_sadinfo(r_skb, spid, seq, *flags) < 0) 887 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
886 BUG(); 888 BUG();
887 889
888 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 890 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);