author     Ingo Molnar <mingo@elte.hu>  2010-03-04 05:47:50 -0500
committer  Ingo Molnar <mingo@elte.hu>  2010-03-04 05:47:52 -0500
commit     4f16d4e0c9a4b20d9f0db365587b96d6001efd7d
tree       fa25dcf285b26f1fac2bf267d0d1cd2c4eba90b8
parent     1e259e0a9982078896f3404240096cbea01daca4
parent     6630125419ef37ff8781713c5e9d416f2a4ba357

Merge branch 'perf/core' into perf/urgent

Merge reason: Switch from pre-merge topical split to the post-merge
urgent track

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  Documentation/ABI/testing/ima_policy | 12
-rw-r--r--  Documentation/cpu-freq/governors.txt | 4
-rw-r--r--  Documentation/kernel-parameters.txt | 9
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 8
-rw-r--r--  Documentation/trace/kprobetrace.txt | 57
-rw-r--r--  MAINTAINERS | 22
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 18
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/Makefile | 2
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 3
-rw-r--r--  arch/arm/kernel/setup.c | 1
-rw-r--r--  arch/arm/mach-gemini/gpio.c | 4
-rw-r--r--  arch/arm/mach-mx25/clock.c | 58
-rw-r--r--  arch/arm/mach-mx25/mx25pdk.c | 2
-rw-r--r--  arch/arm/mach-mx3/mx31ads.c | 4
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 5
-rw-r--r--  arch/arm/mach-omap2/irq.c | 4
-rw-r--r--  arch/arm/mach-omap2/mmc-twl4030.c | 7
-rw-r--r--  arch/arm/mach-omap2/mux.c | 10
-rw-r--r--  arch/arm/mach-omap2/mux34xx.c | 47
-rw-r--r--  arch/arm/mach-omap2/serial.c | 11
-rw-r--r--  arch/arm/mach-realview/realview_pbx.c | 4
-rw-r--r--  arch/arm/mm/alignment.c | 3
-rw-r--r--  arch/arm/mm/proc-arm6_7.S | 2
-rw-r--r--  arch/arm/plat-mxc/audmux-v2.c | 137
-rw-r--r--  arch/arm/plat-mxc/include/mach/board-mx31lite.h | 2
-rw-r--r--  arch/arm/plat-mxc/include/mach/common.h | 2
-rw-r--r--  arch/arm/plat-mxc/include/mach/iomux-mx35.h | 2
-rw-r--r--  arch/arm/plat-mxc/include/mach/irqs.h | 5
-rw-r--r--  arch/arm/plat-omap/clock.c | 4
-rw-r--r--  arch/arm/plat-omap/gpio.c | 4
-rw-r--r--  arch/arm/plat-omap/omap_device.c | 10
-rw-r--r--  arch/arm/tools/mach-types | 46
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 5
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c | 15
-rw-r--r--  arch/ia64/include/asm/acpi.h | 1
-rw-r--r--  arch/ia64/include/asm/elf.h | 4
-rw-r--r--  arch/ia64/kernel/kprobes.c | 2
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 2
-rw-r--r--  arch/microblaze/configs/mmu_defconfig | 112
-rw-r--r--  arch/microblaze/configs/nommu_defconfig | 101
-rw-r--r--  arch/microblaze/include/asm/io.h | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c | 27
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S | 10
-rw-r--r--  arch/microblaze/kernel/setup.c | 1
-rw-r--r--  arch/mips/bcm47xx/prom.c | 8
-rw-r--r--  arch/mips/configs/ip27_defconfig | 917
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 4
-rw-r--r--  arch/mips/kernel/traps.c | 1
-rw-r--r--  arch/mips/mm/c-octeon.c | 4
-rw-r--r--  arch/mips/mm/cache.c | 2
-rw-r--r--  arch/mips/mm/highmem.c | 1
-rw-r--r--  arch/mips/sni/rm200.c | 2
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/kernel/pci.c | 7
-rw-r--r--  arch/parisc/kernel/signal.c | 4
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c | 3
-rw-r--r--  arch/powerpc/kernel/perf_event.c | 10
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 12
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 21
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 14
-rw-r--r--  arch/s390/include/asm/lowcore.h | 4
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 3
-rw-r--r--  arch/sh/kernel/dwarf.c | 20
-rw-r--r--  arch/sh/kernel/entry-common.S | 8
-rw-r--r--  arch/sh/kernel/perf_callchain.c | 3
-rw-r--r--  arch/sh/kernel/ptrace_64.c | 11
-rw-r--r--  arch/sh/kernel/signal_64.c | 4
-rw-r--r--  arch/sparc/include/asm/stat.h | 4
-rw-r--r--  arch/sparc/kernel/kstack.h | 4
-rw-r--r--  arch/sparc/kernel/of_device_32.c | 2
-rw-r--r--  arch/sparc/kernel/pci.c | 7
-rw-r--r--  arch/sparc/kernel/perf_event.c | 10
-rw-r--r--  arch/sparc/kernel/process_32.c | 2
-rw-r--r--  arch/sparc/kernel/process_64.c | 8
-rw-r--r--  arch/sparc/kernel/signal32.c | 10
-rw-r--r--  arch/sparc/kernel/signal_32.c | 6
-rw-r--r--  arch/sparc/kernel/signal_64.c | 8
-rw-r--r--  arch/sparc/kernel/tsb.S | 6
-rw-r--r--  arch/x86/include/asm/alternative.h | 5
-rw-r--r--  arch/x86/include/asm/debugreg.h | 3
-rw-r--r--  arch/x86/include/asm/elf.h | 5
-rw-r--r--  arch/x86/include/asm/hw_breakpoint.h | 1
-rw-r--r--  arch/x86/include/asm/nmi.h | 1
-rw-r--r--  arch/x86/include/asm/perf_event.h | 30
-rw-r--r--  arch/x86/include/asm/ptrace.h | 4
-rw-r--r--  arch/x86/include/asm/stacktrace.h | 2
-rw-r--r--  arch/x86/include/asm/system.h | 4
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 13
-rw-r--r--  arch/x86/kernel/alternative.c | 18
-rw-r--r--  arch/x86/kernel/apic/apic.c | 17
-rw-r--r--  arch/x86/kernel/apic/probe_32.c | 29
-rw-r--r--  arch/x86/kernel/apic/probe_64.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 1868
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 416
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 982
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c | 157
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 13
-rw-r--r--  arch/x86/kernel/dumpstack_32.c | 5
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 5
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 17
-rw-r--r--  arch/x86/kernel/kprobes.c | 5
-rw-r--r--  arch/x86/kernel/mpparse.c | 7
-rw-r--r--  arch/x86/kernel/process_64.c | 1
-rw-r--r--  arch/x86/kernel/ptrace.c | 24
-rw-r--r--  arch/x86/kernel/smpboot.c | 2
-rw-r--r--  arch/x86/kernel/traps.c | 3
-rw-r--r--  arch/x86/kvm/i8254.c | 3
-rw-r--r--  arch/x86/kvm/x86.c | 7
-rw-r--r--  arch/x86/mm/gup.c | 2
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 17
-rw-r--r--  arch/x86/oprofile/op_model_amd.c | 261
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 6
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 21
-rw-r--r--  arch/x86/oprofile/op_x86_model.h | 20
-rw-r--r--  block/blk-core.c | 11
-rw-r--r--  block/cfq-iosched.c | 49
-rw-r--r--  drivers/acpi/dock.c | 1
-rw-r--r--  drivers/acpi/processor_idle.c | 36
-rw-r--r--  drivers/acpi/processor_pdc.c | 14
-rw-r--r--  drivers/acpi/processor_perflib.c | 6
-rw-r--r--  drivers/acpi/scan.c | 27
-rw-r--r--  drivers/acpi/tables.c | 4
-rw-r--r--  drivers/ata/ahci.c | 15
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/ata/libata-sff.c | 3
-rw-r--r--  drivers/base/class.c | 2
-rw-r--r--  drivers/block/cciss.c | 3
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 1
-rw-r--r--  drivers/char/agp/amd64-agp.c | 5
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 79
-rw-r--r--  drivers/char/tty_io.c | 4
-rw-r--r--  drivers/clocksource/cs5535-clockevt.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 3
-rw-r--r--  drivers/dma/coh901318.c | 2
-rw-r--r--  drivers/dma/dmaengine.c | 1
-rw-r--r--  drivers/dma/dmatest.c | 2
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 2
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 25
-rw-r--r--  drivers/edac/amd64_edac.c | 15
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 8
-rw-r--r--  drivers/firewire/net.c | 53
-rw-r--r--  drivers/firewire/ohci.c | 13
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 47
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 146
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 276
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 33
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 49
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 108
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 16
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
-rw-r--r--  drivers/hwmon/adt7462.c | 2
-rw-r--r--  drivers/hwmon/lm78.c | 25
-rw-r--r--  drivers/hwmon/w83781d.c | 26
-rw-r--r--  drivers/i2c/busses/i2c-tiny-usb.c | 12
-rw-r--r--  drivers/infiniband/core/cma.c | 4
-rw-r--r--  drivers/input/input-polldev.c | 6
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 9
-rw-r--r--  drivers/input/serio/i8042.c | 8
-rw-r--r--  drivers/input/touchscreen/usbtouchscreen.c | 8
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c | 10
-rw-r--r--  drivers/md/dm-raid1.c | 2
-rw-r--r--  drivers/md/dm-region-hash.c | 5
-rw-r--r--  drivers/md/dm-snap-persistent.c | 2
-rw-r--r--  drivers/md/dm-stripe.c | 2
-rw-r--r--  drivers/md/dm-sysfs.c | 8
-rw-r--r--  drivers/md/dm.c | 21
-rw-r--r--  drivers/md/md.c | 14
-rw-r--r--  drivers/md/raid5.c | 14
-rw-r--r--  drivers/media/common/saa7146_video.c | 4
-rw-r--r--  drivers/media/dvb/dvb-core/dmxdev.c | 2
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_demux.c | 20
-rw-r--r--  drivers/media/dvb/dvb-usb/Kconfig | 4
-rw-r--r--  drivers/media/dvb/frontends/l64781.c | 4
-rw-r--r--  drivers/media/video/bt8xx/bttv-driver.c | 1
-rw-r--r--  drivers/media/video/bt8xx/bttv-i2c.c | 8
-rw-r--r--  drivers/media/video/bt8xx/bttvp.h | 1
-rw-r--r--  drivers/media/video/mt9t112.c | 2
-rw-r--r--  drivers/media/video/pwc/pwc-ctrl.c | 2
-rw-r--r--  drivers/media/video/saa7134/saa7134-empress.c | 8
-rw-r--r--  drivers/message/fusion/mptscsih.c | 2
-rw-r--r--  drivers/mmc/card/mmc_test.c | 9
-rw-r--r--  drivers/net/ax88796.c | 2
-rw-r--r--  drivers/net/benet/be_cmds.c | 1
-rw-r--r--  drivers/net/cxgb3/sge.c | 20
-rw-r--r--  drivers/net/e1000/e1000_main.c | 19
-rw-r--r--  drivers/net/igb/igb_main.c | 20
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c | 22
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 11
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/sfc/efx.c | 1
-rw-r--r--  drivers/net/sfc/falcon_boards.c | 45
-rw-r--r--  drivers/net/sfc/mcdi.c | 2
-rw-r--r--  drivers/net/sfc/qt202x_phy.c | 2
-rw-r--r--  drivers/net/sky2.c | 8
-rw-r--r--  drivers/net/tc35815.c | 1
-rw-r--r--  drivers/net/usb/cdc_ether.c | 5
-rw-r--r--  drivers/net/via-velocity.c | 41
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 4
-rw-r--r--  drivers/net/wireless/b43/b43.h | 1
-rw-r--r--  drivers/net/wireless/b43/main.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 22
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/rx.c | 2
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 6
-rw-r--r--  drivers/pci/quirks.c | 17
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 2
-rw-r--r--  drivers/power/wm97xx_battery.c | 10
-rw-r--r--  drivers/regulator/core.c | 2
-rw-r--r--  drivers/regulator/lp3971.c | 4
-rw-r--r--  drivers/s390/cio/qdio_main.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 9
-rw-r--r--  drivers/scsi/arm/fas216.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 18
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 3
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 3
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 2
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 44
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 8
-rw-r--r--  drivers/serial/8250.c | 7
-rw-r--r--  drivers/ssb/main.c | 3
-rw-r--r--  drivers/usb/core/devio.c | 48
-rw-r--r--  drivers/usb/gadget/f_eem.c | 3
-rw-r--r--  drivers/usb/gadget/multi.c | 2
-rw-r--r--  drivers/usb/gadget/r8a66597-udc.c | 1
-rw-r--r--  drivers/usb/gadget/s3c-hsotg.c | 1
-rw-r--r--  drivers/usb/host/ehci-hub.c | 13
-rw-r--r--  drivers/usb/host/fhci-tds.c | 6
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 41
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r--  drivers/usb/otg/Kconfig | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 25
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 18
-rw-r--r--  drivers/usb/serial/sierra.c | 1
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 2
-rw-r--r--  drivers/video/efifb.c | 11
-rw-r--r--  drivers/watchdog/bfin_wdt.c | 13
-rw-r--r--  fs/9p/v9fs.c | 33
-rw-r--r--  fs/9p/v9fs_vfs.h | 1
-rw-r--r--  fs/9p/vfs_file.c | 19
-rw-r--r--  fs/9p/vfs_inode.c | 2
-rw-r--r--  fs/befs/linuxvfs.c | 1
-rw-r--r--  fs/block_dev.c | 7
-rw-r--r--  fs/btrfs/disk-io.c | 7
-rw-r--r--  fs/btrfs/extent-tree.c | 8
-rw-r--r--  fs/btrfs/extent_io.c | 3
-rw-r--r--  fs/btrfs/file.c | 8
-rw-r--r--  fs/btrfs/inode.c | 50
-rw-r--r--  fs/btrfs/relocation.c | 3
-rw-r--r--  fs/cachefiles/namei.c | 12
-rw-r--r--  fs/cifs/CHANGES | 4
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/connect.c | 30
-rw-r--r--  fs/cifs/inode.c | 12
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cifs/sess.c | 11
-rw-r--r--  fs/compat_ioctl.c | 9
-rw-r--r--  fs/exec.c | 20
-rw-r--r--  fs/fcntl.c | 6
-rw-r--r--  fs/file_table.c | 1
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/gfs2/glock.c | 4
-rw-r--r--  fs/gfs2/glock.h | 2
-rw-r--r--  fs/gfs2/incore.h | 2
-rw-r--r--  fs/gfs2/lock_dlm.c | 11
-rw-r--r--  fs/gfs2/ops_fstype.c | 14
-rw-r--r--  fs/gfs2/super.c | 1
-rw-r--r--  fs/namei.c | 20
-rw-r--r--  fs/nfs/direct.c | 3
-rw-r--r--  fs/nfs/file.c | 2
-rw-r--r--  fs/nfs/fscache.c | 9
-rw-r--r--  fs/nfs/inode.c | 4
-rw-r--r--  fs/nfs/mount_clnt.c | 2
-rw-r--r--  fs/nfs/nfs2xdr.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 78
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/nfs4xdr.c | 6
-rw-r--r--  fs/nfs/pagelist.c | 17
-rw-r--r--  fs/nfs/super.c | 15
-rw-r--r--  fs/nfs/sysctl.c | 2
-rw-r--r--  fs/nfs/write.c | 6
-rw-r--r--  fs/nfsd/export.c | 10
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/nilfs2/segment.c | 2
-rw-r--r--  fs/ocfs2/aops.c | 4
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 6
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 10
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h | 2
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 38
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 147
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 8
-rw-r--r--  fs/ocfs2/dlmglue.c | 85
-rw-r--r--  fs/ocfs2/export.c | 2
-rw-r--r--  fs/ocfs2/extent_map.c | 2
-rw-r--r--  fs/ocfs2/file.c | 18
-rw-r--r--  fs/ocfs2/inode.c | 4
-rw-r--r--  fs/ocfs2/ioctl.c | 14
-rw-r--r--  fs/ocfs2/journal.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 4
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 11
-rw-r--r--  fs/ocfs2/refcounttree.c | 12
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 12
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/ocfs2/symlink.c | 10
-rw-r--r--  fs/ocfs2/uptodate.c | 4
-rw-r--r--  fs/proc/base.c | 24
-rw-r--r--  fs/reiserfs/inode.c | 2
-rw-r--r--  fs/sysfs/inode.c | 35
-rw-r--r--  include/drm/nouveau_drm.h | 1
-rw-r--r--  include/drm/vmwgfx_drm.h | 20
-rw-r--r--  include/linux/amba/bus.h | 6
-rw-r--r--  include/linux/ata.h | 4
-rw-r--r--  include/linux/bitops.h | 29
-rw-r--r--  include/linux/blkdev.h | 4
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/ftrace.h | 7
-rw-r--r--  include/linux/ftrace_event.h | 20
-rw-r--r--  include/linux/hw_breakpoint.h | 8
-rw-r--r--  include/linux/ima.h | 4
-rw-r--r--  include/linux/input.h | 1
-rw-r--r--  include/linux/kfifo.h | 2
-rw-r--r--  include/linux/list.h | 14
-rw-r--r--  include/linux/perf_event.h | 75
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/syscalls.h | 4
-rw-r--r--  include/net/netns/conntrack.h | 3
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/trace/events/lock.h | 29
-rw-r--r--  include/trace/ftrace.h | 60
-rw-r--r--  include/trace/syscall.h | 4
-rw-r--r--  init/Kconfig | 15
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/futex.c | 30
-rw-r--r--  kernel/hw_breakpoint.c | 10
-rw-r--r--  kernel/kfifo.c | 3
-rw-r--r--  kernel/kgdb.c | 6
-rw-r--r--  kernel/kprobes.c | 34
-rw-r--r--  kernel/perf_event.c | 642
-rw-r--r--  kernel/sched.c | 12
-rw-r--r--  kernel/softirq.c | 15
-rw-r--r--  kernel/softlockup.c | 15
-rw-r--r--  kernel/sys.c | 2
-rw-r--r--  kernel/time/timekeeping.c | 2
-rw-r--r--  kernel/trace/Makefile | 4
-rw-r--r--  kernel/trace/ftrace.c | 54
-rw-r--r--  kernel/trace/trace_event_profile.c | 52
-rw-r--r--  kernel/trace/trace_events_filter.c | 4
-rw-r--r--  kernel/trace/trace_kprobe.c | 198
-rw-r--r--  kernel/trace/trace_stack.c | 24
-rw-r--r--  kernel/trace/trace_syscalls.c | 76
-rw-r--r--  lib/Kconfig.debug | 8
-rw-r--r--  lib/idr.c | 9
-rw-r--r--  mm/migrate.c | 39
-rw-r--r--  mm/oom_kill.c | 2
-rw-r--r--  net/9p/client.c | 53
-rw-r--r--  net/9p/trans_fd.c | 10
-rw-r--r--  net/9p/trans_rdma.c | 9
-rw-r--r--  net/9p/trans_virtio.c | 4
-rw-r--r--  net/bluetooth/hci_conn.c | 3
-rw-r--r--  net/bluetooth/hci_event.c | 1
-rw-r--r--  net/bluetooth/hidp/core.c | 49
-rw-r--r--  net/bluetooth/hidp/hidp.h | 4
-rw-r--r--  net/bluetooth/rfcomm/core.c | 8
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/dst.c | 2
-rw-r--r--  net/core/ethtool.c | 1
-rw-r--r--  net/core/net-sysfs.c | 3
-rw-r--r--  net/core/pktgen.c | 1
-rw-r--r--  net/dccp/ccid.c | 2
-rw-r--r--  net/dccp/ccid.h | 8
-rw-r--r--  net/dccp/probe.c | 4
-rw-r--r--  net/ipv4/devinet.c | 7
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/ipcomp.c | 6
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 22
-rw-r--r--  net/ipv4/tcp_input.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 16
-rw-r--r--  net/ipv6/ipcomp6.c | 6
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 4
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 5
-rw-r--r--  net/key/af_key.c | 15
-rw-r--r--  net/mac80211/ibss.c | 2
-rw-r--r--  net/mac80211/rate.c | 3
-rw-r--r--  net/mac80211/scan.c | 18
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 116
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 7
-rw-r--r--  net/netlink/af_netlink.c | 5
-rw-r--r--  net/sched/Kconfig | 16
-rw-r--r--  net/xfrm/xfrm_state.c | 12
-rw-r--r--  samples/hw_breakpoint/data_breakpoint.c | 6
-rw-r--r--  security/integrity/ima/ima.h | 3
-rw-r--r--  security/integrity/ima/ima_api.c | 4
-rw-r--r--  security/integrity/ima/ima_iint.c | 9
-rw-r--r--  security/integrity/ima/ima_main.c | 239
-rw-r--r--  security/integrity/ima/ima_policy.c | 9
-rw-r--r--  security/security.c | 2
-rw-r--r--  sound/pci/ctxfi/ctatc.c | 15
-rw-r--r--  sound/pci/ctxfi/ctvmem.c | 38
-rw-r--r--  sound/pci/ctxfi/ctvmem.h | 8
-rw-r--r--  sound/pci/hda/hda_intel.c | 26
-rw-r--r--  sound/pci/ice1712/aureon.c | 12
-rw-r--r--  sound/soc/omap/omap3pandora.c | 1
-rw-r--r--  tools/perf/.gitignore | 1
-rw-r--r--  tools/perf/Documentation/perf-archive.txt | 22
-rw-r--r--  tools/perf/Documentation/perf-buildid-cache.txt | 33
-rw-r--r--  tools/perf/Documentation/perf-lock.txt | 29
-rw-r--r--  tools/perf/Documentation/perf-probe.txt | 20
-rw-r--r--  tools/perf/Documentation/perf-top.txt | 2
-rw-r--r--  tools/perf/Documentation/perf-trace-perl.txt | 2
-rw-r--r--  tools/perf/Documentation/perf-trace-python.txt | 625
-rw-r--r--  tools/perf/Documentation/perf-trace.txt | 15
-rw-r--r--  tools/perf/Documentation/perf.txt | 2
-rw-r--r--  tools/perf/Makefile | 51
-rw-r--r--  tools/perf/builtin-annotate.c | 240
-rw-r--r--  tools/perf/builtin-buildid-cache.c | 133
-rw-r--r--  tools/perf/builtin-buildid-list.c | 31
-rw-r--r--  tools/perf/builtin-diff.c | 74
-rw-r--r--  tools/perf/builtin-help.c | 5
-rw-r--r--  tools/perf/builtin-kmem.c | 48
-rw-r--r--  tools/perf/builtin-lock.c | 822
-rw-r--r--  tools/perf/builtin-probe.c | 94
-rw-r--r--  tools/perf/builtin-record.c | 50
-rw-r--r--  tools/perf/builtin-report.c | 58
-rw-r--r--  tools/perf/builtin-sched.c | 32
-rw-r--r--  tools/perf/builtin-stat.c | 106
-rw-r--r--  tools/perf/builtin-timechart.c | 25
-rw-r--r--  tools/perf/builtin-top.c | 109
-rw-r--r--  tools/perf/builtin-trace.c | 34
-rw-r--r--  tools/perf/builtin.h | 2
-rw-r--r--  tools/perf/command-list.txt | 3
-rw-r--r--  tools/perf/design.txt | 8
-rw-r--r--  tools/perf/perf-archive.sh | 33
-rw-r--r--  tools/perf/perf.c | 25
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.c | 5
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/Context.xs | 3
-rw-r--r--  tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm | 2
-rw-r--r--  tools/perf/scripts/perl/bin/check-perf-trace-record | 7
-rw-r--r--  tools/perf/scripts/perl/bin/check-perf-trace-report | 6
-rw-r--r--  tools/perf/scripts/perl/bin/failed-syscalls-record | 2
-rw-r--r--  tools/perf/scripts/perl/bin/failed-syscalls-report | 4
-rw-r--r--  tools/perf/scripts/perl/failed-syscalls.pl | 38
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/Context.c | 88
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 91
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 25
-rw-r--r--  tools/perf/scripts/python/bin/failed-syscalls-by-pid-record | 2
-rw-r--r--  tools/perf/scripts/python/bin/failed-syscalls-by-pid-report | 4
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-by-pid-record | 2
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-by-pid-report | 4
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-record | 2
-rw-r--r--  tools/perf/scripts/python/bin/syscall-counts-report | 4
-rw-r--r--  tools/perf/scripts/python/check-perf-trace.py | 83
-rw-r--r--  tools/perf/scripts/python/failed-syscalls-by-pid.py | 68
-rw-r--r--  tools/perf/scripts/python/syscall-counts-by-pid.py | 64
-rw-r--r--  tools/perf/scripts/python/syscall-counts.py | 58
-rw-r--r--  tools/perf/util/build-id.c | 39
-rw-r--r--  tools/perf/util/build-id.h | 8
-rw-r--r--  tools/perf/util/data_map.c | 252
-rw-r--r--  tools/perf/util/debug.c | 1
-rw-r--r--  tools/perf/util/debugfs.c | 17
-rw-r--r--  tools/perf/util/debugfs.h | 2
-rw-r--r--  tools/perf/util/event.c | 220
-rw-r--r--  tools/perf/util/event.h | 79
-rw-r--r--  tools/perf/util/header.c | 284
-rw-r--r--  tools/perf/util/header.h | 9
-rw-r--r--  tools/perf/util/include/linux/hash.h | 5
-rw-r--r--  tools/perf/util/include/linux/kernel.h | 1
-rw-r--r--  tools/perf/util/map.c | 52
-rw-r--r--  tools/perf/util/map.h | 94
-rw-r--r--  tools/perf/util/parse-events.c | 48
-rw-r--r--  tools/perf/util/probe-event.c | 105
-rw-r--r--  tools/perf/util/probe-event.h | 2
-rw-r--r--  tools/perf/util/probe-finder.c | 203
-rw-r--r--  tools/perf/util/probe-finder.h | 33
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c (renamed from tools/perf/util/trace-event-perl.c) | 115
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 573
-rw-r--r--  tools/perf/util/session.c | 431
-rw-r--r--  tools/perf/util/session.h | 55
-rw-r--r--  tools/perf/util/string.c | 65
-rw-r--r--  tools/perf/util/symbol.c | 529
-rw-r--r--  tools/perf/util/symbol.h | 52
-rw-r--r--  tools/perf/util/thread.c | 52
-rw-r--r--  tools/perf/util/thread.h | 24
-rw-r--r--  tools/perf/util/trace-event-info.c | 64
-rw-r--r--  tools/perf/util/trace-event-parse.c | 24
-rw-r--r--  tools/perf/util/trace-event-perl.h | 55
-rw-r--r--  tools/perf/util/trace-event-read.c | 18
-rw-r--r--  tools/perf/util/trace-event-scripting.c | 167
-rw-r--r--  tools/perf/util/trace-event.h | 10
-rw-r--r--  tools/perf/util/util.c | 94
-rw-r--r--  tools/perf/util/util.h | 3
-rw-r--r--  tools/perf/util/values.c | 1
599 files changed, 13687 insertions(+), 5864 deletions(-)
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 6434f0df012e..6cd6daefaaed 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -20,7 +20,7 @@ Description:
  lsm: [[subj_user=] [subj_role=] [subj_type=]
  [obj_user=] [obj_role=] [obj_type=]]
 
- base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
+ base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
  mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
  fsmagic:= hex value
  uid:= decimal value
@@ -40,11 +40,11 @@ Description:
 
  measure func=BPRM_CHECK
  measure func=FILE_MMAP mask=MAY_EXEC
- measure func=INODE_PERM mask=MAY_READ uid=0
+ measure func=FILE_CHECK mask=MAY_READ uid=0
 
  The default policy measures all executables in bprm_check,
  all files mmapped executable in file_mmap, and all files
- open for read by root in inode_permission.
+ open for read by root in do_filp_open.
 
  Examples of LSM specific definitions:
 
@@ -54,8 +54,8 @@ Description:
 
  dont_measure obj_type=var_log_t
  dont_measure obj_type=auditd_log_t
- measure subj_user=system_u func=INODE_PERM mask=MAY_READ
- measure subj_role=system_r func=INODE_PERM mask=MAY_READ
+ measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
+ measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
 
  Smack:
- measure subj_user=_ func=INODE_PERM mask=MAY_READ
+ measure subj_user=_ func=FILE_CHECK mask=MAY_READ
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d09..737988fca64d 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency. For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
 decide that the CPU frequency needs to be increased.
 
 ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d45602886..e7848a0d99eb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
  acpi_display_output=video
  See above.
 
+ acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods
+ early. Needed on some platforms to properly
+ initialize the EC.
+
  acpi_irq_balance [HW,ACPI]
  ACPI will balance active IRQs
  default in APIC mode
@@ -311,6 +315,11 @@ and is between 256 and 4096 characters. It is defined in the file
  aic79xx= [HW,SCSI]
  See Documentation/scsi/aic79xx.txt.
 
+ alignment= [KNL,ARM]
+ Allow the default userspace alignment fault handler
+ behaviour to be specified. Bit 0 enables warnings,
+ bit 1 enables fixups, and bit 2 sends a segfault.
+
  amd_iommu= [HW,X86-84]
  Pass parameters to the AMD IOMMU driver in the system.
  Possible values are:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 006b39dec87d..e87f3cdc8a6a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
  Default: 5
 
 max_addresses - INTEGER
- Number of maximum addresses per interface. 0 disables limitation.
- It is recommended not set too large value (or 0) because it would
- be too easy way to crash kernel to allow to create too much of
- autoconfigured addresses.
+ Maximum number of autoconfigured addresses per interface. Setting
+ to zero disables the limitation. It is not recommended to set this
+ value too large (or to zero) because it would be an easy way to
+ crash the kernel by allowing too many addresses to be created.
 
  Default: 16
 
 disable_ipv6 - BOOLEAN
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index 47aabeebbdf6..a9100b28eb84 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -24,6 +24,7 @@ Synopsis of kprobe_events
 -------------------------
  p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe
  r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe
+ -:[GRP/]EVENT : Clear a probe
 
  GRP : Group name. If omitted, use "kprobes" for it.
  EVENT : Event name. If omitted, the event name is generated
@@ -37,15 +38,12 @@ Synopsis of kprobe_events
  @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol)
  $stackN : Fetch Nth entry of stack (N >= 0)
  $stack : Fetch stack address.
- $argN : Fetch function argument. (N >= 0)(*)
- $retval : Fetch return value.(**)
- +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***)
+ $retval : Fetch return value.(*)
+ +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
  NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
 
- (*) aN may not correct on asmlinkaged functions and at the middle of
- function body.
- (**) only for return probe.
- (***) this is useful for fetching a field of data structures.
+ (*) only for return probe.
+ (**) this is useful for fetching a field of data structures.
 
 
 Per-Probe Event Filtering
@@ -82,13 +80,16 @@ Usage examples
 To add a probe as a new event, write a new definition to kprobe_events
 as below.
 
- echo p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3 > /sys/kernel/debug/tracing/kprobe_events
+ echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' > /sys/kernel/debug/tracing/kprobe_events
 
  This sets a kprobe on the top of do_sys_open() function with recording
-1st to 4th arguments as "myprobe" event. As this example shows, users can
-choose more familiar names for each arguments.
+1st to 4th arguments as "myprobe" event. Note, which register/stack entry is
+assigned to each function argument depends on arch-specific ABI. If you unsure
+the ABI, please try to use probe subcommand of perf-tools (you can find it
+under tools/perf/).
+As this example shows, users can choose more familiar names for each arguments.
 
- echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events
+ echo 'r:myretprobe do_sys_open $retval' >> /sys/kernel/debug/tracing/kprobe_events
 
  This sets a kretprobe on the return point of do_sys_open() function with
 recording return value as "myretprobe" event.
@@ -97,23 +98,24 @@ recording return value as "myretprobe" event.
 
  cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format
 name: myprobe
-ID: 75
+ID: 780
 format:
- field:unsigned short common_type; offset:0; size:2;
- field:unsigned char common_flags; offset:2; size:1;
- field:unsigned char common_preempt_count; offset:3; size:1;
- field:int common_pid; offset:4; size:4;
- field:int common_tgid; offset:8; size:4;
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1;signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:int common_lock_depth; offset:8; size:4; signed:1;
 
- field: unsigned long ip; offset:16;tsize:8;
- field: int nargs; offset:24;tsize:4;
- field: unsigned long dfd; offset:32;tsize:8;
- field: unsigned long filename; offset:40;tsize:8;
- field: unsigned long flags; offset:48;tsize:8;
- field: unsigned long mode; offset:56;tsize:8;
+ field:unsigned long __probe_ip; offset:12; size:4; signed:0;
+ field:int __probe_nargs; offset:16; size:4; signed:1;
+ field:unsigned long dfd; offset:20; size:4; signed:0;
+ field:unsigned long filename; offset:24; size:4; signed:0;
+ field:unsigned long flags; offset:28; size:4; signed:0;
+ field:unsigned long mode; offset:32; size:4; signed:0;
 
-print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, REC->filename, REC->flags, REC->mode
 
+print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->__probe_ip,
+REC->dfd, REC->filename, REC->flags, REC->mode
 
  You can see that the event has 4 arguments as in the expressions you specified.
 
@@ -121,6 +123,12 @@ print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, R
 
  This clears all probe points.
 
+ Or,
+
+ echo -:myprobe >> kprobe_events
+
+ This clears probe points selectively.
+
  Right after definition, each event is disabled by default. For tracing these
 events, you need to enable it.
 
@@ -146,4 +154,3 @@ events, you need to enable it.
 returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel
 returns from do_sys_open to sys_open+0x1b).
 
-
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f323..2533fc45a44a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -616,10 +616,10 @@ M: Richard Purdie <rpurdie@rpsys.net>
 S: Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T: git git://gitorious.org/linux-gemini/mainline.git
-S: Maintained
+S: Odd Fixes
 F: arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T: topgit git://git.openezx.org/openezx.git
 F: arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
 F: arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -1733,10 +1733,9 @@ F: include/linux/tfrc.h
 F: net/dccp/
 
 DECnet NETWORK LAYER
-M: Christine Caulfield <christine.caulfield@googlemail.com>
 W: http://linux-decnet.sourceforge.net
 L: linux-decnet-user@lists.sourceforge.net
-S: Maintained
+S: Orphan
 F: Documentation/networking/decnet.txt
 F: net/decnet/
 
@@ -3411,8 +3410,10 @@ S: Maintained
 F: drivers/scsi/sym53c8xx_2/
 
 LTP (Linux Test Project)
-M: Subrata Modak <subrata@linux.vnet.ibm.com>
-M: Mike Frysinger <vapier@gentoo.org>
+M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
+M: Garrett Cooper <yanegomi@gmail.com>
+M: Mike Frysinger <vapier@gentoo.org>
+M: Subrata Modak <subrata@linux.vnet.ibm.com>
 L: ltp-list@lists.sourceforge.net (subscribers-only)
 W: http://ltp.sourceforge.net/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3488,9 +3489,9 @@ S: Maintained
 F: drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
-M: Lennert Buytenhek <buytenh@marvell.com>
+M: Lennert Buytenhek <buytenh@wantstofly.org>
 L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
 F: drivers/net/mv643xx_eth.*
 F: include/linux/mv643xx.h
 
@@ -3836,6 +3837,7 @@ NETWORKING DRIVERS
 L: netdev@vger.kernel.org
 W: http://www.linuxfoundation.org/en/Net
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S: Odd Fixes
 F: drivers/net/
 F: include/linux/if_*
diff --git a/Makefile b/Makefile
index 394aec712c7d..1b24895212d8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 9d055b4f0585..06a13729c8df 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -3,11 +3,9 @@
 #
 
 config OPROFILE
- tristate "OProfile system profiling (EXPERIMENTAL)"
+ tristate "OProfile system profiling"
  depends on PROFILING
  depends on HAVE_OPROFILE
- depends on TRACING_SUPPORT
- select TRACING
  select RING_BUFFER
  select RING_BUFFER_ALLOW_SWAP
  help
@@ -17,20 +15,6 @@ config OPROFILE
 
  If unsure, say N.
 
-config OPROFILE_IBS
- bool "OProfile AMD IBS support (EXPERIMENTAL)"
- default n
- depends on OPROFILE && SMP && X86
- help
- Instruction-Based Sampling (IBS) is a new profiling
- technique that provides rich, precise program performance
- information. IBS is introduced by AMD Family10h processors
- (AMD Opteron Quad-Core processor "Barcelona") to overcome
- the limitations of conventional performance counter
- sampling.
-
- If unsure, say N.
-
 config OPROFILE_EVENT_MULTIPLEX
  bool "OProfile multiplexing support (EXPERIMENTAL)"
  default n
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c33ca82f9b1..184a6bd54825 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -702,6 +702,7 @@ config ARCH_OMAP
  select ARCH_HAS_CPUFREQ
  select GENERIC_TIME
  select GENERIC_CLOCKEVENTS
+ select ARCH_HAS_HOLES_MEMORYMODEL
  help
  Support for TI's OMAP platform (OMAP1 and OMAP2).
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 9e7582572741..356d702c0808 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -94,7 +94,7 @@ CFLAGS_ABI +=-funwind-tables
 endif
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
-AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=thumb,-Wa$(comma)-mauto-it)
+AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
 AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
 CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
 AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c77d2fa1f6e5..8113bb5fb66e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -42,7 +42,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
- defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+ defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+ defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c6c57b640b6b..621acad8ea43 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
 #endif
 #ifdef CONFIG_OUTER_CACHE
 struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
 #endif
 
 struct stack {
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index e7263854bc7b..fe3bd5ac8b10 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
  unsigned int reg_both, reg_level, reg_type;
 
  reg_type = __raw_readl(base + GPIO_INT_TYPE);
- reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+ reg_level = __raw_readl(base + GPIO_INT_LEVEL);
  reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
 
  switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
  }
 
  __raw_writel(reg_type, base + GPIO_INT_TYPE);
- __raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+ __raw_writel(reg_level, base + GPIO_INT_LEVEL);
  __raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
  gpio_ack_irq(irq);
diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c
index 6e838b857712..6acc88bcdc40 100644
--- a/arch/arm/mach-mx25/clock.c
+++ b/arch/arm/mach-mx25/clock.c
@@ -119,6 +119,11 @@ static unsigned long get_rate_nfc(struct clk *clk)
  return get_rate_per(8);
 }
 
+static unsigned long get_rate_gpt(struct clk *clk)
+{
+ return get_rate_per(5);
+}
+
 static unsigned long get_rate_otg(struct clk *clk)
 {
  return 48000000; /* FIXME */
@@ -144,7 +149,7 @@ static void clk_cgcr_disable(struct clk *clk)
  __raw_writel(reg, clk->enable_reg);
 }
 
-#define DEFINE_CLOCK(name, i, er, es, gr, sr) \
+#define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \
  static struct clk name = { \
  .id = i, \
  .enable_reg = CRM_BASE + er, \
@@ -153,27 +158,30 @@ static void clk_cgcr_disable(struct clk *clk)
  .set_rate = sr, \
  .enable = clk_cgcr_enable, \
  .disable = clk_cgcr_disable, \
+ .secondary = s, \
  }
 
-DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL);
-DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL);
-DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL);
-DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL);
-DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL);
-DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL);
-DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL);
-DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL);
-DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL);
-DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL);
-DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL);
-DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL);
-DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL);
-DEFINE_CLOCK(fec_clk, 0, CCM_CGCR0, 23, get_rate_ipg, NULL);
+DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL);
+DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
+DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
+DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL);
+DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
+DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL);
+DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
 
 #define _REGISTER_CLOCK(d, n, c) \
  { \
@@ -208,13 +216,21 @@ static struct clk_lookup lookups[] = {
  _REGISTER_CLOCK("fec.0", NULL, fec_clk)
 };
 
-int __init mx25_clocks_init(unsigned long fref)
+int __init mx25_clocks_init(void)
 {
  int i;
 
  for (i = 0; i < ARRAY_SIZE(lookups); i++)
  clkdev_add(&lookups[i]);
 
+ /* Turn off all clocks except the ones we need to survive, namely:
+ * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
+ * SCC
+ */
+ __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
+ __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
+ __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
+
  mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
 
  return 0;
diff --git a/arch/arm/mach-mx25/mx25pdk.c b/arch/arm/mach-mx25/mx25pdk.c
index 921bc99ea231..6f06089246eb 100644
--- a/arch/arm/mach-mx25/mx25pdk.c
+++ b/arch/arm/mach-mx25/mx25pdk.c
@@ -91,7 +91,7 @@ static void __init mx25pdk_init(void)
 
 static void __init mx25pdk_timer_init(void)
 {
- mx25_clocks_init(26000000);
+ mx25_clocks_init();
 }
 
 static struct sys_timer mx25pdk_timer = {
diff --git a/arch/arm/mach-mx3/mx31ads.c b/arch/arm/mach-mx3/mx31ads.c
index 3e7bafa2ddbb..938c549767dc 100644
--- a/arch/arm/mach-mx3/mx31ads.c
+++ b/arch/arm/mach-mx3/mx31ads.c
@@ -173,6 +173,7 @@ static void expio_unmask_irq(u32 irq)
 }
 
 static struct irq_chip expio_irq_chip = {
+ .name = "EXPIO(CPLD)",
  .ack = expio_ack_irq,
  .mask = expio_mask_irq,
  .unmask = expio_unmask_irq,
@@ -302,6 +303,7 @@ static struct regulator_init_data ldo1_data = {
  .min_uV = 2800000,
  .max_uV = 2800000,
  .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
  .apply_uV = 1,
  },
 };
@@ -322,6 +324,7 @@ static struct regulator_init_data ldo2_data = {
  .min_uV = 3300000,
  .max_uV = 3300000,
  .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
  .apply_uV = 1,
  },
  .num_consumer_supplies = ARRAY_SIZE(ldo2_consumers),
@@ -459,6 +462,7 @@ static int mx31_wm8350_init(struct wm8350 *wm8350)
 
 static struct wm8350_platform_data __initdata mx31_wm8350_pdata = {
  .init = mx31_wm8350_init,
+ .irq_base = MXC_BOARD_IRQ_START + MXC_MAX_EXP_IO_LINES,
 };
 #endif
 
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 3f1334f62e7a..7027cdc1ba49 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -505,7 +505,7 @@ static void __init gpmc_mem_init(void)
 void __init gpmc_init(void)
 {
  u32 l;
- char *ck;
+ char *ck = NULL;
 
  if (cpu_is_omap24xx()) {
  ck = "core_l3_ck";
@@ -521,6 +521,9 @@ void __init gpmc_init(void)
  l = OMAP44XX_GPMC_BASE;
  }
 
+ if (WARN_ON(!ck))
+ return;
+
  gpmc_l3_clk = clk_get(NULL, ck);
  if (IS_ERR(gpmc_l3_clk)) {
  printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 27054025da2b..26aeef560aa3 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -194,7 +194,7 @@ void __init omap_init_irq(void)
  int i;
 
  for (i = 0; i < ARRAY_SIZE(irq_banks); i++) {
- unsigned long base;
+ unsigned long base = 0;
  struct omap_irq_bank *bank = irq_banks + i;
 
  if (cpu_is_omap24xx())
@@ -202,6 +202,8 @@ void __init omap_init_irq(void)
  else if (cpu_is_omap34xx())
  base = OMAP34XX_IC_BASE;
 
+ BUG_ON(!base);
+
  /* Static mapping, never released */
  bank->base_reg = ioremap(base, SZ_4K);
  if (!bank->base_reg) {
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index 0c3c72d934bf..8afe9dd3f150 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -408,6 +408,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
 {
  struct twl4030_hsmmc_info *c;
  int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
+ int i;
 
  if (cpu_is_omap2430()) {
  control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
@@ -434,7 +435,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
  mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
  if (!mmc) {
  pr_err("Cannot allocate memory for mmc device!\n");
- return;
+ goto done;
  }
 
  if (c->name)
@@ -532,6 +533,10 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
  continue;
  c->dev = mmc->dev;
  }
+
+done:
+ for (i = 0; i < nr_hsmmc; i++)
+ kfree(hsmmc_data[i]);
 }
 
 #endif
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 3f59bd12cbbf..5fef73f4743d 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -486,7 +486,7 @@ int __init omap_mux_init_signal(char *muxname, int val)
 static inline void omap_mux_decode(struct seq_file *s, u16 val)
 {
  char *flags[OMAP_MUX_MAX_NR_FLAGS];
- char mode[14];
+ char mode[sizeof("OMAP_MUX_MODE") + 1];
  int i = -1;
 
  sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7);
@@ -553,6 +553,7 @@ static int omap_mux_dbg_board_show(struct seq_file *s, void *unused)
  if (!m0_name)
  continue;
 
+ /* REVISIT: Needs to be updated if mode0 names get longer */
  for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) {
  if (m0_name[i] == '\0') {
  m0_def[i] = m0_name[i];
@@ -960,7 +961,12 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
  while (superset->reg_offset != OMAP_MUX_TERMINATOR) {
  struct omap_mux *entry;
 
-#ifndef CONFIG_OMAP_MUX
+#ifdef CONFIG_OMAP_MUX
+ if (!superset->muxnames || !superset->muxnames[0]) {
+ superset++;
+ continue;
+ }
+#else
  /* Skip pins that are not muxed as GPIO by bootloader */
  if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
  superset++;
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 68e0a595f9a1..07aa7b3c95f7 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -649,6 +649,53 @@ static struct omap_mux __initdata omap3_muxmodes[] = {
 	_OMAP3_MUXENTRY(UART3_TX_IRTX, 166,
 		"uart3_tx_irtx", NULL, NULL, NULL,
 		"gpio_166", NULL, NULL, "safe_mode"),
+
+	/* Only on 3630, see omap36xx_cbp_subset for the signals */
+	_OMAP3_MUXENTRY(GPMC_A11, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MBUSFLAG, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MREAD, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MWRITE, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_SBUSFLAG, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_SREAD, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_SWRITE, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(GPMC_A11, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MCAD28, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MCAD29, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MCAD32, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MCAD33, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MCAD34, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MCAD35, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
+	_OMAP3_MUXENTRY(SAD2D_MCAD36, 0,
+		NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 8c964bec8159..e10a02df6e1d 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -36,7 +36,13 @@
 #define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV	0x52
 #define UART_OMAP_WER		0x17	/* Wake-up enable register */
 
-#define DEFAULT_TIMEOUT (5 * HZ)
+/*
+ * NOTE: By default the serial timeout is disabled, as it causes lost
+ * characters over the serial ports. This means that the UART clocks will
+ * stay on until disabled via sysfs. This also blocks any deeper omap
+ * sleep states.
+ */
+#define DEFAULT_TIMEOUT 0
 
 struct omap_uart_state {
 	int num;
@@ -422,7 +428,8 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
 	uart->timeout = DEFAULT_TIMEOUT;
 	setup_timer(&uart->timer, omap_uart_idle_timer,
 		    (unsigned long) uart);
-	mod_timer(&uart->timer, jiffies + uart->timeout);
+	if (uart->timeout)
+		mod_timer(&uart->timer, jiffies + uart->timeout);
 	omap_uart_smart_idle_enable(uart, 0);
 
 	if (cpu_is_omap34xx()) {
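
With DEFAULT_TIMEOUT now 0, arming the timer unconditionally would make it expire almost immediately and idle the UART right after init; guarding mod_timer() means a zero timeout simply never starts the idle timer, so the port keeps its clocks until a timeout is written via sysfs. The guarded-arm pattern in isolation (struct uart_state and the callback name are placeholders):

    /* timeout == 0 means "never idle"; only arm the timer when nonzero. */
    static void idle_timer_init(struct uart_state *u)
    {
    	u->timeout = DEFAULT_TIMEOUT;
    	setup_timer(&u->timer, idle_timer_fn, (unsigned long)u);
    	if (u->timeout)
    		mod_timer(&u->timer, jiffies + u->timeout);
    }
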
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index a21a4b395f73..d94857eb0690 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -334,8 +334,8 @@ static void realview_pbx_reset(char mode)
 	 * in the system FPGA
 	 */
 	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
-	__raw_writel(0x0000, reset_ctrl);
-	__raw_writel(0x0004, reset_ctrl);
+	__raw_writel(0x00F0, reset_ctrl);
+	__raw_writel(0x00F4, reset_ctrl);
 }
 
 static void __init realview_pbx_init(void)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b270d6228fe2..62820eda84d9 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/moduleparam.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -77,6 +78,8 @@ static unsigned long ai_dword;
 static unsigned long ai_multi;
 static int ai_usermode;
 
+core_param(alignment, ai_usermode, int, 0600);
+
 #define UM_WARN		(1 << 0)
 #define UM_FIXUP	(1 << 1)
 #define UM_SIGNAL	(1 << 2)
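
core_param() registers the variable as a parameter of the core kernel itself, so alignment handling can now be chosen at boot with alignment=N on the command line and, thanks to the 0600 mode, read or changed at runtime through /sys/module/kernel/parameters/alignment. The same mechanism in miniature:

    #include <linux/moduleparam.h>

    static int debug_level;

    /* Boot:    debug_level=2
     * Runtime: /sys/module/kernel/parameters/debug_level (root only) */
    core_param(debug_level, debug_level, int, 0600);
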
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 3f9cd3d8f6d5..795dc615f43b 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -41,7 +41,7 @@ ENTRY(cpu_arm7_dcache_clean_area)
 ENTRY(cpu_arm7_data_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
-	ldr	r8, [r0]			@ read arm instruction
+	ldr	r8, [r2]			@ read arm instruction
 	tst	r8, #1 << 20			@ L = 0 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
diff --git a/arch/arm/plat-mxc/audmux-v2.c b/arch/arm/plat-mxc/audmux-v2.c
index 6f21096086fd..b06954a84436 100644
--- a/arch/arm/plat-mxc/audmux-v2.c
+++ b/arch/arm/plat-mxc/audmux-v2.c
@@ -23,6 +23,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/debugfs.h>
 #include <mach/audmux.h>
 #include <mach/hardware.h>
 
@@ -32,6 +33,140 @@ static void __iomem *audmux_base;
 #define MXC_AUDMUX_V2_PTCR(x)	((x) * 8)
 #define MXC_AUDMUX_V2_PDCR(x)	((x) * 8 + 4)
 
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *audmux_debugfs_root;
+
+static int audmux_open_file(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* There is an annoying discontinuity in the SSI numbering with regard
+ * to the Linux numbering of the devices */
+static const char *audmux_port_string(int port)
+{
+	switch (port) {
+	case MX31_AUDMUX_PORT1_SSI0:
+		return "imx-ssi.0";
+	case MX31_AUDMUX_PORT2_SSI1:
+		return "imx-ssi.1";
+	case MX31_AUDMUX_PORT3_SSI_PINS_3:
+		return "SSI3";
+	case MX31_AUDMUX_PORT4_SSI_PINS_4:
+		return "SSI4";
+	case MX31_AUDMUX_PORT5_SSI_PINS_5:
+		return "SSI5";
+	case MX31_AUDMUX_PORT6_SSI_PINS_6:
+		return "SSI6";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	int port = (int)file->private_data;
+	u32 pdcr, ptcr;
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (audmux_clk)
+		clk_enable(audmux_clk);
+
+	ptcr = readl(audmux_base + MXC_AUDMUX_V2_PTCR(port));
+	pdcr = readl(audmux_base + MXC_AUDMUX_V2_PDCR(port));
+
+	if (audmux_clk)
+		clk_disable(audmux_clk);
+
+	ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+		       pdcr, ptcr);
+
+	if (ptcr & MXC_AUDMUX_V2_PTCR_TFSDIR)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+				"TxFS output from %s, ",
+				audmux_port_string((ptcr >> 27) & 0x7));
+	else
+		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+				"TxFS input, ");
+
+	if (ptcr & MXC_AUDMUX_V2_PTCR_TCLKDIR)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+				"TxClk output from %s",
+				audmux_port_string((ptcr >> 22) & 0x7));
+	else
+		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+				"TxClk input");
+
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+	if (ptcr & MXC_AUDMUX_V2_PTCR_SYN) {
+		ret += snprintf(buf + ret, PAGE_SIZE - ret,
+				"Port is symmetric");
+	} else {
+		if (ptcr & MXC_AUDMUX_V2_PTCR_RFSDIR)
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"RxFS output from %s, ",
+					audmux_port_string((ptcr >> 17) & 0x7));
+		else
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"RxFS input, ");
+
+		if (ptcr & MXC_AUDMUX_V2_PTCR_RCLKDIR)
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"RxClk output from %s",
+					audmux_port_string((ptcr >> 12) & 0x7));
+		else
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"RxClk input");
+	}
+
+	ret += snprintf(buf + ret, PAGE_SIZE - ret,
+			"\nData received from %s\n",
+			audmux_port_string((pdcr >> 13) & 0x7));
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static const struct file_operations audmux_debugfs_fops = {
+	.open = audmux_open_file,
+	.read = audmux_read_file,
+};
+
+static void audmux_debugfs_init(void)
+{
+	int i;
+	char buf[20];
+
+	audmux_debugfs_root = debugfs_create_dir("audmux", NULL);
+	if (!audmux_debugfs_root) {
+		pr_warning("Failed to create AUDMUX debugfs root\n");
+		return;
+	}
+
+	for (i = 1; i < 8; i++) {
+		snprintf(buf, sizeof(buf), "ssi%d", i);
+		if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
+					 (void *)i, &audmux_debugfs_fops))
+			pr_warning("Failed to create AUDMUX port %d debugfs file\n",
+				   i);
+	}
+}
+#else
+static inline void audmux_debugfs_init(void)
+{
+}
+#endif
+
 int mxc_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
 		unsigned int pdcr)
 {
@@ -68,6 +203,8 @@ static int mxc_audmux_v2_init(void)
 	if (cpu_is_mx31() || cpu_is_mx35())
 		audmux_base = IO_ADDRESS(AUDMUX_BASE_ADDR);
 
+	audmux_debugfs_init();
+
 	return 0;
 }
 
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31lite.h b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
index 0184b638c268..2b2da0367578 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31lite.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
@@ -25,7 +25,7 @@
 
 #ifndef __ASSEMBLY__
 
-enum mx31lilly_boards {
+enum mx31lite_boards {
 	MX31LITE_NOBOARD = 0,
 	MX31LITE_DB = 1,
 };
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 286cb9b0a25b..4bf1068ffad9 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -32,7 +32,7 @@ extern void mxc91231_init_irq(void);
 extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
 extern int mx1_clocks_init(unsigned long fref);
 extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
-extern int mx25_clocks_init(unsigned long fref);
+extern int mx25_clocks_init(void);
 extern int mx27_clocks_init(unsigned long fref);
 extern int mx31_clocks_init(unsigned long fref);
 extern int mx35_clocks_init(void);
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx35.h b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
index 00b0ac1db225..c88d40795f7a 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx35.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
@@ -671,7 +671,7 @@
 #define MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8	IOMUX_PAD(0x634, 0x1d0, 6, 0x0, 0, NO_PAD_CTRL)
 
 #define MX35_PAD_LD9__IPU_DISPB_DAT_9	IOMUX_PAD(0x638, 0x1d4, 0, 0x0, 0, NO_PAD_CTRL)
-#define MX35_PAD_LD9__GPIO2_9	IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4 0, NO_PAD_CTRL)
+#define MX35_PAD_LD9__GPIO2_9	IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4, 0, NO_PAD_CTRL)
 #define MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9	IOMUX_PAD(0x638, 0x1d4, 6, 0x0, 0, NO_PAD_CTRL)
 
 #define MX35_PAD_LD10__IPU_DISPB_DAT_10	IOMUX_PAD(0x63c, 0x1d8, 0, 0x0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index ead9d592168d..0cb347645db4 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -37,7 +37,12 @@
  * within sensible limits.
  */
 #define MXC_BOARD_IRQ_START	(MXC_INTERNAL_IRQS + MXC_GPIO_IRQS)
+
+#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
+#define MXC_BOARD_IRQS	80
+#else
 #define MXC_BOARD_IRQS	16
+#endif
 
 #define MXC_IPU_IRQ_START	(MXC_BOARD_IRQ_START + MXC_BOARD_IRQS)
 
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index d9f8c844c385..4becbdd1935c 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -391,7 +391,7 @@ static struct dentry *clk_debugfs_root;
 static int clk_debugfs_register_one(struct clk *c)
 {
 	int err;
-	struct dentry *d, *child;
+	struct dentry *d, *child, *child_tmp;
 	struct clk *pa = c->parent;
 	char s[255];
 	char *p = s;
@@ -423,7 +423,7 @@ static int clk_debugfs_register_one(struct clk *c)
 
 err_out:
 	d = c->dent;
-	list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
+	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
 		debugfs_remove(child);
 	debugfs_remove(c->dent);
 	return err;
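
The switch to list_for_each_entry_safe() matters because debugfs_remove() unlinks the child from d_subdirs while the walk is still in progress; the plain iterator would read the next pointer out of an entry that was just torn down. The _safe variant caches the successor in child_tmp before the body runs, which is the standard idiom for delete-while-iterating:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct node {
    	struct list_head link;
    };

    static void free_all(struct list_head *head)
    {
    	struct node *n, *tmp;

    	/* tmp holds the next entry before n is unlinked and freed */
    	list_for_each_entry_safe(n, tmp, head, link) {
    		list_del(&n->link);
    		kfree(n);
    	}
    }
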
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index d17620c50c28..d2422c766cca 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -750,6 +750,7 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
 }
 #endif
 
+#ifdef CONFIG_ARCH_OMAP1
 /*
  * This only applies to chips that can't do both rising and falling edge
  * detection at once. For all other chips, this function is a noop.
@@ -760,11 +761,9 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
 	u32 l = 0;
 
 	switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
 		reg += OMAP_MPUIO_GPIO_INT_EDGE;
 		break;
-#endif
 #ifdef CONFIG_ARCH_OMAP15XX
 	case METHOD_GPIO_1510:
 		reg += OMAP1510_GPIO_INT_CONTROL;
@@ -787,6 +786,7 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
 
 	__raw_writel(l, reg);
 }
+#endif
 
 static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
 {
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 1e5648d3e3d8..2ed72013c2e2 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -89,16 +89,6 @@
 #define USE_WAKEUP_LAT			0
 #define IGNORE_WAKEUP_LAT		1
 
-/* XXX this should be moved into a separate file */
-#if defined(CONFIG_ARCH_OMAP2420)
-# define OMAP_32KSYNCT_BASE		0x48004000
-#elif defined(CONFIG_ARCH_OMAP2430)
-# define OMAP_32KSYNCT_BASE		0x49020000
-#elif defined(CONFIG_ARCH_OMAP3430)
-# define OMAP_32KSYNCT_BASE		0x48320000
-#else
-# error Unknown OMAP device
-#endif
 
 /* Private functions */
 
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5a79fc6ee818..31c2f4c30a95 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
 #
 # http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Thu Jan 28 22:15:54 2010
+# Last update: Sat Feb 20 14:16:15 2010
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -2257,7 +2257,7 @@ oratisalog		MACH_ORATISALOG		ORATISALOG		2268
 oratismadi		MACH_ORATISMADI		ORATISMADI		2269
 oratisot16		MACH_ORATISOT16		ORATISOT16		2270
 oratisdesk		MACH_ORATISDESK		ORATISDESK		2271
-v2_ca9			MACH_V2P_CA9		V2P_CA9			2272
+vexpress		MACH_VEXPRESS		VEXPRESS		2272
 sintexo			MACH_SINTEXO		SINTEXO			2273
 cm3389			MACH_CM3389		CM3389			2274
 omap3_cio		MACH_OMAP3_CIO		OMAP3_CIO		2275
@@ -2636,3 +2636,45 @@ hw90240			MACH_HW90240		HW90240			2648
 dm365_leopard		MACH_DM365_LEOPARD	DM365_LEOPARD		2649
 mityomapl138		MACH_MITYOMAPL138	MITYOMAPL138		2650
 scat110			MACH_SCAT110		SCAT110			2651
+acer_a1			MACH_ACER_A1		ACER_A1			2652
+cmcontrol		MACH_CMCONTROL		CMCONTROL		2653
+pelco_lamar		MACH_PELCO_LAMAR	PELCO_LAMAR		2654
+rfp43			MACH_RFP43		RFP43			2655
+sk86r0301		MACH_SK86R0301		SK86R0301		2656
+ctpxa			MACH_CTPXA		CTPXA			2657
+epb_arm9_a		MACH_EPB_ARM9_A		EPB_ARM9_A		2658
+guruplug		MACH_GURUPLUG		GURUPLUG		2659
+spear310		MACH_SPEAR310		SPEAR310		2660
+spear320		MACH_SPEAR320		SPEAR320		2661
+robotx			MACH_ROBOTX		ROBOTX			2662
+lsxhl			MACH_LSXHL		LSXHL			2663
+smartlite		MACH_SMARTLITE		SMARTLITE		2664
+cws2			MACH_CWS2		CWS2			2665
+m619			MACH_M619		M619			2666
+smartview		MACH_SMARTVIEW		SMARTVIEW		2667
+lsa_salsa		MACH_LSA_SALSA		LSA_SALSA		2668
+kizbox			MACH_KIZBOX		KIZBOX			2669
+htccharmer		MACH_HTCCHARMER		HTCCHARMER		2670
+guf_neso_lt		MACH_GUF_NESO_LT	GUF_NESO_LT		2671
+pm9g45			MACH_PM9G45		PM9G45			2672
+htcpanther		MACH_HTCPANTHER		HTCPANTHER		2673
+htcpanther_cdma		MACH_HTCPANTHER_CDMA	HTCPANTHER_CDMA		2674
+reb01			MACH_REB01		REB01			2675
+aquila			MACH_AQUILA		AQUILA			2676
+spark_sls_hw2		MACH_SPARK_SLS_HW2	SPARK_SLS_HW2		2677
+sheeva_esata		MACH_ESATA_SHEEVAPLUG	ESATA_SHEEVAPLUG	2678
+surf7x30		MACH_SURF7X30		SURF7X30		2679
+micro2440		MACH_MICRO2440		MICRO2440		2680
+am2440			MACH_AM2440		AM2440			2681
+tq2440			MACH_TQ2440		TQ2440			2682
+lpc2478oem		MACH_LPC2478OEM		LPC2478OEM		2683
+ak880x			MACH_AK880X		AK880X			2684
+cobra3530		MACH_COBRA3530		COBRA3530		2685
+pmppb			MACH_PMPPB		PMPPB			2686
+u6715			MACH_U6715		U6715			2687
+axar1500_sender		MACH_AXAR1500_SENDER	AXAR1500_SENDER		2688
+g30_dvb			MACH_G30_DVB		G30_DVB			2689
+vc088x			MACH_VC088X		VC088X			2690
+mioa702			MACH_MIOA702		MIOA702			2691
+hpmin			MACH_HPMIN		HPMIN			2692
+ak880xak		MACH_AK880XAK		AK880XAK		2693
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f60a5400a25b..a63c4be99b36 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -197,10 +197,13 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
 	}
 
 	/*
-	 * Update the FPSCR with the additional exception flags.
+	 * If any of the status flags are set, update the FPSCR.
 	 * Comparison instructions always return at least one of
 	 * these flags set.
 	 */
+	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
+		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
+
 	fpscr |= exceptions;
 
 	fmxr(FPSCR, fpscr);
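
The VFP fix is a read-modify-write on the condition flags: a comparison result fully defines N/Z/C/V, so the old flag bits must be cleared before the new ones are OR-ed in, or a stale flag from a previous operation could leak through. Reduced to its essentials (FLAG_MASK is shorthand for the four FPSCR flag bits):

    #define FLAG_MASK (FPSCR_N | FPSCR_Z | FPSCR_C | FPSCR_V)

    static u32 merge_status_flags(u32 fpscr, u32 exceptions)
    {
    	if (exceptions & FLAG_MASK)
    		fpscr &= ~FLAG_MASK;	/* drop stale flags first */
    	return fpscr | exceptions;	/* then merge the new state */
    }
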
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1aa1ea5e9212..b13d1879e51b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
 	struct platform_device *pdev;
-	struct mci_dma_slave *slave;
+	struct mci_dma_data *slave;
 	u32 pioa_mask;
 	u32 piob_mask;
 
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 				ARRAY_SIZE(atmel_mci0_resource)))
 		goto fail;
 
-	slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+	slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+	if (!slave)
+		goto fail;
 
 	slave->sdata.dma_dev = &dw_dmac0_device.dev;
 	slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 
 	if (platform_device_add_data(pdev, data,
 				sizeof(struct mci_platform_data)))
-		goto fail;
+		goto fail_free;
 
 	/* CLK line is common to both slots */
 	pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 		/* Slot is unused */
 		break;
 	default:
-		goto fail;
+		goto fail_free;
 	}
 
 	select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 		break;
 	default:
 		if (!data->slot[0].bus_width)
-			goto fail;
+			goto fail_free;
 
 		data->slot[1].bus_width = 0;
 		break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 	platform_device_add(pdev);
 	return pdev;
 
+fail_free:
+	kfree(slave);
 fail:
 	data->dma_slave = NULL;
-	kfree(slave);
 	platform_device_put(pdev);
 	return NULL;
 }
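
The at32ap700x error handling now unwinds in reverse order of acquisition: failures before the kzalloc() jump to fail (no buffer to free), failures after it jump to fail_free (buffer owned, must be released). Labels stacked latest-resource-first are the usual kernel shape for this; a stripped-down sketch with invented helpers:

    /* acquire_a()/acquire_b()/release_a() are hypothetical. */
    static int setup(void)
    {
    	void *a, *b;

    	a = acquire_a();
    	if (!a)
    		goto fail;		/* nothing acquired yet */
    	b = acquire_b();
    	if (!b)
    		goto fail_free_a;	/* undo only what succeeded */
    	return 0;

    fail_free_a:
    	release_a(a);
    fail:
    	return -ENOMEM;
    }
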
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 7ae58892ba8d..e97b255d97bc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0	/* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0	/* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index e14108b19c09..4c41656ede87 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -201,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
    relevant until we have real hardware to play with... */
 #define ELF_PLATFORM	NULL
 
-#define SET_PERSONALITY(ex)	set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex)	\
+	set_personality((current->personality & ~PER_MASK) | PER_LINUX)
+
 #define elf_read_implies_exec(ex, executable_stack)	\
 	((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
 
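
The new SET_PERSONALITY keeps the flag bits of the current personality (for example ADDR_NO_RANDOMIZE set via sys_personality() before exec) and replaces only the base personality, where the old definition wiped the whole word. The masking step on its own:

    /* PER_MASK selects the base-personality bits; the remaining bits
     * are flags that should survive exec. */
    static unsigned int exec_personality(unsigned int cur)
    {
    	return (cur & ~PER_MASK) | PER_LINUX;
    }
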
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 9adac441ac9b..7026b29e277a 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		return 1;
 
 ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
+#if !defined(CONFIG_PREEMPT)
 	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		ia64_psr(regs)->ri = p->ainsn.slot;
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index ece1bf994499..e456f062f241 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
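
The DEFINE_PER_CPU fix moves the array brackets from the type to the name, mirroring how an ordinary declaration "short map[N];" is written; the per-cpu macros paste the type and name together into a declaration, so the bracket placement has to match. For an array the per-cpu declaration looks like:

    #include <linux/percpu.h>

    #define MAX_NODES 16	/* hypothetical bound */

    /* type first, then "name[size]", exactly as in a plain declaration */
    DEFINE_PER_CPU(short, node_map[MAX_NODES]);
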
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index bb7c374713ad..6fced1fe3bf0 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31
-# Thu Sep 24 10:28:50 2009
+# Linux kernel version: 2.6.33-rc6
+# Wed Feb 3 10:02:59 2010
 #
 CONFIG_MICROBLAZE=y
 # CONFIG_SWAP is not set
@@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_GENERIC_GPIO=y
 CONFIG_GENERIC_CSUM=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
 # CONFIG_PCI is not set
 CONFIG_NO_DMA=y
+CONFIG_DTC=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 CONFIG_CONSTRUCTORS=y
 
@@ -44,6 +48,7 @@ CONFIG_SYSVIPC_SYSCTL=y
 #
 CONFIG_TREE_RCU=y
 # CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_FANOUT=32
 # CONFIG_RCU_FANOUT_EXACT is not set
@@ -64,10 +69,12 @@ CONFIG_INITRAMFS_ROOT_GID=0
 CONFIG_RD_GZIP=y
 # CONFIG_RD_BZIP2 is not set
 # CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
 # CONFIG_INITRAMFS_COMPRESSION_NONE is not set
 CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 # CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
 # CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
+# CONFIG_INITRAMFS_COMPRESSION_LZO is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
@@ -90,21 +97,20 @@ CONFIG_EVENTFD=y
 CONFIG_AIO=y
 
 #
-# Performance Counters
+# Kernel Performance Events And Counters
 #
 CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
 CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
 
 #
 # GCOV-based kernel profiling
 #
-# CONFIG_SLOW_WORK is not set
+CONFIG_SLOW_WORK=y
 # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_BASE_SMALL=1
@@ -123,14 +129,41 @@ CONFIG_LBDAF=y
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
 # CONFIG_FREEZER is not set
 
 #
@@ -139,11 +172,6 @@ CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_PLATFORM_GENERIC=y
 CONFIG_OPT_LIB_FUNCTION=y
 CONFIG_OPT_LIB_ASM=y
-CONFIG_ALLOW_EDIT_AUTO=y
-
-#
-# Automatic platform settings from Kconfig.auto
-#
 
 #
 # Definitions for MICROBLAZE0
@@ -203,12 +231,11 @@ CONFIG_FLATMEM_MANUAL=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_SPLIT_PTLOCK_CPUS=999999
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 
 #
@@ -289,7 +316,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
 # CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
@@ -313,6 +346,10 @@ CONFIG_OF_DEVICE=y
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_COW_COMMON is not set
 # CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
 # CONFIG_BLK_DEV_NBD is not set
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
@@ -349,7 +386,6 @@ CONFIG_NETDEVICES=y
 # CONFIG_PHYLIB is not set
 CONFIG_NET_ETHERNET=y
 # CONFIG_MII is not set
-# CONFIG_ETHOC is not set
 # CONFIG_DNET is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -359,12 +395,12 @@ CONFIG_NET_ETHERNET=y
 # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
 # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 # CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 CONFIG_XILINX_EMACLITE=y
 CONFIG_NETDEV_1000=y
 CONFIG_NETDEV_10000=y
 CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_HOSTAP is not set
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -408,6 +444,7 @@ CONFIG_SERIAL_UARTLITE=y
 CONFIG_SERIAL_UARTLITE_CONSOLE=y
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 CONFIG_LEGACY_PTYS=y
@@ -433,7 +470,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
 
 #
@@ -526,8 +562,6 @@ CONFIG_PROC_FS=y
 CONFIG_PROC_SYSCTL=y
 CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_HUGETLB_PAGE is not set
 # CONFIG_CONFIGFS_FS is not set
 CONFIG_MISC_FILESYSTEMS=y
@@ -638,11 +672,13 @@ CONFIG_NLS_DEFAULT="iso8859-1"
 #
 # Kernel hacking
 #
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
@@ -662,6 +698,9 @@ CONFIG_DEBUG_SLAB=y
 # CONFIG_DEBUG_SLAB_LEAK is not set
 CONFIG_DEBUG_SPINLOCK=y
 # CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
@@ -680,10 +719,29 @@ CONFIG_DEBUG_INFO=y
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
 # CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
 # CONFIG_SYSCTL_SYSCALL_CHECK is not set
 # CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_SAMPLES is not set
-# CONFIG_KMEMCHECK is not set
 CONFIG_EARLY_PRINTK=y
 # CONFIG_HEART_BEAT is not set
 CONFIG_DEBUG_BOOTMEM=y
@@ -694,7 +752,11 @@ CONFIG_DEBUG_BOOTMEM=y
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
 # CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 CONFIG_CRYPTO=y
 
 #
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index adb839bab704..ce2da535246a 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31
-# Thu Sep 24 10:29:43 2009
+# Linux kernel version: 2.6.33-rc6
+# Wed Feb 3 10:03:21 2010
 #
 CONFIG_MICROBLAZE=y
 # CONFIG_SWAP is not set
@@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_GENERIC_GPIO=y
 CONFIG_GENERIC_CSUM=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
 # CONFIG_PCI is not set
 CONFIG_NO_DMA=y
+CONFIG_DTC=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 CONFIG_CONSTRUCTORS=y
 
@@ -46,6 +50,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
 #
 CONFIG_TREE_RCU=y
 # CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
 # CONFIG_RCU_TRACE is not set
 CONFIG_RCU_FANOUT=32
 # CONFIG_RCU_FANOUT_EXACT is not set
@@ -81,16 +86,16 @@ CONFIG_EVENTFD=y
 CONFIG_AIO=y
 
 #
-# Performance Counters
+# Kernel Performance Events And Counters
 #
 CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
 CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
 
 #
 # GCOV-based kernel profiling
@@ -116,14 +121,41 @@ CONFIG_LBDAF=y
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
 # CONFIG_DEFAULT_DEADLINE is not set
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
 # CONFIG_FREEZER is not set
 
 #
@@ -132,7 +164,10 @@ CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_PLATFORM_GENERIC=y
 # CONFIG_SELFMOD is not set
 # CONFIG_OPT_LIB_FUNCTION is not set
-# CONFIG_ALLOW_EDIT_AUTO is not set
+
+#
+# Definitions for MICROBLAZE0
+#
 CONFIG_KERNEL_BASE_ADDR=0x90000000
 CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5"
 CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
@@ -190,7 +225,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
 
 #
@@ -274,9 +308,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_AF_RXRPC is not set
 CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
-CONFIG_CFG80211_DEFAULT_PS_VALUE=0
-CONFIG_WIRELESS_OLD_REGULATORY=y
-# CONFIG_WIRELESS_EXT is not set
 # CONFIG_LIB80211 is not set
 
 #
@@ -301,9 +332,9 @@ CONFIG_STANDALONE=y
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
 # CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
 CONFIG_MTD_CONCAT=y
 CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
 # CONFIG_MTD_REDBOOT_PARTS is not set
 CONFIG_MTD_CMDLINE_PARTS=y
 # CONFIG_MTD_OF_PARTS is not set
@@ -387,6 +418,10 @@ CONFIG_OF_DEVICE=y
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_COW_COMMON is not set
 # CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
@@ -423,7 +458,6 @@ CONFIG_NETDEVICES=y
 # CONFIG_PHYLIB is not set
 CONFIG_NET_ETHERNET=y
 # CONFIG_MII is not set
-# CONFIG_ETHOC is not set
 # CONFIG_DNET is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -433,12 +467,12 @@ CONFIG_NET_ETHERNET=y
 # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
 # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 # CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
 # CONFIG_XILINX_EMACLITE is not set
 CONFIG_NETDEV_1000=y
 CONFIG_NETDEV_10000=y
 CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_HOSTAP is not set
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -482,6 +516,7 @@ CONFIG_SERIAL_UARTLITE=y
 CONFIG_SERIAL_UARTLITE_CONSOLE=y
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 CONFIG_LEGACY_PTYS=y
@@ -508,7 +543,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
 # CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
 
 #
@@ -616,7 +650,6 @@ CONFIG_INOTIFY_USER=y
 CONFIG_PROC_FS=y
 CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 # CONFIG_CONFIGFS_FS is not set
 CONFIG_MISC_FILESYSTEMS=y
@@ -672,11 +705,13 @@ CONFIG_MSDOS_PARTITION=y
 #
 # Kernel hacking
 #
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
@@ -695,12 +730,16 @@ CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
+# CONFIG_DEBUG_OBJECTS_WORK is not set
 CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
 # CONFIG_DEBUG_SLAB is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
@@ -720,8 +759,28 @@ CONFIG_DEBUG_SG=y
 # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
 # CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_DYNAMIC_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_EARLY_PRINTK=y
@@ -734,7 +793,11 @@ CONFIG_EARLY_PRINTK=y
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
 # CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
 CONFIG_CRYPTO=y
 
 #
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index fc9997b73c09..267c7c779e53 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
  * Little endian
  */
 
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
 
 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
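
The io.h change removes a stray trailing semicolon from the macro body. With it, out_le32() expanded to two statements, so using the macro as the body of an unbraced if immediately followed by else failed to compile:

    if (little_endian)
    	out_le32(reg, v);	/* old macro expands to "...;;" -- the
    				   first ';' ends the if body, the second
    				   is a separate empty statement */
    else
    	out_be32(reg, v);	/* error: 'else' without a previous 'if' */
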
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index d9d63831cc2f..2a56bccce4e0 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -172,16 +172,15 @@ do { \
172/* It is used only first parameter for OP - for wic, wdc */ 172/* It is used only first parameter for OP - for wic, wdc */
173#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ 173#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
174do { \ 174do { \
175 int step = -line_length; \ 175 int volatile temp; \
176 int count = end - start; \ 176 BUG_ON(end - start <= 0); \
177 BUG_ON(count <= 0); \
178 \ 177 \
179 __asm__ __volatile__ (" 1: addk %0, %0, %1; \ 178 __asm__ __volatile__ (" 1: " #op " %1, r0; \
180 " #op " %0, r0; \ 179 cmpu %0, %1, %2; \
181 bgtid %1, 1b; \ 180 bgtid %0, 1b; \
182 addk %1, %1, %2; \ 181 addk %1, %1, %3; \
183 " : : "r" (start), "r" (count), \ 182 " : : "r" (temp), "r" (start), "r" (end),\
184 "r" (step) : "memory"); \ 183 "r" (line_length) : "memory"); \
185} while (0); 184} while (0);
186 185
187static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 186static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
313 pr_debug("%s\n", __func__); 312 pr_debug("%s\n", __func__);
314 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 313 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
315 wdc.clear) 314 wdc.clear)
316
317#if 0
318 unsigned int i;
319
320 pr_debug("%s\n", __func__);
321
322 /* Just loop through cache size and invalidate it */
323 for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
324 __invalidate_dcache(0, i);
325#endif
326} 315}
327 316
328static void __invalidate_dcache_range_wb(unsigned long start, 317static void __invalidate_dcache_range_wb(unsigned long start,
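
The rewritten CACHE_RANGE_LOOP_1 above walks the address range directly instead of maintaining a separate down-counter and step register. Roughly, the new inline assembly implements the C loop below; cache_line_op() is a hypothetical stand-in for the wic/wdc instruction, so treat this as a control-flow sketch rather than the kernel's code:

#include <stdio.h>

static void print_op(unsigned long addr)
{
	printf("op 0x%lx\n", addr);
}

/* Sketch of the loop the new asm implements. */
static void cache_range_loop_1(unsigned long start, unsigned long end,
			       unsigned long line_length,
			       void (*cache_line_op)(unsigned long))
{
	unsigned long addr = start, done;

	do {
		cache_line_op(addr);	/* the "#op %1, r0" instruction */
		done = addr;
		addr += line_length;	/* addk, executed in the branch delay slot */
	} while (done < end);		/* cmpu + bgtid: loop while end > done */
}

int main(void)
{
	/* Five lines, 0x1000 through 0x1080 inclusive. */
	cache_range_loop_1(0x1000, 0x1080, 0x20, print_op);
	return 0;
}

Note the asm no longer needs a dedicated count register: it compares the running address against end on each pass, with the increment hidden in the delay slot.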
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 95b0855802df..391d6197fc3b 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -122,7 +122,7 @@ ENTRY(_interrupt)
122 122
123ret_from_intr: 123ret_from_intr:
124 lwi r11, r1, PT_MODE 124 lwi r11, r1, PT_MODE
125 bneid r11, 3f 125 bneid r11, no_intr_resched
126 126
127 lwi r6, r31, TS_THREAD_INFO /* get thread info */ 127 lwi r6, r31, TS_THREAD_INFO /* get thread info */
128 lwi r19, r6, TI_FLAGS /* get flags in thread info */ 128 lwi r19, r6, TI_FLAGS /* get flags in thread info */
@@ -133,16 +133,18 @@ ret_from_intr:
133 bralid r15, schedule 133 bralid r15, schedule
134 nop 134 nop
1351: andi r11, r19, _TIF_SIGPENDING 1351: andi r11, r19, _TIF_SIGPENDING
136 beqid r11, no_intr_reshed 136 beqid r11, no_intr_resched
137 addk r5, r1, r0 137 addk r5, r1, r0
138 addk r7, r0, r0 138 addk r7, r0, r0
139 bralid r15, do_signal 139 bralid r15, do_signal
140 addk r6, r0, r0 140 addk r6, r0, r0
141 141
142no_intr_reshed: 142no_intr_resched:
143 /* Disable interrupts, we are now committed to the state restore */
144 disable_irq
145
143 /* save mode indicator */ 146 /* save mode indicator */
144 lwi r11, r1, PT_MODE 147 lwi r11, r1, PT_MODE
1453:
146 swi r11, r0, PER_CPU(KM) 148 swi r11, r0, PER_CPU(KM)
147 149
148 /* save r31 */ 150 /* save r31 */
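
Besides correcting the misspelled no_intr_reshed label, the entry-nommu.S hunk above disables interrupts once the return path commits to restoring registers. A C-like sketch of the fixed flow, with stubbed, purely illustrative helpers in place of the real assembly:

static int kernel_mode, need_resched_flag, have_signal;

static void schedule(void) {}
static void do_signal(void) {}
static void local_irq_disable(void) {}
static void restore_registers(void) {}

static void ret_from_intr(void)
{
	if (kernel_mode)		/* bneid r11, no_intr_resched */
		goto no_intr_resched;

	if (need_resched_flag)
		schedule();
	if (have_signal)
		do_signal();

no_intr_resched:
	/* New in this patch: with interrupts off, a late interrupt can no
	 * longer re-enter on top of a half-restored register frame. */
	local_irq_disable();
	restore_registers();
}

int main(void)
{
	ret_from_intr();
	return 0;
}

The label rename also lets the kernel-mode branch and the no-signal path share one exit point, which is why the old numeric 3: label disappears.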
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 5372b24ad049..bb8c4b9ccb80 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
54 54
55 microblaze_cache_init(); 55 microblaze_cache_init();
56 56
57 invalidate_dcache();
57 enable_dcache(); 58 enable_dcache();
58 59
59 invalidate_icache(); 60 invalidate_icache();
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index c51405e57921..29d3cbf9555f 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
141 break; 141 break;
142 } 142 }
143 143
144 /* Ignore the last page when the ddr size is 128M. Cached
145 * accesses to the last page cause the processor to prefetch
146 * using addresses above 128M, stepping out of the ddr address
147 * space.
148 */
149 if (mem == 0x8000000)
150 mem -= 0x1000;
151
144 add_memory_region(0, mem, BOOT_MEM_RAM); 152 add_memory_region(0, mem, BOOT_MEM_RAM);
145} 153}
146 154
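
The prom.c hunk above trims one 4 KiB page from the detected RAM when it is exactly 128 MiB (0x8000000), so that cached prefetches past the last line cannot generate addresses above the ddr window. The arithmetic, as a standalone check:

#include <stdio.h>

#define MEM_128M 0x8000000UL	/* 128 MiB, the only size the workaround targets */
#define PAGE_SZ  0x1000UL	/* one 4 KiB page */

int main(void)
{
	unsigned long mem = MEM_128M;

	if (mem == MEM_128M)	/* mirrors the added check in prom_init_mem() */
		mem -= PAGE_SZ;

	printf("registering 0x%lx bytes (128 MiB minus one page)\n", mem);
	return 0;
}

Only the exact 128 MiB case is adjusted; smaller or larger configurations register their full detected size, as before.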
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index ed84b4cb3c8d..84b6503f10b9 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.23-rc2 3# Linux kernel version: 2.6.33-rc6
4# Tue Aug 7 13:04:24 2007 4# Wed Feb 3 18:12:31 2010
5# 5#
6CONFIG_MIPS=y 6CONFIG_MIPS=y
7 7
@@ -9,20 +9,28 @@ CONFIG_MIPS=y
9# Machine selection 9# Machine selection
10# 10#
11# CONFIG_MACH_ALCHEMY is not set 11# CONFIG_MACH_ALCHEMY is not set
12# CONFIG_AR7 is not set
13# CONFIG_BCM47XX is not set
14# CONFIG_BCM63XX is not set
12# CONFIG_MIPS_COBALT is not set 15# CONFIG_MIPS_COBALT is not set
13# CONFIG_MACH_DECSTATION is not set 16# CONFIG_MACH_DECSTATION is not set
14# CONFIG_MACH_JAZZ is not set 17# CONFIG_MACH_JAZZ is not set
15# CONFIG_LEMOTE_FULONG is not set 18# CONFIG_LASAT is not set
19# CONFIG_MACH_LOONGSON is not set
16# CONFIG_MIPS_MALTA is not set 20# CONFIG_MIPS_MALTA is not set
17# CONFIG_MIPS_SIM is not set 21# CONFIG_MIPS_SIM is not set
18# CONFIG_MARKEINS is not set 22# CONFIG_NEC_MARKEINS is not set
19# CONFIG_MACH_VR41XX is not set 23# CONFIG_MACH_VR41XX is not set
24# CONFIG_NXP_STB220 is not set
25# CONFIG_NXP_STB225 is not set
20# CONFIG_PNX8550_JBS is not set 26# CONFIG_PNX8550_JBS is not set
21# CONFIG_PNX8550_STB810 is not set 27# CONFIG_PNX8550_STB810 is not set
22# CONFIG_PMC_MSP is not set 28# CONFIG_PMC_MSP is not set
23# CONFIG_PMC_YOSEMITE is not set 29# CONFIG_PMC_YOSEMITE is not set
30# CONFIG_POWERTV is not set
24# CONFIG_SGI_IP22 is not set 31# CONFIG_SGI_IP22 is not set
25CONFIG_SGI_IP27=y 32CONFIG_SGI_IP27=y
33# CONFIG_SGI_IP28 is not set
26# CONFIG_SGI_IP32 is not set 34# CONFIG_SGI_IP32 is not set
27# CONFIG_SIBYTE_CRHINE is not set 35# CONFIG_SIBYTE_CRHINE is not set
28# CONFIG_SIBYTE_CARMEL is not set 36# CONFIG_SIBYTE_CARMEL is not set
@@ -33,32 +41,39 @@ CONFIG_SGI_IP27=y
33# CONFIG_SIBYTE_SENTOSA is not set 41# CONFIG_SIBYTE_SENTOSA is not set
34# CONFIG_SIBYTE_BIGSUR is not set 42# CONFIG_SIBYTE_BIGSUR is not set
35# CONFIG_SNI_RM is not set 43# CONFIG_SNI_RM is not set
36# CONFIG_TOSHIBA_JMR3927 is not set 44# CONFIG_MACH_TX39XX is not set
37# CONFIG_TOSHIBA_RBTX4927 is not set 45# CONFIG_MACH_TX49XX is not set
38# CONFIG_TOSHIBA_RBTX4938 is not set 46# CONFIG_MIKROTIK_RB532 is not set
39# CONFIG_WR_PPMC is not set 47# CONFIG_WR_PPMC is not set
48# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
49# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
50# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
40CONFIG_SGI_SN_M_MODE=y 51CONFIG_SGI_SN_M_MODE=y
41# CONFIG_SGI_SN_N_MODE is not set 52# CONFIG_SGI_SN_N_MODE is not set
42# CONFIG_MAPPED_KERNEL is not set 53# CONFIG_MAPPED_KERNEL is not set
43# CONFIG_REPLICATE_KTEXT is not set 54# CONFIG_REPLICATE_KTEXT is not set
44# CONFIG_REPLICATE_EXHANDLERS is not set 55# CONFIG_REPLICATE_EXHANDLERS is not set
56CONFIG_LOONGSON_UART_BASE=y
45CONFIG_RWSEM_GENERIC_SPINLOCK=y 57CONFIG_RWSEM_GENERIC_SPINLOCK=y
46# CONFIG_ARCH_HAS_ILOG2_U32 is not set 58# CONFIG_ARCH_HAS_ILOG2_U32 is not set
47# CONFIG_ARCH_HAS_ILOG2_U64 is not set 59# CONFIG_ARCH_HAS_ILOG2_U64 is not set
60CONFIG_ARCH_SUPPORTS_OPROFILE=y
48CONFIG_GENERIC_FIND_NEXT_BIT=y 61CONFIG_GENERIC_FIND_NEXT_BIT=y
49CONFIG_GENERIC_HWEIGHT=y 62CONFIG_GENERIC_HWEIGHT=y
50CONFIG_GENERIC_CALIBRATE_DELAY=y 63CONFIG_GENERIC_CALIBRATE_DELAY=y
64CONFIG_GENERIC_CLOCKEVENTS=y
51CONFIG_GENERIC_TIME=y 65CONFIG_GENERIC_TIME=y
52CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 66CONFIG_GENERIC_CMOS_UPDATE=y
67CONFIG_SCHED_OMIT_FRAME_POINTER=y
53CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 68CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
54CONFIG_ARC=y 69CONFIG_ARC=y
55CONFIG_DMA_COHERENT=y 70CONFIG_DMA_COHERENT=y
56CONFIG_EARLY_PRINTK=y
57CONFIG_SYS_HAS_EARLY_PRINTK=y 71CONFIG_SYS_HAS_EARLY_PRINTK=y
58# CONFIG_NO_IOPORT is not set 72# CONFIG_NO_IOPORT is not set
59CONFIG_CPU_BIG_ENDIAN=y 73CONFIG_CPU_BIG_ENDIAN=y
60# CONFIG_CPU_LITTLE_ENDIAN is not set 74# CONFIG_CPU_LITTLE_ENDIAN is not set
61CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y 75CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
76CONFIG_DEFAULT_SGI_PARTITION=y
62CONFIG_MIPS_L1_CACHE_SHIFT=7 77CONFIG_MIPS_L1_CACHE_SHIFT=7
63CONFIG_ARC64=y 78CONFIG_ARC64=y
64CONFIG_BOOT_ELF64=y 79CONFIG_BOOT_ELF64=y
@@ -66,7 +81,8 @@ CONFIG_BOOT_ELF64=y
66# 81#
67# CPU selection 82# CPU selection
68# 83#
69# CONFIG_CPU_LOONGSON2 is not set 84# CONFIG_CPU_LOONGSON2E is not set
85# CONFIG_CPU_LOONGSON2F is not set
70# CONFIG_CPU_MIPS32_R1 is not set 86# CONFIG_CPU_MIPS32_R1 is not set
71# CONFIG_CPU_MIPS32_R2 is not set 87# CONFIG_CPU_MIPS32_R2 is not set
72# CONFIG_CPU_MIPS64_R1 is not set 88# CONFIG_CPU_MIPS64_R1 is not set
@@ -79,6 +95,7 @@ CONFIG_BOOT_ELF64=y
79# CONFIG_CPU_TX49XX is not set 95# CONFIG_CPU_TX49XX is not set
80# CONFIG_CPU_R5000 is not set 96# CONFIG_CPU_R5000 is not set
81# CONFIG_CPU_R5432 is not set 97# CONFIG_CPU_R5432 is not set
98# CONFIG_CPU_R5500 is not set
82# CONFIG_CPU_R6000 is not set 99# CONFIG_CPU_R6000 is not set
83# CONFIG_CPU_NEVADA is not set 100# CONFIG_CPU_NEVADA is not set
84# CONFIG_CPU_R8000 is not set 101# CONFIG_CPU_R8000 is not set
@@ -86,6 +103,7 @@ CONFIG_CPU_R10000=y
86# CONFIG_CPU_RM7000 is not set 103# CONFIG_CPU_RM7000 is not set
87# CONFIG_CPU_RM9000 is not set 104# CONFIG_CPU_RM9000 is not set
88# CONFIG_CPU_SB1 is not set 105# CONFIG_CPU_SB1 is not set
106# CONFIG_CPU_CAVIUM_OCTEON is not set
89CONFIG_SYS_HAS_CPU_R10000=y 107CONFIG_SYS_HAS_CPU_R10000=y
90CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y 108CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
91CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y 109CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
@@ -99,6 +117,7 @@ CONFIG_64BIT=y
99CONFIG_PAGE_SIZE_4KB=y 117CONFIG_PAGE_SIZE_4KB=y
100# CONFIG_PAGE_SIZE_8KB is not set 118# CONFIG_PAGE_SIZE_8KB is not set
101# CONFIG_PAGE_SIZE_16KB is not set 119# CONFIG_PAGE_SIZE_16KB is not set
120# CONFIG_PAGE_SIZE_32KB is not set
102# CONFIG_PAGE_SIZE_64KB is not set 121# CONFIG_PAGE_SIZE_64KB is not set
103CONFIG_CPU_HAS_PREFETCH=y 122CONFIG_CPU_HAS_PREFETCH=y
104CONFIG_MIPS_MT_DISABLED=y 123CONFIG_MIPS_MT_DISABLED=y
@@ -110,6 +129,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
110CONFIG_IRQ_PER_CPU=y 129CONFIG_IRQ_PER_CPU=y
111CONFIG_CPU_SUPPORTS_HIGHMEM=y 130CONFIG_CPU_SUPPORTS_HIGHMEM=y
112CONFIG_ARCH_DISCONTIGMEM_ENABLE=y 131CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
132CONFIG_ARCH_POPULATES_NODE_MAP=y
113CONFIG_NUMA=y 133CONFIG_NUMA=y
114CONFIG_SYS_SUPPORTS_NUMA=y 134CONFIG_SYS_SUPPORTS_NUMA=y
115CONFIG_NODES_SHIFT=6 135CONFIG_NODES_SHIFT=6
@@ -120,16 +140,22 @@ CONFIG_DISCONTIGMEM_MANUAL=y
120CONFIG_DISCONTIGMEM=y 140CONFIG_DISCONTIGMEM=y
121CONFIG_FLAT_NODE_MEM_MAP=y 141CONFIG_FLAT_NODE_MEM_MAP=y
122CONFIG_NEED_MULTIPLE_NODES=y 142CONFIG_NEED_MULTIPLE_NODES=y
123# CONFIG_SPARSEMEM_STATIC is not set 143CONFIG_PAGEFLAGS_EXTENDED=y
124CONFIG_SPLIT_PTLOCK_CPUS=4 144CONFIG_SPLIT_PTLOCK_CPUS=4
125CONFIG_MIGRATION=y 145CONFIG_MIGRATION=y
126CONFIG_RESOURCES_64BIT=y 146CONFIG_PHYS_ADDR_T_64BIT=y
127CONFIG_ZONE_DMA_FLAG=0 147CONFIG_ZONE_DMA_FLAG=0
128CONFIG_VIRT_TO_BUS=y 148CONFIG_VIRT_TO_BUS=y
149# CONFIG_KSM is not set
150CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
129CONFIG_SMP=y 151CONFIG_SMP=y
130CONFIG_SYS_SUPPORTS_SMP=y 152CONFIG_SYS_SUPPORTS_SMP=y
131CONFIG_NR_CPUS_DEFAULT_64=y 153CONFIG_NR_CPUS_DEFAULT_64=y
132CONFIG_NR_CPUS=64 154CONFIG_NR_CPUS=64
155CONFIG_TICK_ONESHOT=y
156CONFIG_NO_HZ=y
157CONFIG_HIGH_RES_TIMERS=y
158CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
133# CONFIG_HZ_48 is not set 159# CONFIG_HZ_48 is not set
134# CONFIG_HZ_100 is not set 160# CONFIG_HZ_100 is not set
135# CONFIG_HZ_128 is not set 161# CONFIG_HZ_128 is not set
@@ -142,13 +168,13 @@ CONFIG_HZ=1000
142CONFIG_PREEMPT_NONE=y 168CONFIG_PREEMPT_NONE=y
143# CONFIG_PREEMPT_VOLUNTARY is not set 169# CONFIG_PREEMPT_VOLUNTARY is not set
144# CONFIG_PREEMPT is not set 170# CONFIG_PREEMPT is not set
145CONFIG_PREEMPT_BKL=y
146# CONFIG_MIPS_INSANE_LARGE is not set 171# CONFIG_MIPS_INSANE_LARGE is not set
147# CONFIG_KEXEC is not set 172# CONFIG_KEXEC is not set
148CONFIG_SECCOMP=y 173CONFIG_SECCOMP=y
149CONFIG_LOCKDEP_SUPPORT=y 174CONFIG_LOCKDEP_SUPPORT=y
150CONFIG_STACKTRACE_SUPPORT=y 175CONFIG_STACKTRACE_SUPPORT=y
151CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 176CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
177CONFIG_CONSTRUCTORS=y
152 178
153# 179#
154# General setup 180# General setup
@@ -162,20 +188,41 @@ CONFIG_SWAP=y
162CONFIG_SYSVIPC=y 188CONFIG_SYSVIPC=y
163CONFIG_SYSVIPC_SYSCTL=y 189CONFIG_SYSVIPC_SYSCTL=y
164CONFIG_POSIX_MQUEUE=y 190CONFIG_POSIX_MQUEUE=y
191CONFIG_POSIX_MQUEUE_SYSCTL=y
165# CONFIG_BSD_PROCESS_ACCT is not set 192# CONFIG_BSD_PROCESS_ACCT is not set
166# CONFIG_TASKSTATS is not set 193# CONFIG_TASKSTATS is not set
167# CONFIG_USER_NS is not set
168# CONFIG_AUDIT is not set 194# CONFIG_AUDIT is not set
195
196#
197# RCU Subsystem
198#
199CONFIG_TREE_RCU=y
200# CONFIG_TREE_PREEMPT_RCU is not set
201# CONFIG_TINY_RCU is not set
202# CONFIG_RCU_TRACE is not set
203CONFIG_RCU_FANOUT=64
204# CONFIG_RCU_FANOUT_EXACT is not set
205# CONFIG_TREE_RCU_TRACE is not set
169CONFIG_IKCONFIG=y 206CONFIG_IKCONFIG=y
170CONFIG_IKCONFIG_PROC=y 207CONFIG_IKCONFIG_PROC=y
171CONFIG_LOG_BUF_SHIFT=15 208CONFIG_LOG_BUF_SHIFT=15
209# CONFIG_GROUP_SCHED is not set
172CONFIG_CGROUPS=y 210CONFIG_CGROUPS=y
211# CONFIG_CGROUP_DEBUG is not set
212# CONFIG_CGROUP_NS is not set
213# CONFIG_CGROUP_FREEZER is not set
214# CONFIG_CGROUP_DEVICE is not set
173CONFIG_CPUSETS=y 215CONFIG_CPUSETS=y
174CONFIG_SYSFS_DEPRECATED=y 216CONFIG_PROC_PID_CPUSET=y
217# CONFIG_CGROUP_CPUACCT is not set
218# CONFIG_RESOURCE_COUNTERS is not set
219# CONFIG_SYSFS_DEPRECATED_V2 is not set
175CONFIG_RELAY=y 220CONFIG_RELAY=y
221# CONFIG_NAMESPACES is not set
176# CONFIG_BLK_DEV_INITRD is not set 222# CONFIG_BLK_DEV_INITRD is not set
177# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 223# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
178CONFIG_SYSCTL=y 224CONFIG_SYSCTL=y
225CONFIG_ANON_INODES=y
179CONFIG_EMBEDDED=y 226CONFIG_EMBEDDED=y
180CONFIG_SYSCTL_SYSCALL=y 227CONFIG_SYSCTL_SYSCALL=y
181CONFIG_KALLSYMS=y 228CONFIG_KALLSYMS=y
@@ -184,44 +231,92 @@ CONFIG_HOTPLUG=y
184CONFIG_PRINTK=y 231CONFIG_PRINTK=y
185CONFIG_BUG=y 232CONFIG_BUG=y
186CONFIG_ELF_CORE=y 233CONFIG_ELF_CORE=y
234# CONFIG_PCSPKR_PLATFORM is not set
187CONFIG_BASE_FULL=y 235CONFIG_BASE_FULL=y
188CONFIG_FUTEX=y 236CONFIG_FUTEX=y
189CONFIG_ANON_INODES=y
190CONFIG_EPOLL=y 237CONFIG_EPOLL=y
191CONFIG_SIGNALFD=y 238CONFIG_SIGNALFD=y
192CONFIG_TIMERFD=y 239CONFIG_TIMERFD=y
193CONFIG_EVENTFD=y 240CONFIG_EVENTFD=y
194CONFIG_SHMEM=y 241CONFIG_SHMEM=y
242CONFIG_AIO=y
243
244#
245# Kernel Performance Events And Counters
246#
195CONFIG_VM_EVENT_COUNTERS=y 247CONFIG_VM_EVENT_COUNTERS=y
248CONFIG_PCI_QUIRKS=y
249CONFIG_COMPAT_BRK=y
196CONFIG_SLAB=y 250CONFIG_SLAB=y
197# CONFIG_SLUB is not set 251# CONFIG_SLUB is not set
198# CONFIG_SLOB is not set 252# CONFIG_SLOB is not set
253# CONFIG_PROFILING is not set
254CONFIG_HAVE_OPROFILE=y
255CONFIG_HAVE_SYSCALL_WRAPPERS=y
256CONFIG_USE_GENERIC_SMP_HELPERS=y
257
258#
259# GCOV-based kernel profiling
260#
261CONFIG_SLOW_WORK=y
262CONFIG_HAVE_GENERIC_DMA_COHERENT=y
263CONFIG_SLABINFO=y
199CONFIG_RT_MUTEXES=y 264CONFIG_RT_MUTEXES=y
200# CONFIG_TINY_SHMEM is not set
201CONFIG_BASE_SMALL=0 265CONFIG_BASE_SMALL=0
202CONFIG_MODULES=y 266CONFIG_MODULES=y
267# CONFIG_MODULE_FORCE_LOAD is not set
203CONFIG_MODULE_UNLOAD=y 268CONFIG_MODULE_UNLOAD=y
204# CONFIG_MODULE_FORCE_UNLOAD is not set 269# CONFIG_MODULE_FORCE_UNLOAD is not set
205# CONFIG_MODVERSIONS is not set 270# CONFIG_MODVERSIONS is not set
206CONFIG_MODULE_SRCVERSION_ALL=y 271CONFIG_MODULE_SRCVERSION_ALL=y
207CONFIG_KMOD=y
208CONFIG_STOP_MACHINE=y 272CONFIG_STOP_MACHINE=y
209CONFIG_BLOCK=y 273CONFIG_BLOCK=y
210# CONFIG_BLK_DEV_IO_TRACE is not set
211# CONFIG_BLK_DEV_BSG is not set 274# CONFIG_BLK_DEV_BSG is not set
275# CONFIG_BLK_DEV_INTEGRITY is not set
276# CONFIG_BLK_CGROUP is not set
277CONFIG_BLOCK_COMPAT=y
212 278
213# 279#
214# IO Schedulers 280# IO Schedulers
215# 281#
216CONFIG_IOSCHED_NOOP=y 282CONFIG_IOSCHED_NOOP=y
217CONFIG_IOSCHED_AS=y
218CONFIG_IOSCHED_DEADLINE=y 283CONFIG_IOSCHED_DEADLINE=y
219CONFIG_IOSCHED_CFQ=y 284CONFIG_IOSCHED_CFQ=y
220CONFIG_DEFAULT_AS=y 285# CONFIG_CFQ_GROUP_IOSCHED is not set
221# CONFIG_DEFAULT_DEADLINE is not set 286# CONFIG_DEFAULT_DEADLINE is not set
222# CONFIG_DEFAULT_CFQ is not set 287CONFIG_DEFAULT_CFQ=y
223# CONFIG_DEFAULT_NOOP is not set 288# CONFIG_DEFAULT_NOOP is not set
224CONFIG_DEFAULT_IOSCHED="anticipatory" 289CONFIG_DEFAULT_IOSCHED="cfq"
290# CONFIG_INLINE_SPIN_TRYLOCK is not set
291# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
292# CONFIG_INLINE_SPIN_LOCK is not set
293# CONFIG_INLINE_SPIN_LOCK_BH is not set
294# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
295# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
296CONFIG_INLINE_SPIN_UNLOCK=y
297# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
298CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
299# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
300# CONFIG_INLINE_READ_TRYLOCK is not set
301# CONFIG_INLINE_READ_LOCK is not set
302# CONFIG_INLINE_READ_LOCK_BH is not set
303# CONFIG_INLINE_READ_LOCK_IRQ is not set
304# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
305CONFIG_INLINE_READ_UNLOCK=y
306# CONFIG_INLINE_READ_UNLOCK_BH is not set
307CONFIG_INLINE_READ_UNLOCK_IRQ=y
308# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
309# CONFIG_INLINE_WRITE_TRYLOCK is not set
310# CONFIG_INLINE_WRITE_LOCK is not set
311# CONFIG_INLINE_WRITE_LOCK_BH is not set
312# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
313# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
314CONFIG_INLINE_WRITE_UNLOCK=y
315# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
316CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
317# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
318CONFIG_MUTEX_SPIN_ON_OWNER=y
319# CONFIG_FREEZER is not set
225 320
226# 321#
227# Bus options (PCI, PCMCIA, EISA, ISA, TC) 322# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -230,11 +325,10 @@ CONFIG_HW_HAS_PCI=y
230CONFIG_PCI=y 325CONFIG_PCI=y
231CONFIG_PCI_DOMAINS=y 326CONFIG_PCI_DOMAINS=y
232# CONFIG_ARCH_SUPPORTS_MSI is not set 327# CONFIG_ARCH_SUPPORTS_MSI is not set
328# CONFIG_PCI_LEGACY is not set
329# CONFIG_PCI_STUB is not set
330# CONFIG_PCI_IOV is not set
233CONFIG_MMU=y 331CONFIG_MMU=y
234
235#
236# PCCARD (PCMCIA/CardBus) support
237#
238# CONFIG_PCCARD is not set 332# CONFIG_PCCARD is not set
239# CONFIG_HOTPLUG_PCI is not set 333# CONFIG_HOTPLUG_PCI is not set
240 334
@@ -242,8 +336,9 @@ CONFIG_MMU=y
242# Executable file formats 336# Executable file formats
243# 337#
244CONFIG_BINFMT_ELF=y 338CONFIG_BINFMT_ELF=y
339CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
340# CONFIG_HAVE_AOUT is not set
245# CONFIG_BINFMT_MISC is not set 341# CONFIG_BINFMT_MISC is not set
246# CONFIG_BUILD_ELF64 is not set
247CONFIG_MIPS32_COMPAT=y 342CONFIG_MIPS32_COMPAT=y
248CONFIG_COMPAT=y 343CONFIG_COMPAT=y
249CONFIG_SYSVIPC_COMPAT=y 344CONFIG_SYSVIPC_COMPAT=y
@@ -255,13 +350,10 @@ CONFIG_BINFMT_ELF32=y
255# Power management options 350# Power management options
256# 351#
257CONFIG_PM=y 352CONFIG_PM=y
258# CONFIG_PM_LEGACY is not set
259# CONFIG_PM_DEBUG is not set 353# CONFIG_PM_DEBUG is not set
260 354# CONFIG_PM_RUNTIME is not set
261#
262# Networking
263#
264CONFIG_NET=y 355CONFIG_NET=y
356CONFIG_COMPAT_NETLINK_MESSAGES=y
265 357
266# 358#
267# Networking options 359# Networking options
@@ -273,6 +365,8 @@ CONFIG_XFRM=y
273CONFIG_XFRM_USER=m 365CONFIG_XFRM_USER=m
274# CONFIG_XFRM_SUB_POLICY is not set 366# CONFIG_XFRM_SUB_POLICY is not set
275CONFIG_XFRM_MIGRATE=y 367CONFIG_XFRM_MIGRATE=y
368CONFIG_XFRM_STATISTICS=y
369CONFIG_XFRM_IPCOMP=m
276CONFIG_NET_KEY=y 370CONFIG_NET_KEY=y
277CONFIG_NET_KEY_MIGRATE=y 371CONFIG_NET_KEY_MIGRATE=y
278CONFIG_INET=y 372CONFIG_INET=y
@@ -292,19 +386,40 @@ CONFIG_IP_PNP=y
292# CONFIG_INET_ESP is not set 386# CONFIG_INET_ESP is not set
293# CONFIG_INET_IPCOMP is not set 387# CONFIG_INET_IPCOMP is not set
294# CONFIG_INET_XFRM_TUNNEL is not set 388# CONFIG_INET_XFRM_TUNNEL is not set
295# CONFIG_INET_TUNNEL is not set 389CONFIG_INET_TUNNEL=m
296CONFIG_INET_XFRM_MODE_TRANSPORT=m 390CONFIG_INET_XFRM_MODE_TRANSPORT=m
297CONFIG_INET_XFRM_MODE_TUNNEL=m 391CONFIG_INET_XFRM_MODE_TUNNEL=m
298CONFIG_INET_XFRM_MODE_BEET=m 392CONFIG_INET_XFRM_MODE_BEET=m
393CONFIG_INET_LRO=y
299CONFIG_INET_DIAG=y 394CONFIG_INET_DIAG=y
300CONFIG_INET_TCP_DIAG=y 395CONFIG_INET_TCP_DIAG=y
301# CONFIG_TCP_CONG_ADVANCED is not set 396# CONFIG_TCP_CONG_ADVANCED is not set
302CONFIG_TCP_CONG_CUBIC=y 397CONFIG_TCP_CONG_CUBIC=y
303CONFIG_DEFAULT_TCP_CONG="cubic" 398CONFIG_DEFAULT_TCP_CONG="cubic"
304CONFIG_TCP_MD5SIG=y 399CONFIG_TCP_MD5SIG=y
305# CONFIG_IPV6 is not set 400CONFIG_IPV6=y
306# CONFIG_INET6_XFRM_TUNNEL is not set 401CONFIG_IPV6_PRIVACY=y
307# CONFIG_INET6_TUNNEL is not set 402CONFIG_IPV6_ROUTER_PREF=y
403CONFIG_IPV6_ROUTE_INFO=y
404CONFIG_IPV6_OPTIMISTIC_DAD=y
405CONFIG_INET6_AH=m
406CONFIG_INET6_ESP=m
407CONFIG_INET6_IPCOMP=m
408CONFIG_IPV6_MIP6=m
409CONFIG_INET6_XFRM_TUNNEL=m
410CONFIG_INET6_TUNNEL=m
411CONFIG_INET6_XFRM_MODE_TRANSPORT=m
412CONFIG_INET6_XFRM_MODE_TUNNEL=m
413CONFIG_INET6_XFRM_MODE_BEET=m
414CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
415CONFIG_IPV6_SIT=m
416CONFIG_IPV6_SIT_6RD=y
417CONFIG_IPV6_NDISC_NODETYPE=y
418CONFIG_IPV6_TUNNEL=m
419CONFIG_IPV6_MULTIPLE_TABLES=y
420CONFIG_IPV6_SUBTREES=y
421CONFIG_IPV6_MROUTE=y
422CONFIG_IPV6_PIMSM_V2=y
308CONFIG_NETWORK_SECMARK=y 423CONFIG_NETWORK_SECMARK=y
309# CONFIG_NETFILTER is not set 424# CONFIG_NETFILTER is not set
310# CONFIG_IP_DCCP is not set 425# CONFIG_IP_DCCP is not set
@@ -314,9 +429,11 @@ CONFIG_IP_SCTP=m
314# CONFIG_SCTP_HMAC_NONE is not set 429# CONFIG_SCTP_HMAC_NONE is not set
315# CONFIG_SCTP_HMAC_SHA1 is not set 430# CONFIG_SCTP_HMAC_SHA1 is not set
316CONFIG_SCTP_HMAC_MD5=y 431CONFIG_SCTP_HMAC_MD5=y
432# CONFIG_RDS is not set
317# CONFIG_TIPC is not set 433# CONFIG_TIPC is not set
318# CONFIG_ATM is not set 434# CONFIG_ATM is not set
319# CONFIG_BRIDGE is not set 435# CONFIG_BRIDGE is not set
436# CONFIG_NET_DSA is not set
320# CONFIG_VLAN_8021Q is not set 437# CONFIG_VLAN_8021Q is not set
321# CONFIG_DECNET is not set 438# CONFIG_DECNET is not set
322# CONFIG_LLC2 is not set 439# CONFIG_LLC2 is not set
@@ -326,12 +443,9 @@ CONFIG_SCTP_HMAC_MD5=y
326# CONFIG_LAPB is not set 443# CONFIG_LAPB is not set
327# CONFIG_ECONET is not set 444# CONFIG_ECONET is not set
328# CONFIG_WAN_ROUTER is not set 445# CONFIG_WAN_ROUTER is not set
329 446# CONFIG_PHONET is not set
330# 447# CONFIG_IEEE802154 is not set
331# QoS and/or fair queueing
332#
333CONFIG_NET_SCHED=y 448CONFIG_NET_SCHED=y
334CONFIG_NET_SCH_FIFO=y
335 449
336# 450#
337# Queueing/Scheduling 451# Queueing/Scheduling
@@ -340,7 +454,7 @@ CONFIG_NET_SCH_CBQ=m
340CONFIG_NET_SCH_HTB=m 454CONFIG_NET_SCH_HTB=m
341CONFIG_NET_SCH_HFSC=m 455CONFIG_NET_SCH_HFSC=m
342CONFIG_NET_SCH_PRIO=m 456CONFIG_NET_SCH_PRIO=m
343CONFIG_NET_SCH_RR=m 457CONFIG_NET_SCH_MULTIQ=y
344CONFIG_NET_SCH_RED=m 458CONFIG_NET_SCH_RED=m
345CONFIG_NET_SCH_SFQ=m 459CONFIG_NET_SCH_SFQ=m
346CONFIG_NET_SCH_TEQL=m 460CONFIG_NET_SCH_TEQL=m
@@ -348,6 +462,7 @@ CONFIG_NET_SCH_TBF=m
348CONFIG_NET_SCH_GRED=m 462CONFIG_NET_SCH_GRED=m
349CONFIG_NET_SCH_DSMARK=m 463CONFIG_NET_SCH_DSMARK=m
350CONFIG_NET_SCH_NETEM=m 464CONFIG_NET_SCH_NETEM=m
465# CONFIG_NET_SCH_DRR is not set
351CONFIG_NET_SCH_INGRESS=m 466CONFIG_NET_SCH_INGRESS=m
352 467
353# 468#
@@ -364,41 +479,63 @@ CONFIG_NET_CLS_U32=m
364CONFIG_CLS_U32_MARK=y 479CONFIG_CLS_U32_MARK=y
365CONFIG_NET_CLS_RSVP=m 480CONFIG_NET_CLS_RSVP=m
366CONFIG_NET_CLS_RSVP6=m 481CONFIG_NET_CLS_RSVP6=m
482CONFIG_NET_CLS_FLOW=m
483CONFIG_NET_CLS_CGROUP=y
367# CONFIG_NET_EMATCH is not set 484# CONFIG_NET_EMATCH is not set
368CONFIG_NET_CLS_ACT=y 485CONFIG_NET_CLS_ACT=y
369CONFIG_NET_ACT_POLICE=y 486CONFIG_NET_ACT_POLICE=y
370CONFIG_NET_ACT_GACT=m 487CONFIG_NET_ACT_GACT=m
371CONFIG_GACT_PROB=y 488CONFIG_GACT_PROB=y
372CONFIG_NET_ACT_MIRRED=m 489CONFIG_NET_ACT_MIRRED=m
490CONFIG_NET_ACT_NAT=m
373CONFIG_NET_ACT_PEDIT=m 491CONFIG_NET_ACT_PEDIT=m
374# CONFIG_NET_ACT_SIMP is not set 492# CONFIG_NET_ACT_SIMP is not set
375CONFIG_NET_CLS_POLICE=y 493CONFIG_NET_ACT_SKBEDIT=m
376# CONFIG_NET_CLS_IND is not set 494# CONFIG_NET_CLS_IND is not set
495CONFIG_NET_SCH_FIFO=y
496# CONFIG_DCB is not set
377 497
378# 498#
379# Network testing 499# Network testing
380# 500#
381# CONFIG_NET_PKTGEN is not set 501# CONFIG_NET_PKTGEN is not set
382# CONFIG_HAMRADIO is not set 502# CONFIG_HAMRADIO is not set
503# CONFIG_CAN is not set
383# CONFIG_IRDA is not set 504# CONFIG_IRDA is not set
384# CONFIG_BT is not set 505# CONFIG_BT is not set
385# CONFIG_AF_RXRPC is not set 506# CONFIG_AF_RXRPC is not set
386 507CONFIG_FIB_RULES=y
387# 508CONFIG_WIRELESS=y
388# Wireless
389#
390CONFIG_CFG80211=m
391CONFIG_WIRELESS_EXT=y 509CONFIG_WIRELESS_EXT=y
510CONFIG_WEXT_CORE=y
511CONFIG_WEXT_PROC=y
512CONFIG_WEXT_SPY=y
513CONFIG_WEXT_PRIV=y
514CONFIG_CFG80211=m
515# CONFIG_NL80211_TESTMODE is not set
516# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
517# CONFIG_CFG80211_REG_DEBUG is not set
518CONFIG_CFG80211_DEFAULT_PS=y
519# CONFIG_WIRELESS_OLD_REGULATORY is not set
520CONFIG_CFG80211_WEXT=y
521CONFIG_WIRELESS_EXT_SYSFS=y
522CONFIG_LIB80211=m
523CONFIG_LIB80211_CRYPT_WEP=m
524CONFIG_LIB80211_CRYPT_CCMP=m
525CONFIG_LIB80211_CRYPT_TKIP=m
526# CONFIG_LIB80211_DEBUG is not set
392CONFIG_MAC80211=m 527CONFIG_MAC80211=m
393# CONFIG_MAC80211_DEBUG is not set 528CONFIG_MAC80211_RC_PID=y
394CONFIG_IEEE80211=m 529CONFIG_MAC80211_RC_MINSTREL=y
395# CONFIG_IEEE80211_DEBUG is not set 530# CONFIG_MAC80211_RC_DEFAULT_PID is not set
396CONFIG_IEEE80211_CRYPT_WEP=m 531CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
397CONFIG_IEEE80211_CRYPT_CCMP=m 532CONFIG_MAC80211_RC_DEFAULT="minstrel"
398CONFIG_IEEE80211_CRYPT_TKIP=m 533# CONFIG_MAC80211_MESH is not set
399CONFIG_IEEE80211_SOFTMAC=m 534CONFIG_MAC80211_LEDS=y
400# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set 535# CONFIG_MAC80211_DEBUG_MENU is not set
536# CONFIG_WIMAX is not set
401CONFIG_RFKILL=m 537CONFIG_RFKILL=m
538CONFIG_RFKILL_LEDS=y
402# CONFIG_NET_9P is not set 539# CONFIG_NET_9P is not set
403 540
404# 541#
@@ -408,9 +545,13 @@ CONFIG_RFKILL=m
408# 545#
409# Generic Driver Options 546# Generic Driver Options
410# 547#
548CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
549# CONFIG_DEVTMPFS is not set
411CONFIG_STANDALONE=y 550CONFIG_STANDALONE=y
412CONFIG_PREVENT_FIRMWARE_BUILD=y 551CONFIG_PREVENT_FIRMWARE_BUILD=y
413CONFIG_FW_LOADER=y 552CONFIG_FW_LOADER=y
553CONFIG_FIRMWARE_IN_KERNEL=y
554CONFIG_EXTRA_FIRMWARE=""
414# CONFIG_SYS_HYPERVISOR is not set 555# CONFIG_SYS_HYPERVISOR is not set
415CONFIG_CONNECTOR=m 556CONFIG_CONNECTOR=m
416# CONFIG_MTD is not set 557# CONFIG_MTD is not set
@@ -423,14 +564,19 @@ CONFIG_BLK_DEV=y
423# CONFIG_BLK_DEV_COW_COMMON is not set 564# CONFIG_BLK_DEV_COW_COMMON is not set
424CONFIG_BLK_DEV_LOOP=y 565CONFIG_BLK_DEV_LOOP=y
425CONFIG_BLK_DEV_CRYPTOLOOP=m 566CONFIG_BLK_DEV_CRYPTOLOOP=m
567# CONFIG_BLK_DEV_DRBD is not set
426# CONFIG_BLK_DEV_NBD is not set 568# CONFIG_BLK_DEV_NBD is not set
569CONFIG_BLK_DEV_OSD=m
427# CONFIG_BLK_DEV_SX8 is not set 570# CONFIG_BLK_DEV_SX8 is not set
428# CONFIG_BLK_DEV_RAM is not set 571# CONFIG_BLK_DEV_RAM is not set
429CONFIG_CDROM_PKTCDVD=m 572CONFIG_CDROM_PKTCDVD=m
430CONFIG_CDROM_PKTCDVD_BUFFERS=8 573CONFIG_CDROM_PKTCDVD_BUFFERS=8
431# CONFIG_CDROM_PKTCDVD_WCACHE is not set 574# CONFIG_CDROM_PKTCDVD_WCACHE is not set
432CONFIG_ATA_OVER_ETH=m 575CONFIG_ATA_OVER_ETH=m
576# CONFIG_BLK_DEV_HD is not set
433# CONFIG_MISC_DEVICES is not set 577# CONFIG_MISC_DEVICES is not set
578CONFIG_EEPROM_93CX6=m
579CONFIG_HAVE_IDE=y
434# CONFIG_IDE is not set 580# CONFIG_IDE is not set
435 581
436# 582#
@@ -453,10 +599,6 @@ CONFIG_BLK_DEV_SR=m
453CONFIG_BLK_DEV_SR_VENDOR=y 599CONFIG_BLK_DEV_SR_VENDOR=y
454CONFIG_CHR_DEV_SG=m 600CONFIG_CHR_DEV_SG=m
455CONFIG_CHR_DEV_SCH=m 601CONFIG_CHR_DEV_SCH=m
456
457#
458# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
459#
460# CONFIG_SCSI_MULTI_LUN is not set 602# CONFIG_SCSI_MULTI_LUN is not set
461CONFIG_SCSI_CONSTANTS=y 603CONFIG_SCSI_CONSTANTS=y
462CONFIG_SCSI_LOGGING=y 604CONFIG_SCSI_LOGGING=y
@@ -471,11 +613,18 @@ CONFIG_SCSI_FC_ATTRS=y
471CONFIG_SCSI_ISCSI_ATTRS=m 613CONFIG_SCSI_ISCSI_ATTRS=m
472CONFIG_SCSI_SAS_ATTRS=m 614CONFIG_SCSI_SAS_ATTRS=m
473CONFIG_SCSI_SAS_LIBSAS=m 615CONFIG_SCSI_SAS_LIBSAS=m
616CONFIG_SCSI_SAS_HOST_SMP=y
474# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set 617# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
618# CONFIG_SCSI_SRP_ATTRS is not set
475CONFIG_SCSI_LOWLEVEL=y 619CONFIG_SCSI_LOWLEVEL=y
476# CONFIG_ISCSI_TCP is not set 620# CONFIG_ISCSI_TCP is not set
621CONFIG_SCSI_CXGB3_ISCSI=m
622CONFIG_SCSI_BNX2_ISCSI=m
623CONFIG_BE2ISCSI=m
477# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 624# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
625CONFIG_SCSI_HPSA=m
478# CONFIG_SCSI_3W_9XXX is not set 626# CONFIG_SCSI_3W_9XXX is not set
627CONFIG_SCSI_3W_SAS=m
479# CONFIG_SCSI_ACARD is not set 628# CONFIG_SCSI_ACARD is not set
480# CONFIG_SCSI_AACRAID is not set 629# CONFIG_SCSI_AACRAID is not set
481# CONFIG_SCSI_AIC7XXX is not set 630# CONFIG_SCSI_AIC7XXX is not set
@@ -483,11 +632,21 @@ CONFIG_SCSI_LOWLEVEL=y
483# CONFIG_SCSI_AIC79XX is not set 632# CONFIG_SCSI_AIC79XX is not set
484CONFIG_SCSI_AIC94XX=m 633CONFIG_SCSI_AIC94XX=m
485# CONFIG_AIC94XX_DEBUG is not set 634# CONFIG_AIC94XX_DEBUG is not set
635CONFIG_SCSI_MVSAS=m
636# CONFIG_SCSI_MVSAS_DEBUG is not set
637CONFIG_SCSI_DPT_I2O=m
638# CONFIG_SCSI_ADVANSYS is not set
486# CONFIG_SCSI_ARCMSR is not set 639# CONFIG_SCSI_ARCMSR is not set
487# CONFIG_MEGARAID_NEWGEN is not set 640# CONFIG_MEGARAID_NEWGEN is not set
488# CONFIG_MEGARAID_LEGACY is not set 641# CONFIG_MEGARAID_LEGACY is not set
489# CONFIG_MEGARAID_SAS is not set 642# CONFIG_MEGARAID_SAS is not set
643CONFIG_SCSI_MPT2SAS=m
644CONFIG_SCSI_MPT2SAS_MAX_SGE=128
645# CONFIG_SCSI_MPT2SAS_LOGGING is not set
490# CONFIG_SCSI_HPTIOP is not set 646# CONFIG_SCSI_HPTIOP is not set
647CONFIG_LIBFC=m
648# CONFIG_LIBFCOE is not set
649# CONFIG_FCOE is not set
491# CONFIG_SCSI_DMX3191D is not set 650# CONFIG_SCSI_DMX3191D is not set
492# CONFIG_SCSI_FUTURE_DOMAIN is not set 651# CONFIG_SCSI_FUTURE_DOMAIN is not set
493# CONFIG_SCSI_IPS is not set 652# CONFIG_SCSI_IPS is not set
@@ -502,16 +661,31 @@ CONFIG_SCSI_QLOGIC_1280=y
502# CONFIG_SCSI_DC395x is not set 661# CONFIG_SCSI_DC395x is not set
503# CONFIG_SCSI_DC390T is not set 662# CONFIG_SCSI_DC390T is not set
504# CONFIG_SCSI_DEBUG is not set 663# CONFIG_SCSI_DEBUG is not set
664CONFIG_SCSI_PMCRAID=m
665# CONFIG_SCSI_PM8001 is not set
505# CONFIG_SCSI_SRP is not set 666# CONFIG_SCSI_SRP is not set
667CONFIG_SCSI_BFA_FC=m
668CONFIG_SCSI_DH=m
669CONFIG_SCSI_DH_RDAC=m
670CONFIG_SCSI_DH_HP_SW=m
671CONFIG_SCSI_DH_EMC=m
672CONFIG_SCSI_DH_ALUA=m
673CONFIG_SCSI_OSD_INITIATOR=m
674CONFIG_SCSI_OSD_ULD=m
675CONFIG_SCSI_OSD_DPRINT_SENSE=1
676# CONFIG_SCSI_OSD_DEBUG is not set
506# CONFIG_ATA is not set 677# CONFIG_ATA is not set
507CONFIG_MD=y 678CONFIG_MD=y
508CONFIG_BLK_DEV_MD=y 679CONFIG_BLK_DEV_MD=y
680CONFIG_MD_AUTODETECT=y
509CONFIG_MD_LINEAR=m 681CONFIG_MD_LINEAR=m
510CONFIG_MD_RAID0=y 682CONFIG_MD_RAID0=y
511CONFIG_MD_RAID1=y 683CONFIG_MD_RAID1=y
512CONFIG_MD_RAID10=m 684CONFIG_MD_RAID10=m
513CONFIG_MD_RAID456=y 685CONFIG_MD_RAID456=y
514CONFIG_MD_RAID5_RESHAPE=y 686# CONFIG_MULTICORE_RAID456 is not set
687CONFIG_MD_RAID6_PQ=y
688# CONFIG_ASYNC_RAID6_TEST is not set
515CONFIG_MD_MULTIPATH=m 689CONFIG_MD_MULTIPATH=m
516CONFIG_MD_FAULTY=m 690CONFIG_MD_FAULTY=m
517CONFIG_BLK_DEV_DM=m 691CONFIG_BLK_DEV_DM=m
@@ -519,36 +693,39 @@ CONFIG_BLK_DEV_DM=m
519CONFIG_DM_CRYPT=m 693CONFIG_DM_CRYPT=m
520CONFIG_DM_SNAPSHOT=m 694CONFIG_DM_SNAPSHOT=m
521CONFIG_DM_MIRROR=m 695CONFIG_DM_MIRROR=m
696CONFIG_DM_LOG_USERSPACE=m
522CONFIG_DM_ZERO=m 697CONFIG_DM_ZERO=m
523CONFIG_DM_MULTIPATH=m 698CONFIG_DM_MULTIPATH=m
524CONFIG_DM_MULTIPATH_EMC=m 699CONFIG_DM_MULTIPATH_QL=m
525CONFIG_DM_MULTIPATH_RDAC=m 700CONFIG_DM_MULTIPATH_ST=m
526# CONFIG_DM_DELAY is not set 701# CONFIG_DM_DELAY is not set
702CONFIG_DM_UEVENT=y
703# CONFIG_FUSION is not set
527 704
528# 705#
529# Fusion MPT device support 706# IEEE 1394 (FireWire) support
530# 707#
531# CONFIG_FUSION is not set
532# CONFIG_FUSION_SPI is not set
533# CONFIG_FUSION_FC is not set
534# CONFIG_FUSION_SAS is not set
535 708
536# 709#
537# IEEE 1394 (FireWire) support 710# You can enable one or both FireWire driver stacks.
711#
712
713#
714# The newer stack is recommended.
538# 715#
539# CONFIG_FIREWIRE is not set 716# CONFIG_FIREWIRE is not set
540# CONFIG_IEEE1394 is not set 717# CONFIG_IEEE1394 is not set
541# CONFIG_I2O is not set 718# CONFIG_I2O is not set
542CONFIG_NETDEVICES=y 719CONFIG_NETDEVICES=y
543CONFIG_NETDEVICES_MULTIQUEUE=y
544CONFIG_IFB=m 720CONFIG_IFB=m
545# CONFIG_DUMMY is not set 721# CONFIG_DUMMY is not set
546# CONFIG_BONDING is not set 722# CONFIG_BONDING is not set
547CONFIG_MACVLAN=m 723CONFIG_MACVLAN=m
548# CONFIG_EQUALIZER is not set 724# CONFIG_EQUALIZER is not set
549# CONFIG_TUN is not set 725# CONFIG_TUN is not set
726CONFIG_VETH=m
550# CONFIG_ARCNET is not set 727# CONFIG_ARCNET is not set
551CONFIG_PHYLIB=m 728CONFIG_PHYLIB=y
552 729
553# 730#
554# MII PHY device drivers 731# MII PHY device drivers
@@ -562,23 +739,51 @@ CONFIG_VITESSE_PHY=m
562CONFIG_SMSC_PHY=m 739CONFIG_SMSC_PHY=m
563# CONFIG_BROADCOM_PHY is not set 740# CONFIG_BROADCOM_PHY is not set
564CONFIG_ICPLUS_PHY=m 741CONFIG_ICPLUS_PHY=m
742CONFIG_REALTEK_PHY=m
743CONFIG_NATIONAL_PHY=m
744CONFIG_STE10XP=m
745CONFIG_LSI_ET1011C_PHY=m
565# CONFIG_FIXED_PHY is not set 746# CONFIG_FIXED_PHY is not set
747CONFIG_MDIO_BITBANG=m
566CONFIG_NET_ETHERNET=y 748CONFIG_NET_ETHERNET=y
567CONFIG_MII=y 749CONFIG_MII=y
568CONFIG_AX88796=m 750CONFIG_AX88796=m
751CONFIG_AX88796_93CX6=y
569CONFIG_SGI_IOC3_ETH=y 752CONFIG_SGI_IOC3_ETH=y
570# CONFIG_HAPPYMEAL is not set 753# CONFIG_HAPPYMEAL is not set
571# CONFIG_SUNGEM is not set 754# CONFIG_SUNGEM is not set
572# CONFIG_CASSINI is not set 755# CONFIG_CASSINI is not set
573# CONFIG_NET_VENDOR_3COM is not set 756# CONFIG_NET_VENDOR_3COM is not set
757CONFIG_SMC91X=m
574# CONFIG_DM9000 is not set 758# CONFIG_DM9000 is not set
759CONFIG_ETHOC=m
760CONFIG_SMSC911X=m
761CONFIG_DNET=m
575# CONFIG_NET_TULIP is not set 762# CONFIG_NET_TULIP is not set
576# CONFIG_HP100 is not set 763# CONFIG_HP100 is not set
764# CONFIG_IBM_NEW_EMAC_ZMII is not set
765# CONFIG_IBM_NEW_EMAC_RGMII is not set
766# CONFIG_IBM_NEW_EMAC_TAH is not set
767# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
768# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
769# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
770# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
577# CONFIG_NET_PCI is not set 771# CONFIG_NET_PCI is not set
772CONFIG_B44=m
773CONFIG_B44_PCI_AUTOSELECT=y
774CONFIG_B44_PCICORE_AUTOSELECT=y
775CONFIG_B44_PCI=y
776CONFIG_KS8842=m
777CONFIG_KS8851_MLL=m
778CONFIG_ATL2=m
578CONFIG_NETDEV_1000=y 779CONFIG_NETDEV_1000=y
579# CONFIG_ACENIC is not set 780# CONFIG_ACENIC is not set
580# CONFIG_DL2K is not set 781# CONFIG_DL2K is not set
581# CONFIG_E1000 is not set 782# CONFIG_E1000 is not set
783CONFIG_E1000E=m
784CONFIG_IP1000=m
785CONFIG_IGB=m
786CONFIG_IGBVF=m
582# CONFIG_NS83820 is not set 787# CONFIG_NS83820 is not set
583# CONFIG_HAMACHI is not set 788# CONFIG_HAMACHI is not set
584# CONFIG_YELLOWFIN is not set 789# CONFIG_YELLOWFIN is not set
@@ -588,24 +793,75 @@ CONFIG_NETDEV_1000=y
588# CONFIG_SKY2 is not set 793# CONFIG_SKY2 is not set
589CONFIG_VIA_VELOCITY=m 794CONFIG_VIA_VELOCITY=m
590# CONFIG_TIGON3 is not set 795# CONFIG_TIGON3 is not set
591# CONFIG_BNX2 is not set 796CONFIG_BNX2=m
797CONFIG_CNIC=m
592CONFIG_QLA3XXX=m 798CONFIG_QLA3XXX=m
593# CONFIG_ATL1 is not set 799# CONFIG_ATL1 is not set
800CONFIG_ATL1E=m
801CONFIG_ATL1C=m
802CONFIG_JME=m
594CONFIG_NETDEV_10000=y 803CONFIG_NETDEV_10000=y
804CONFIG_MDIO=m
595# CONFIG_CHELSIO_T1 is not set 805# CONFIG_CHELSIO_T1 is not set
806CONFIG_CHELSIO_T3_DEPENDS=y
596CONFIG_CHELSIO_T3=m 807CONFIG_CHELSIO_T3=m
808CONFIG_ENIC=m
809CONFIG_IXGBE=m
597# CONFIG_IXGB is not set 810# CONFIG_IXGB is not set
598# CONFIG_S2IO is not set 811# CONFIG_S2IO is not set
812CONFIG_VXGE=m
813# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
599# CONFIG_MYRI10GE is not set 814# CONFIG_MYRI10GE is not set
600CONFIG_NETXEN_NIC=m 815CONFIG_NETXEN_NIC=m
601# CONFIG_MLX4_CORE is not set 816CONFIG_NIU=m
817CONFIG_MLX4_EN=m
818CONFIG_MLX4_CORE=m
819# CONFIG_MLX4_DEBUG is not set
820CONFIG_TEHUTI=m
821CONFIG_BNX2X=m
822CONFIG_QLGE=m
823CONFIG_SFC=m
824CONFIG_BE2NET=m
602# CONFIG_TR is not set 825# CONFIG_TR is not set
603 826CONFIG_WLAN=y
604# 827CONFIG_LIBERTAS_THINFIRM=m
605# Wireless LAN 828CONFIG_ATMEL=m
606# 829CONFIG_PCI_ATMEL=m
607# CONFIG_WLAN_PRE80211 is not set 830CONFIG_PRISM54=m
608CONFIG_WLAN_80211=y 831CONFIG_RTL8180=m
832CONFIG_ADM8211=m
833# CONFIG_MAC80211_HWSIM is not set
834CONFIG_MWL8K=m
835CONFIG_ATH_COMMON=m
836# CONFIG_ATH_DEBUG is not set
837CONFIG_ATH5K=m
838# CONFIG_ATH5K_DEBUG is not set
839CONFIG_ATH9K_HW=m
840CONFIG_ATH9K_COMMON=m
841CONFIG_ATH9K=m
842CONFIG_B43=m
843CONFIG_B43_PCI_AUTOSELECT=y
844CONFIG_B43_PCICORE_AUTOSELECT=y
845CONFIG_B43_PHY_LP=y
846CONFIG_B43_LEDS=y
847CONFIG_B43_HWRNG=y
848# CONFIG_B43_DEBUG is not set
849CONFIG_B43LEGACY=m
850CONFIG_B43LEGACY_PCI_AUTOSELECT=y
851CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
852CONFIG_B43LEGACY_LEDS=y
853CONFIG_B43LEGACY_HWRNG=y
854# CONFIG_B43LEGACY_DEBUG is not set
855CONFIG_B43LEGACY_DMA=y
856CONFIG_B43LEGACY_PIO=y
857CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
858# CONFIG_B43LEGACY_DMA_MODE is not set
859# CONFIG_B43LEGACY_PIO_MODE is not set
860CONFIG_HOSTAP=m
861CONFIG_HOSTAP_FIRMWARE=y
862CONFIG_HOSTAP_FIRMWARE_NVRAM=y
863CONFIG_HOSTAP_PLX=m
864CONFIG_HOSTAP_PCI=m
609CONFIG_IPW2100=m 865CONFIG_IPW2100=m
610CONFIG_IPW2100_MONITOR=y 866CONFIG_IPW2100_MONITOR=y
611CONFIG_IPW2100_DEBUG=y 867CONFIG_IPW2100_DEBUG=y
@@ -615,38 +871,57 @@ CONFIG_IPW2200_RADIOTAP=y
615CONFIG_IPW2200_PROMISCUOUS=y 871CONFIG_IPW2200_PROMISCUOUS=y
616CONFIG_IPW2200_QOS=y 872CONFIG_IPW2200_QOS=y
617CONFIG_IPW2200_DEBUG=y 873CONFIG_IPW2200_DEBUG=y
874CONFIG_LIBIPW=m
875# CONFIG_LIBIPW_DEBUG is not set
876CONFIG_IWLWIFI=m
877CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y
878# CONFIG_IWLWIFI_DEBUG is not set
879CONFIG_IWLAGN=m
880CONFIG_IWL4965=y
881CONFIG_IWL5000=y
882CONFIG_IWL3945=m
883CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y
618CONFIG_LIBERTAS=m 884CONFIG_LIBERTAS=m
619# CONFIG_LIBERTAS_DEBUG is not set 885# CONFIG_LIBERTAS_DEBUG is not set
620CONFIG_HERMES=m 886CONFIG_HERMES=m
887# CONFIG_HERMES_CACHE_FW_ON_INIT is not set
621CONFIG_PLX_HERMES=m 888CONFIG_PLX_HERMES=m
622CONFIG_TMD_HERMES=m 889CONFIG_TMD_HERMES=m
623CONFIG_NORTEL_HERMES=m 890CONFIG_NORTEL_HERMES=m
624CONFIG_PCI_HERMES=m 891CONFIG_PCI_HERMES=m
625CONFIG_ATMEL=m 892CONFIG_P54_COMMON=m
626CONFIG_PCI_ATMEL=m 893CONFIG_P54_PCI=m
627CONFIG_PRISM54=m 894CONFIG_P54_LEDS=y
628CONFIG_HOSTAP=m 895CONFIG_RT2X00=m
629CONFIG_HOSTAP_FIRMWARE=y 896CONFIG_RT2400PCI=m
630CONFIG_HOSTAP_FIRMWARE_NVRAM=y 897CONFIG_RT2500PCI=m
631CONFIG_HOSTAP_PLX=m 898CONFIG_RT61PCI=m
632CONFIG_HOSTAP_PCI=m 899CONFIG_RT2800PCI_PCI=m
633CONFIG_BCM43XX=m 900CONFIG_RT2800PCI=m
634CONFIG_BCM43XX_DEBUG=y 901CONFIG_RT2800_LIB=m
635CONFIG_BCM43XX_DMA=y 902CONFIG_RT2X00_LIB_PCI=m
636CONFIG_BCM43XX_PIO=y 903CONFIG_RT2X00_LIB=m
637CONFIG_BCM43XX_DMA_AND_PIO_MODE=y 904CONFIG_RT2X00_LIB_HT=y
638# CONFIG_BCM43XX_DMA_MODE is not set 905CONFIG_RT2X00_LIB_FIRMWARE=y
639# CONFIG_BCM43XX_PIO_MODE is not set 906CONFIG_RT2X00_LIB_CRYPTO=y
907CONFIG_RT2X00_LIB_LEDS=y
908# CONFIG_RT2X00_DEBUG is not set
909CONFIG_WL12XX=m
910CONFIG_WL1251=m
911
912#
913# Enable WiMAX (Networking options) to see the WiMAX drivers
914#
640# CONFIG_WAN is not set 915# CONFIG_WAN is not set
641# CONFIG_FDDI is not set 916# CONFIG_FDDI is not set
642# CONFIG_HIPPI is not set 917# CONFIG_HIPPI is not set
643# CONFIG_PPP is not set 918# CONFIG_PPP is not set
644# CONFIG_SLIP is not set 919# CONFIG_SLIP is not set
645# CONFIG_NET_FC is not set 920# CONFIG_NET_FC is not set
646# CONFIG_SHAPER is not set
647# CONFIG_NETCONSOLE is not set 921# CONFIG_NETCONSOLE is not set
648# CONFIG_NETPOLL is not set 922# CONFIG_NETPOLL is not set
649# CONFIG_NET_POLL_CONTROLLER is not set 923# CONFIG_NET_POLL_CONTROLLER is not set
924# CONFIG_VMXNET3 is not set
650# CONFIG_ISDN is not set 925# CONFIG_ISDN is not set
651# CONFIG_PHONE is not set 926# CONFIG_PHONE is not set
652 927
@@ -664,13 +939,16 @@ CONFIG_SERIO_SERPORT=y
664# CONFIG_SERIO_PCIPS2 is not set 939# CONFIG_SERIO_PCIPS2 is not set
665CONFIG_SERIO_LIBPS2=m 940CONFIG_SERIO_LIBPS2=m
666CONFIG_SERIO_RAW=m 941CONFIG_SERIO_RAW=m
942CONFIG_SERIO_ALTERA_PS2=m
667# CONFIG_GAMEPORT is not set 943# CONFIG_GAMEPORT is not set
668 944
669# 945#
670# Character devices 946# Character devices
671# 947#
672# CONFIG_VT is not set 948# CONFIG_VT is not set
949CONFIG_DEVKMEM=y
673# CONFIG_SERIAL_NONSTANDARD is not set 950# CONFIG_SERIAL_NONSTANDARD is not set
951CONFIG_NOZOMI=m
674 952
675# 953#
676# Serial drivers 954# Serial drivers
@@ -693,95 +971,258 @@ CONFIG_SERIAL_CORE=y
693CONFIG_SERIAL_CORE_CONSOLE=y 971CONFIG_SERIAL_CORE_CONSOLE=y
694# CONFIG_SERIAL_JSM is not set 972# CONFIG_SERIAL_JSM is not set
695CONFIG_UNIX98_PTYS=y 973CONFIG_UNIX98_PTYS=y
974CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
696CONFIG_LEGACY_PTYS=y 975CONFIG_LEGACY_PTYS=y
697CONFIG_LEGACY_PTY_COUNT=256 976CONFIG_LEGACY_PTY_COUNT=256
698# CONFIG_IPMI_HANDLER is not set 977# CONFIG_IPMI_HANDLER is not set
699# CONFIG_WATCHDOG is not set
700CONFIG_HW_RANDOM=m 978CONFIG_HW_RANDOM=m
701# CONFIG_RTC is not set 979CONFIG_HW_RANDOM_TIMERIOMEM=m
702# CONFIG_R3964 is not set 980# CONFIG_R3964 is not set
703# CONFIG_APPLICOM is not set 981# CONFIG_APPLICOM is not set
704# CONFIG_DRM is not set
705# CONFIG_RAW_DRIVER is not set 982# CONFIG_RAW_DRIVER is not set
706# CONFIG_TCG_TPM is not set 983# CONFIG_TCG_TPM is not set
707CONFIG_DEVPORT=y 984CONFIG_DEVPORT=y
708# CONFIG_I2C is not set 985CONFIG_I2C=m
986CONFIG_I2C_BOARDINFO=y
987CONFIG_I2C_COMPAT=y
988CONFIG_I2C_CHARDEV=m
989CONFIG_I2C_HELPER_AUTO=y
990CONFIG_I2C_ALGOBIT=m
991CONFIG_I2C_ALGOPCA=m
992
993#
994# I2C Hardware Bus support
995#
996
997#
998# PC SMBus host controller drivers
999#
1000CONFIG_I2C_ALI1535=m
1001CONFIG_I2C_ALI1563=m
1002CONFIG_I2C_ALI15X3=m
1003CONFIG_I2C_AMD756=m
1004CONFIG_I2C_AMD8111=m
1005CONFIG_I2C_I801=m
1006CONFIG_I2C_ISCH=m
1007CONFIG_I2C_PIIX4=m
1008CONFIG_I2C_NFORCE2=m
1009CONFIG_I2C_SIS5595=m
1010CONFIG_I2C_SIS630=m
1011CONFIG_I2C_SIS96X=m
1012CONFIG_I2C_VIA=m
1013CONFIG_I2C_VIAPRO=m
709 1014
710# 1015#
711# SPI support 1016# I2C system bus drivers (mostly embedded / system-on-chip)
712# 1017#
1018CONFIG_I2C_OCORES=m
1019CONFIG_I2C_SIMTEC=m
1020
1021#
1022# External I2C/SMBus adapter drivers
1023#
1024CONFIG_I2C_PARPORT_LIGHT=m
1025CONFIG_I2C_TAOS_EVM=m
1026
1027#
1028# Other I2C/SMBus bus drivers
1029#
1030CONFIG_I2C_PCA_PLATFORM=m
1031CONFIG_I2C_STUB=m
1032
1033#
1034# Miscellaneous I2C Chip support
1035#
1036CONFIG_SENSORS_TSL2550=m
1037# CONFIG_I2C_DEBUG_CORE is not set
1038# CONFIG_I2C_DEBUG_ALGO is not set
1039# CONFIG_I2C_DEBUG_BUS is not set
1040# CONFIG_I2C_DEBUG_CHIP is not set
713# CONFIG_SPI is not set 1041# CONFIG_SPI is not set
714# CONFIG_SPI_MASTER is not set 1042
1043#
1044# PPS support
1045#
1046CONFIG_PPS=m
1047# CONFIG_PPS_DEBUG is not set
715# CONFIG_W1 is not set 1048# CONFIG_W1 is not set
716# CONFIG_POWER_SUPPLY is not set 1049# CONFIG_POWER_SUPPLY is not set
717# CONFIG_HWMON is not set 1050# CONFIG_HWMON is not set
1051CONFIG_THERMAL=m
1052# CONFIG_WATCHDOG is not set
1053CONFIG_SSB_POSSIBLE=y
718 1054
719# 1055#
720# Multifunction device drivers 1056# Sonics Silicon Backplane
721# 1057#
722# CONFIG_MFD_SM501 is not set 1058CONFIG_SSB=m
1059CONFIG_SSB_SPROM=y
1060CONFIG_SSB_PCIHOST_POSSIBLE=y
1061CONFIG_SSB_PCIHOST=y
1062CONFIG_SSB_B43_PCI_BRIDGE=y
1063# CONFIG_SSB_SILENT is not set
1064# CONFIG_SSB_DEBUG is not set
1065CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
1066CONFIG_SSB_DRIVER_PCICORE=y
1067# CONFIG_SSB_DRIVER_MIPS is not set
723 1068
724# 1069#
725# Multimedia devices 1070# Multifunction device drivers
726# 1071#
727# CONFIG_VIDEO_DEV is not set 1072# CONFIG_MFD_CORE is not set
728# CONFIG_DVB_CORE is not set 1073# CONFIG_MFD_SM501 is not set
729# CONFIG_DAB is not set 1074# CONFIG_HTC_PASIC3 is not set
1075# CONFIG_MFD_TMIO is not set
1076# CONFIG_MFD_WM8400 is not set
1077CONFIG_MFD_WM8350=m
1078CONFIG_MFD_WM8350_I2C=m
1079CONFIG_MFD_PCF50633=m
1080CONFIG_PCF50633_ADC=m
1081CONFIG_PCF50633_GPIO=m
1082CONFIG_AB3100_CORE=m
1083CONFIG_AB3100_OTP=m
1084# CONFIG_REGULATOR is not set
1085# CONFIG_MEDIA_SUPPORT is not set
730 1086
731# 1087#
732# Graphics support 1088# Graphics support
733# 1089#
734# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 1090# CONFIG_VGA_ARB is not set
735 1091# CONFIG_DRM is not set
736#
737# Display device support
738#
739# CONFIG_DISPLAY_SUPPORT is not set
740# CONFIG_VGASTATE is not set 1092# CONFIG_VGASTATE is not set
741# CONFIG_VIDEO_OUTPUT_CONTROL is not set 1093# CONFIG_VIDEO_OUTPUT_CONTROL is not set
742# CONFIG_FB is not set 1094# CONFIG_FB is not set
1095# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
743 1096
744# 1097#
745# Sound 1098# Display device support
746# 1099#
1100# CONFIG_DISPLAY_SUPPORT is not set
747# CONFIG_SOUND is not set 1101# CONFIG_SOUND is not set
748CONFIG_USB_SUPPORT=y 1102CONFIG_USB_SUPPORT=y
749CONFIG_USB_ARCH_HAS_HCD=y 1103CONFIG_USB_ARCH_HAS_HCD=y
750CONFIG_USB_ARCH_HAS_OHCI=y 1104CONFIG_USB_ARCH_HAS_OHCI=y
751CONFIG_USB_ARCH_HAS_EHCI=y 1105CONFIG_USB_ARCH_HAS_EHCI=y
752# CONFIG_USB is not set 1106# CONFIG_USB is not set
1107# CONFIG_USB_OTG_WHITELIST is not set
1108# CONFIG_USB_OTG_BLACKLIST_HUB is not set
753 1109
754# 1110#
755# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1111# Enable Host or Gadget support to see Inventra options
756# 1112#
757 1113
758# 1114#
759# USB Gadget Support 1115# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
760# 1116#
761# CONFIG_USB_GADGET is not set 1117# CONFIG_USB_GADGET is not set
1118
1119#
1120# OTG and related infrastructure
1121#
1122# CONFIG_UWB is not set
762# CONFIG_MMC is not set 1123# CONFIG_MMC is not set
763# CONFIG_NEW_LEDS is not set 1124# CONFIG_MEMSTICK is not set
764# CONFIG_INFINIBAND is not set 1125CONFIG_NEW_LEDS=y
765# CONFIG_RTC_CLASS is not set 1126CONFIG_LEDS_CLASS=m
1127
1128#
1129# LED drivers
1130#
1131CONFIG_LEDS_LP3944=m
1132CONFIG_LEDS_PCA955X=m
1133CONFIG_LEDS_WM8350=m
1134CONFIG_LEDS_BD2802=m
1135
1136#
1137# LED Triggers
1138#
1139CONFIG_LEDS_TRIGGERS=y
1140CONFIG_LEDS_TRIGGER_TIMER=m
1141CONFIG_LEDS_TRIGGER_HEARTBEAT=m
1142CONFIG_LEDS_TRIGGER_BACKLIGHT=m
1143CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
766 1144
767# 1145#
768# DMA Engine support 1146# iptables trigger is under Netfilter config (LED target)
769# 1147#
770# CONFIG_DMA_ENGINE is not set 1148# CONFIG_ACCESSIBILITY is not set
1149# CONFIG_INFINIBAND is not set
1150CONFIG_RTC_LIB=y
1151CONFIG_RTC_CLASS=y
1152CONFIG_RTC_HCTOSYS=y
1153CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1154# CONFIG_RTC_DEBUG is not set
771 1155
772# 1156#
773# DMA Clients 1157# RTC interfaces
774# 1158#
1159CONFIG_RTC_INTF_SYSFS=y
1160CONFIG_RTC_INTF_PROC=y
1161CONFIG_RTC_INTF_DEV=y
1162# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1163# CONFIG_RTC_DRV_TEST is not set
775 1164
776# 1165#
777# DMA Devices 1166# I2C RTC drivers
1167#
1168# CONFIG_RTC_DRV_DS1307 is not set
1169# CONFIG_RTC_DRV_DS1374 is not set
1170# CONFIG_RTC_DRV_DS1672 is not set
1171# CONFIG_RTC_DRV_MAX6900 is not set
1172# CONFIG_RTC_DRV_RS5C372 is not set
1173# CONFIG_RTC_DRV_ISL1208 is not set
1174# CONFIG_RTC_DRV_X1205 is not set
1175# CONFIG_RTC_DRV_PCF8563 is not set
1176# CONFIG_RTC_DRV_PCF8583 is not set
1177# CONFIG_RTC_DRV_M41T80 is not set
1178# CONFIG_RTC_DRV_BQ32K is not set
1179# CONFIG_RTC_DRV_S35390A is not set
1180# CONFIG_RTC_DRV_FM3130 is not set
1181# CONFIG_RTC_DRV_RX8581 is not set
1182# CONFIG_RTC_DRV_RX8025 is not set
1183
1184#
1185# SPI RTC drivers
1186#
1187
778# 1188#
1189# Platform RTC drivers
1190#
1191# CONFIG_RTC_DRV_CMOS is not set
1192# CONFIG_RTC_DRV_DS1286 is not set
1193# CONFIG_RTC_DRV_DS1511 is not set
1194# CONFIG_RTC_DRV_DS1553 is not set
1195# CONFIG_RTC_DRV_DS1742 is not set
1196# CONFIG_RTC_DRV_STK17TA8 is not set
1197# CONFIG_RTC_DRV_M48T86 is not set
1198CONFIG_RTC_DRV_M48T35=y
1199# CONFIG_RTC_DRV_M48T59 is not set
1200# CONFIG_RTC_DRV_MSM6242 is not set
1201# CONFIG_RTC_DRV_BQ4802 is not set
1202# CONFIG_RTC_DRV_RP5C01 is not set
1203# CONFIG_RTC_DRV_V3020 is not set
1204# CONFIG_RTC_DRV_WM8350 is not set
1205# CONFIG_RTC_DRV_PCF50633 is not set
1206CONFIG_RTC_DRV_AB3100=m
779 1207
780# 1208#
781# Userspace I/O 1209# on-CPU RTC drivers
782# 1210#
1211# CONFIG_DMADEVICES is not set
1212# CONFIG_AUXDISPLAY is not set
783CONFIG_UIO=y 1213CONFIG_UIO=y
784# CONFIG_UIO_CIF is not set 1214# CONFIG_UIO_CIF is not set
1215# CONFIG_UIO_PDRV is not set
1216# CONFIG_UIO_PDRV_GENIRQ is not set
1217CONFIG_UIO_SMX=m
1218CONFIG_UIO_AEC=m
1219CONFIG_UIO_SERCOS3=m
1220CONFIG_UIO_PCI_GENERIC=m
1221
1222#
1223# TI VLYNQ
1224#
1225# CONFIG_STAGING is not set
785 1226
786# 1227#
787# File systems 1228# File systems
@@ -792,36 +1233,58 @@ CONFIG_EXT2_FS_POSIX_ACL=y
792CONFIG_EXT2_FS_SECURITY=y 1233CONFIG_EXT2_FS_SECURITY=y
793# CONFIG_EXT2_FS_XIP is not set 1234# CONFIG_EXT2_FS_XIP is not set
794CONFIG_EXT3_FS=y 1235CONFIG_EXT3_FS=y
1236# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
795CONFIG_EXT3_FS_XATTR=y 1237CONFIG_EXT3_FS_XATTR=y
796CONFIG_EXT3_FS_POSIX_ACL=y 1238CONFIG_EXT3_FS_POSIX_ACL=y
797CONFIG_EXT3_FS_SECURITY=y 1239CONFIG_EXT3_FS_SECURITY=y
798# CONFIG_EXT4DEV_FS is not set 1240CONFIG_EXT4_FS=y
1241CONFIG_EXT4_FS_XATTR=y
1242CONFIG_EXT4_FS_POSIX_ACL=y
1243CONFIG_EXT4_FS_SECURITY=y
1244# CONFIG_EXT4_DEBUG is not set
799CONFIG_JBD=y 1245CONFIG_JBD=y
800CONFIG_JBD_DEBUG=y 1246CONFIG_JBD2=y
801CONFIG_FS_MBCACHE=y 1247CONFIG_FS_MBCACHE=y
802# CONFIG_REISERFS_FS is not set 1248# CONFIG_REISERFS_FS is not set
803# CONFIG_JFS_FS is not set 1249# CONFIG_JFS_FS is not set
804CONFIG_FS_POSIX_ACL=y 1250CONFIG_FS_POSIX_ACL=y
805CONFIG_XFS_FS=m 1251CONFIG_XFS_FS=m
806CONFIG_XFS_QUOTA=y 1252CONFIG_XFS_QUOTA=y
807CONFIG_XFS_SECURITY=y
808CONFIG_XFS_POSIX_ACL=y 1253CONFIG_XFS_POSIX_ACL=y
809# CONFIG_XFS_RT is not set 1254# CONFIG_XFS_RT is not set
1255# CONFIG_XFS_DEBUG is not set
810# CONFIG_GFS2_FS is not set 1256# CONFIG_GFS2_FS is not set
811# CONFIG_OCFS2_FS is not set 1257# CONFIG_OCFS2_FS is not set
812# CONFIG_MINIX_FS is not set 1258CONFIG_BTRFS_FS=m
813# CONFIG_ROMFS_FS is not set 1259CONFIG_BTRFS_FS_POSIX_ACL=y
1260# CONFIG_NILFS2_FS is not set
1261CONFIG_FILE_LOCKING=y
1262CONFIG_FSNOTIFY=y
1263CONFIG_DNOTIFY=y
814CONFIG_INOTIFY=y 1264CONFIG_INOTIFY=y
815CONFIG_INOTIFY_USER=y 1265CONFIG_INOTIFY_USER=y
816# CONFIG_QUOTA is not set 1266# CONFIG_QUOTA is not set
1267CONFIG_QUOTA_NETLINK_INTERFACE=y
817CONFIG_QUOTACTL=y 1268CONFIG_QUOTACTL=y
818CONFIG_DNOTIFY=y
819CONFIG_AUTOFS_FS=m 1269CONFIG_AUTOFS_FS=m
820# CONFIG_AUTOFS4_FS is not set 1270# CONFIG_AUTOFS4_FS is not set
821CONFIG_FUSE_FS=m 1271CONFIG_FUSE_FS=m
1272CONFIG_CUSE=m
822CONFIG_GENERIC_ACL=y 1273CONFIG_GENERIC_ACL=y
823 1274
824# 1275#
1276# Caches
1277#
1278CONFIG_FSCACHE=m
1279CONFIG_FSCACHE_STATS=y
1280# CONFIG_FSCACHE_HISTOGRAM is not set
1281# CONFIG_FSCACHE_DEBUG is not set
1282# CONFIG_FSCACHE_OBJECT_LIST is not set
1283CONFIG_CACHEFILES=m
1284# CONFIG_CACHEFILES_DEBUG is not set
1285# CONFIG_CACHEFILES_HISTOGRAM is not set
1286
1287#
825# CD-ROM/DVD Filesystems 1288# CD-ROM/DVD Filesystems
826# 1289#
827# CONFIG_ISO9660_FS is not set 1290# CONFIG_ISO9660_FS is not set
@@ -840,16 +1303,13 @@ CONFIG_GENERIC_ACL=y
840CONFIG_PROC_FS=y 1303CONFIG_PROC_FS=y
841CONFIG_PROC_KCORE=y 1304CONFIG_PROC_KCORE=y
842CONFIG_PROC_SYSCTL=y 1305CONFIG_PROC_SYSCTL=y
1306CONFIG_PROC_PAGE_MONITOR=y
843CONFIG_SYSFS=y 1307CONFIG_SYSFS=y
844CONFIG_TMPFS=y 1308CONFIG_TMPFS=y
845CONFIG_TMPFS_POSIX_ACL=y 1309CONFIG_TMPFS_POSIX_ACL=y
846# CONFIG_HUGETLB_PAGE is not set 1310# CONFIG_HUGETLB_PAGE is not set
847CONFIG_RAMFS=y
848CONFIG_CONFIGFS_FS=m 1311CONFIG_CONFIGFS_FS=m
849 1312CONFIG_MISC_FILESYSTEMS=y
850#
851# Miscellaneous filesystems
852#
853# CONFIG_ADFS_FS is not set 1313# CONFIG_ADFS_FS is not set
854# CONFIG_AFFS_FS is not set 1314# CONFIG_AFFS_FS is not set
855# CONFIG_ECRYPT_FS is not set 1315# CONFIG_ECRYPT_FS is not set
@@ -859,28 +1319,32 @@ CONFIG_CONFIGFS_FS=m
859# CONFIG_BFS_FS is not set 1319# CONFIG_BFS_FS is not set
860# CONFIG_EFS_FS is not set 1320# CONFIG_EFS_FS is not set
861# CONFIG_CRAMFS is not set 1321# CONFIG_CRAMFS is not set
1322CONFIG_SQUASHFS=m
1323# CONFIG_SQUASHFS_EMBEDDED is not set
1324CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
862# CONFIG_VXFS_FS is not set 1325# CONFIG_VXFS_FS is not set
1326# CONFIG_MINIX_FS is not set
1327CONFIG_OMFS_FS=m
863# CONFIG_HPFS_FS is not set 1328# CONFIG_HPFS_FS is not set
864# CONFIG_QNX4FS_FS is not set 1329# CONFIG_QNX4FS_FS is not set
1330# CONFIG_ROMFS_FS is not set
865# CONFIG_SYSV_FS is not set 1331# CONFIG_SYSV_FS is not set
866# CONFIG_UFS_FS is not set 1332# CONFIG_UFS_FS is not set
867 1333CONFIG_EXOFS_FS=m
868# 1334# CONFIG_EXOFS_DEBUG is not set
869# Network File Systems 1335CONFIG_NETWORK_FILESYSTEMS=y
870#
871CONFIG_NFS_FS=y 1336CONFIG_NFS_FS=y
872CONFIG_NFS_V3=y 1337CONFIG_NFS_V3=y
873# CONFIG_NFS_V3_ACL is not set 1338# CONFIG_NFS_V3_ACL is not set
874# CONFIG_NFS_V4 is not set 1339# CONFIG_NFS_V4 is not set
875# CONFIG_NFS_DIRECTIO is not set
876# CONFIG_NFSD is not set
877# CONFIG_ROOT_NFS is not set 1340# CONFIG_ROOT_NFS is not set
1341# CONFIG_NFSD is not set
878CONFIG_LOCKD=y 1342CONFIG_LOCKD=y
879CONFIG_LOCKD_V4=y 1343CONFIG_LOCKD_V4=y
1344CONFIG_EXPORTFS=m
880CONFIG_NFS_COMMON=y 1345CONFIG_NFS_COMMON=y
881CONFIG_SUNRPC=y 1346CONFIG_SUNRPC=y
882CONFIG_SUNRPC_GSS=y 1347CONFIG_SUNRPC_GSS=y
883# CONFIG_SUNRPC_BIND34 is not set
884CONFIG_RPCSEC_GSS_KRB5=y 1348CONFIG_RPCSEC_GSS_KRB5=y
885# CONFIG_RPCSEC_GSS_SPKM3 is not set 1349# CONFIG_RPCSEC_GSS_SPKM3 is not set
886# CONFIG_SMB_FS is not set 1350# CONFIG_SMB_FS is not set
@@ -910,35 +1374,37 @@ CONFIG_SGI_PARTITION=y
910# CONFIG_KARMA_PARTITION is not set 1374# CONFIG_KARMA_PARTITION is not set
911# CONFIG_EFI_PARTITION is not set 1375# CONFIG_EFI_PARTITION is not set
912# CONFIG_SYSV68_PARTITION is not set 1376# CONFIG_SYSV68_PARTITION is not set
913
914#
915# Native Language Support
916#
917# CONFIG_NLS is not set 1377# CONFIG_NLS is not set
918
919#
920# Distributed Lock Manager
921#
922CONFIG_DLM=m 1378CONFIG_DLM=m
923# CONFIG_DLM_DEBUG is not set 1379# CONFIG_DLM_DEBUG is not set
924 1380
925# 1381#
926# Profiling support
927#
928# CONFIG_PROFILING is not set
929
930#
931# Kernel hacking 1382# Kernel hacking
932# 1383#
933CONFIG_TRACE_IRQFLAGS_SUPPORT=y 1384CONFIG_TRACE_IRQFLAGS_SUPPORT=y
934# CONFIG_PRINTK_TIME is not set 1385# CONFIG_PRINTK_TIME is not set
1386CONFIG_ENABLE_WARN_DEPRECATED=y
935CONFIG_ENABLE_MUST_CHECK=y 1387CONFIG_ENABLE_MUST_CHECK=y
1388CONFIG_FRAME_WARN=2048
936# CONFIG_MAGIC_SYSRQ is not set 1389# CONFIG_MAGIC_SYSRQ is not set
1390# CONFIG_STRIP_ASM_SYMS is not set
937# CONFIG_UNUSED_SYMBOLS is not set 1391# CONFIG_UNUSED_SYMBOLS is not set
938# CONFIG_DEBUG_FS is not set 1392# CONFIG_DEBUG_FS is not set
939# CONFIG_HEADERS_CHECK is not set 1393# CONFIG_HEADERS_CHECK is not set
940# CONFIG_DEBUG_KERNEL is not set 1394# CONFIG_DEBUG_KERNEL is not set
941CONFIG_CROSSCOMPILE=y 1395# CONFIG_DEBUG_MEMORY_INIT is not set
1396# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1397# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1398CONFIG_HAVE_FUNCTION_TRACER=y
1399CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1400CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
1401CONFIG_HAVE_DYNAMIC_FTRACE=y
1402CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1403CONFIG_TRACING_SUPPORT=y
1404# CONFIG_FTRACE is not set
1405# CONFIG_SAMPLES is not set
1406CONFIG_HAVE_ARCH_KGDB=y
1407CONFIG_EARLY_PRINTK=y
942# CONFIG_CMDLINE_BOOL is not set 1408# CONFIG_CMDLINE_BOOL is not set
943 1409
944# 1410#
@@ -947,65 +1413,140 @@ CONFIG_CROSSCOMPILE=y
947CONFIG_KEYS=y 1413CONFIG_KEYS=y
948CONFIG_KEYS_DEBUG_PROC_KEYS=y 1414CONFIG_KEYS_DEBUG_PROC_KEYS=y
949# CONFIG_SECURITY is not set 1415# CONFIG_SECURITY is not set
950CONFIG_XOR_BLOCKS=m 1416CONFIG_SECURITYFS=y
951CONFIG_ASYNC_CORE=m 1417# CONFIG_DEFAULT_SECURITY_SELINUX is not set
952CONFIG_ASYNC_MEMCPY=m 1418# CONFIG_DEFAULT_SECURITY_SMACK is not set
953CONFIG_ASYNC_XOR=m 1419# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1420CONFIG_DEFAULT_SECURITY_DAC=y
1421CONFIG_DEFAULT_SECURITY=""
1422CONFIG_XOR_BLOCKS=y
1423CONFIG_ASYNC_CORE=y
1424CONFIG_ASYNC_MEMCPY=y
1425CONFIG_ASYNC_XOR=y
1426CONFIG_ASYNC_PQ=y
1427CONFIG_ASYNC_RAID6_RECOV=y
954CONFIG_CRYPTO=y 1428CONFIG_CRYPTO=y
1429
1430#
1431# Crypto core or helper
1432#
1433CONFIG_CRYPTO_FIPS=y
955CONFIG_CRYPTO_ALGAPI=y 1434CONFIG_CRYPTO_ALGAPI=y
956CONFIG_CRYPTO_ABLKCIPHER=m 1435CONFIG_CRYPTO_ALGAPI2=y
1436CONFIG_CRYPTO_AEAD=m
1437CONFIG_CRYPTO_AEAD2=y
957CONFIG_CRYPTO_BLKCIPHER=y 1438CONFIG_CRYPTO_BLKCIPHER=y
1439CONFIG_CRYPTO_BLKCIPHER2=y
958CONFIG_CRYPTO_HASH=y 1440CONFIG_CRYPTO_HASH=y
1441CONFIG_CRYPTO_HASH2=y
1442CONFIG_CRYPTO_RNG=m
1443CONFIG_CRYPTO_RNG2=y
1444CONFIG_CRYPTO_PCOMP=y
959CONFIG_CRYPTO_MANAGER=y 1445CONFIG_CRYPTO_MANAGER=y
1446CONFIG_CRYPTO_MANAGER2=y
1447CONFIG_CRYPTO_GF128MUL=m
1448CONFIG_CRYPTO_NULL=m
1449CONFIG_CRYPTO_WORKQUEUE=y
1450CONFIG_CRYPTO_CRYPTD=m
1451CONFIG_CRYPTO_AUTHENC=m
1452# CONFIG_CRYPTO_TEST is not set
1453
1454#
1455# Authenticated Encryption with Associated Data
1456#
1457CONFIG_CRYPTO_CCM=m
1458CONFIG_CRYPTO_GCM=m
1459CONFIG_CRYPTO_SEQIV=m
1460
1461#
1462# Block modes
1463#
1464CONFIG_CRYPTO_CBC=y
1465CONFIG_CRYPTO_CTR=m
1466CONFIG_CRYPTO_CTS=m
1467CONFIG_CRYPTO_ECB=m
1468CONFIG_CRYPTO_LRW=m
1469CONFIG_CRYPTO_PCBC=m
1470CONFIG_CRYPTO_XTS=m
1471
1472#
1473# Hash modes
1474#
960CONFIG_CRYPTO_HMAC=y 1475CONFIG_CRYPTO_HMAC=y
961CONFIG_CRYPTO_XCBC=m 1476CONFIG_CRYPTO_XCBC=m
962CONFIG_CRYPTO_NULL=m 1477CONFIG_CRYPTO_VMAC=m
1478
1479#
1480# Digest
1481#
1482CONFIG_CRYPTO_CRC32C=m
1483CONFIG_CRYPTO_GHASH=m
963CONFIG_CRYPTO_MD4=m 1484CONFIG_CRYPTO_MD4=m
964CONFIG_CRYPTO_MD5=y 1485CONFIG_CRYPTO_MD5=y
1486CONFIG_CRYPTO_MICHAEL_MIC=m
1487CONFIG_CRYPTO_RMD128=m
1488CONFIG_CRYPTO_RMD160=m
1489CONFIG_CRYPTO_RMD256=m
1490CONFIG_CRYPTO_RMD320=m
965CONFIG_CRYPTO_SHA1=m 1491CONFIG_CRYPTO_SHA1=m
966CONFIG_CRYPTO_SHA256=m 1492CONFIG_CRYPTO_SHA256=m
967CONFIG_CRYPTO_SHA512=m 1493CONFIG_CRYPTO_SHA512=m
968CONFIG_CRYPTO_WP512=m
969CONFIG_CRYPTO_TGR192=m 1494CONFIG_CRYPTO_TGR192=m
970CONFIG_CRYPTO_GF128MUL=m 1495CONFIG_CRYPTO_WP512=m
971CONFIG_CRYPTO_ECB=m 1496
972CONFIG_CRYPTO_CBC=y 1497#
973CONFIG_CRYPTO_PCBC=m 1498# Ciphers
974CONFIG_CRYPTO_LRW=m 1499#
975CONFIG_CRYPTO_CRYPTD=m
976CONFIG_CRYPTO_DES=y
977CONFIG_CRYPTO_FCRYPT=m
978CONFIG_CRYPTO_BLOWFISH=m
979CONFIG_CRYPTO_TWOFISH=m
980CONFIG_CRYPTO_TWOFISH_COMMON=m
981CONFIG_CRYPTO_SERPENT=m
982CONFIG_CRYPTO_AES=m 1500CONFIG_CRYPTO_AES=m
1501CONFIG_CRYPTO_ANUBIS=m
1502CONFIG_CRYPTO_ARC4=m
1503CONFIG_CRYPTO_BLOWFISH=m
1504CONFIG_CRYPTO_CAMELLIA=m
983CONFIG_CRYPTO_CAST5=m 1505CONFIG_CRYPTO_CAST5=m
984CONFIG_CRYPTO_CAST6=m 1506CONFIG_CRYPTO_CAST6=m
985CONFIG_CRYPTO_TEA=m 1507CONFIG_CRYPTO_DES=y
986CONFIG_CRYPTO_ARC4=m 1508CONFIG_CRYPTO_FCRYPT=m
987CONFIG_CRYPTO_KHAZAD=m 1509CONFIG_CRYPTO_KHAZAD=m
988CONFIG_CRYPTO_ANUBIS=m 1510CONFIG_CRYPTO_SALSA20=m
1511CONFIG_CRYPTO_SEED=m
1512CONFIG_CRYPTO_SERPENT=m
1513CONFIG_CRYPTO_TEA=m
1514CONFIG_CRYPTO_TWOFISH=m
1515CONFIG_CRYPTO_TWOFISH_COMMON=m
1516
1517#
1518# Compression
1519#
989CONFIG_CRYPTO_DEFLATE=m 1520CONFIG_CRYPTO_DEFLATE=m
990CONFIG_CRYPTO_MICHAEL_MIC=m 1521CONFIG_CRYPTO_ZLIB=m
991CONFIG_CRYPTO_CRC32C=m 1522CONFIG_CRYPTO_LZO=m
992CONFIG_CRYPTO_CAMELLIA=m 1523
993# CONFIG_CRYPTO_TEST is not set 1524#
1525# Random Number Generation
1526#
1527CONFIG_CRYPTO_ANSI_CPRNG=m
994CONFIG_CRYPTO_HW=y 1528CONFIG_CRYPTO_HW=y
1529CONFIG_CRYPTO_DEV_HIFN_795X=m
1530# CONFIG_CRYPTO_DEV_HIFN_795X_RNG is not set
1531# CONFIG_BINARY_PRINTF is not set
995 1532
996# 1533#
997# Library routines 1534# Library routines
998# 1535#
999CONFIG_BITREVERSE=y 1536CONFIG_BITREVERSE=y
1537CONFIG_GENERIC_FIND_LAST_BIT=y
1000CONFIG_CRC_CCITT=m 1538CONFIG_CRC_CCITT=m
1001# CONFIG_CRC16 is not set 1539CONFIG_CRC16=y
1002# CONFIG_CRC_ITU_T is not set 1540CONFIG_CRC_T10DIF=m
1541CONFIG_CRC_ITU_T=m
1003CONFIG_CRC32=y 1542CONFIG_CRC32=y
1004# CONFIG_CRC7 is not set 1543CONFIG_CRC7=m
1005CONFIG_LIBCRC32C=m 1544CONFIG_LIBCRC32C=m
1006CONFIG_ZLIB_INFLATE=m 1545CONFIG_ZLIB_INFLATE=m
1007CONFIG_ZLIB_DEFLATE=m 1546CONFIG_ZLIB_DEFLATE=m
1008CONFIG_PLIST=y 1547CONFIG_LZO_COMPRESS=m
1548CONFIG_LZO_DECOMPRESS=m
1009CONFIG_HAS_IOMEM=y 1549CONFIG_HAS_IOMEM=y
1010CONFIG_HAS_IOPORT=y 1550CONFIG_HAS_IOPORT=y
1011CONFIG_HAS_DMA=y 1551CONFIG_HAS_DMA=y
1552CONFIG_NLATTR=y
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 9c187a64649b..758ad426c57f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void)
287static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) 287static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
288{ 288{
289#ifdef __NEED_VMBITS_PROBE 289#ifdef __NEED_VMBITS_PROBE
290 write_c0_entryhi(0x3ffffffffffff000ULL); 290 write_c0_entryhi(0x3fffffffffffe000ULL);
291 back_to_back_c0_hazard(); 291 back_to_back_c0_hazard();
292 c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL); 292 c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
293#endif 293#endif
294} 294}
295 295
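
The probe above writes a mask of candidate virtual-address bits to EntryHi, reads the register back, and takes fls64() of whatever stuck to learn how many VA bits the core implements. The mask narrows from 0x3ffffffffffff000 to 0x3fffffffffffe000 because EntryHi's VPN2 field starts at bit 13 (each TLB entry maps an even/odd pair of pages), so bit 12 is not a VA bit and must not be probed. A minimal userspace sketch of the idiom, with the hardware readback simulated rather than taken from read_c0_entryhi():

    #include <stdint.h>
    #include <stdio.h>

    /* Highest set bit, 1-based; 0 when no bit is set (mirrors kernel fls64). */
    static int fls64(uint64_t x)
    {
            int bit = 0;

            while (x) {
                    bit++;
                    x >>= 1;
            }
            return bit;
    }

    int main(void)
    {
            const uint64_t probe = 0x3fffffffffffe000ULL;     /* candidate VPN2 bits */
            /* Pretend the core implements 40 VA bits: only those read back. */
            uint64_t entryhi = probe & 0x000000ffffffe000ULL;

            printf("vmbits = %d\n", fls64(entryhi & probe));  /* -> 40 */
            return 0;
    }
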
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 338dfe8ed002..31b204b26ba0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void)
1501 cp0_perfcount_irq = -1; 1501 cp0_perfcount_irq = -1;
1502 } else { 1502 } else {
1503 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; 1503 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
1504 cp0_compare_irq_shift = cp0_compare_irq;
1504 cp0_perfcount_irq = -1; 1505 cp0_perfcount_irq = -1;
1505 } 1506 }
1506 1507
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 94e05e5733c1..e06f1af760a7 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
174 * Probe Octeon's caches 174 * Probe Octeon's caches
175 * 175 *
176 */ 176 */
177static void __devinit probe_octeon(void) 177static void __cpuinit probe_octeon(void)
178{ 178{
179 unsigned long icache_size; 179 unsigned long icache_size;
180 unsigned long dcache_size; 180 unsigned long dcache_size;
@@ -235,7 +235,7 @@ static void __devinit probe_octeon(void)
235 * Setup the Octeon cache flush routines 235 * Setup the Octeon cache flush routines
236 * 236 *
237 */ 237 */
238void __devinit octeon_cache_init(void) 238void __cpuinit octeon_cache_init(void)
239{ 239{
240 extern unsigned long ebase; 240 extern unsigned long ebase;
241 extern char except_vec2_octeon; 241 extern char except_vec2_octeon;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 102b2dfa542a..e716cafc346d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -155,7 +155,7 @@ static inline void setup_protection_map(void)
155 protection_map[15] = PAGE_SHARED; 155 protection_map[15] = PAGE_SHARED;
156} 156}
157 157
158void __devinit cpu_cache_init(void) 158void __cpuinit cpu_cache_init(void)
159{ 159{
160 if (cpu_has_3k_cache) { 160 if (cpu_has_3k_cache) {
161 extern void __weak r3k_cache_init(void); 161 extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index e274fda329f4..127d732474bf 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <linux/highmem.h> 2#include <linux/highmem.h>
3#include <linux/sched.h>
3#include <linux/smp.h> 4#include <linux/smp.h>
4#include <asm/fixmap.h> 5#include <asm/fixmap.h>
5#include <asm/tlbflush.h> 6#include <asm/tlbflush.h>
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 46f00691f448..31e2583ec622 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void)
404 if (!rm200_pic_master) 404 if (!rm200_pic_master)
405 return; 405 return;
406 rm200_pic_slave = ioremap_nocache(0x160000a0, 4); 406 rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
407 if (!rm200_pic_master) { 407 if (!rm200_pic_slave) {
408 iounmap(rm200_pic_master); 408 iounmap(rm200_pic_master);
409 return; 409 return;
410 } 410 }
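
The rm200 change is a classic copy-and-paste fix: after mapping the slave PIC, the old code re-tested rm200_pic_master — already known to be non-NULL — so a failed second ioremap_nocache() was never detected and the error path never ran. The corrected shape, sketched with plain allocations standing in for ioremap_nocache()/iounmap():

    #include <stdlib.h>

    struct pics { void *master, *slave; };

    static int pics_init(struct pics *p)
    {
            p->master = malloc(4);           /* stand-in for the first mapping */
            if (!p->master)
                    return -1;
            p->slave = malloc(4);            /* stand-in for the second mapping */
            if (!p->slave) {                 /* test the resource just acquired... */
                    free(p->master);         /* ...and release the earlier one */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct pics p;

            if (pics_init(&p))
                    return 1;
            free(p.slave);
            free(p.master);
            return 0;
    }
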
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 524d9352f17e..f388dc68f605 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,7 +18,6 @@ config PARISC
18 select BUG 18 select BUG
19 select HAVE_PERF_EVENTS 19 select HAVE_PERF_EVENTS
20 select GENERIC_ATOMIC64 if !64BIT 20 select GENERIC_ATOMIC64 if !64BIT
21 select HAVE_ARCH_TRACEHOOK
22 help 21 help
23 The PA-RISC microprocessor is designed by Hewlett-Packard and used 22 The PA-RISC microprocessor is designed by Hewlett-Packard and used
24 in many of their workstations & servers (HP9000 700 and 800 series, 23 in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index f7064abc3bb6..9e74bfe071dc 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -18,7 +18,6 @@
18 18
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/cache.h> /* for L1_CACHE_BYTES */
22#include <asm/superio.h> 21#include <asm/superio.h>
23 22
24#define DEBUG_RESOURCES 0 23#define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
123 } else { 122 } else {
124 printk(KERN_WARNING "pci_bios != NULL but init() is!\n"); 123 printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
125 } 124 }
125
126 /* Set the CLS for PCI as early as possible. */
127 pci_cache_line_size = pci_dfl_cache_line_size;
128
126 return 0; 129 return 0;
127} 130}
128 131
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
171 ** upper byte is PCI_LATENCY_TIMER. 174 ** upper byte is PCI_LATENCY_TIMER.
172 */ 175 */
173 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, 176 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
174 (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32))); 177 (0x80 << 8) | pci_cache_line_size);
175} 178}
176 179
177 180
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index fb37ac52e46c..35c827e94e31 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
468 recalc_sigpending(); 468 recalc_sigpending();
469 spin_unlock_irq(&current->sighand->siglock); 469 spin_unlock_irq(&current->sighand->siglock);
470 470
471 tracehook_signal_handler(sig, info, ka, regs, 0); 471 tracehook_signal_handler(sig, info, ka, regs,
472 test_thread_flag(TIF_SINGLESTEP) ||
473 test_thread_flag(TIF_BLOCKSTEP));
472 474
473 return 1; 475 return 1;
474} 476}
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index a3c11cac3d71..95ad9dad298e 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -495,9 +495,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
495 495
496 entry->nr = 0; 496 entry->nr = 0;
497 497
498 if (current->pid == 0) /* idle task? */
499 return entry;
500
501 if (!user_mode(regs)) { 498 if (!user_mode(regs)) {
502 perf_callchain_kernel(regs, entry); 499 perf_callchain_kernel(regs, entry);
503 if (current->mm) 500 if (current->mm)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 1eb85fbf53a5..b6cf8f1f4d35 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
718 return n; 718 return n;
719} 719}
720 720
721static void event_sched_in(struct perf_event *event, int cpu) 721static void event_sched_in(struct perf_event *event)
722{ 722{
723 event->state = PERF_EVENT_STATE_ACTIVE; 723 event->state = PERF_EVENT_STATE_ACTIVE;
724 event->oncpu = cpu; 724 event->oncpu = smp_processor_id();
725 event->tstamp_running += event->ctx->time - event->tstamp_stopped; 725 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
726 if (is_software_event(event)) 726 if (is_software_event(event))
727 event->pmu->enable(event); 727 event->pmu->enable(event);
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
735 */ 735 */
736int hw_perf_group_sched_in(struct perf_event *group_leader, 736int hw_perf_group_sched_in(struct perf_event *group_leader,
737 struct perf_cpu_context *cpuctx, 737 struct perf_cpu_context *cpuctx,
738 struct perf_event_context *ctx, int cpu) 738 struct perf_event_context *ctx)
739{ 739{
740 struct cpu_hw_events *cpuhw; 740 struct cpu_hw_events *cpuhw;
741 long i, n, n0; 741 long i, n, n0;
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
766 cpuhw->event[i]->hw.config = cpuhw->events[i]; 766 cpuhw->event[i]->hw.config = cpuhw->events[i];
767 cpuctx->active_oncpu += n; 767 cpuctx->active_oncpu += n;
768 n = 1; 768 n = 1;
769 event_sched_in(group_leader, cpu); 769 event_sched_in(group_leader);
770 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { 770 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
771 if (sub->state != PERF_EVENT_STATE_OFF) { 771 if (sub->state != PERF_EVENT_STATE_OFF) {
772 event_sched_in(sub, cpu); 772 event_sched_in(sub);
773 ++n; 773 ++n;
774 } 774 }
775 } 775 }
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361f..1ec06576f619 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
63 if (huge) { 63 if (huge) {
64#ifdef CONFIG_HUGETLB_PAGE 64#ifdef CONFIG_HUGETLB_PAGE
65 psize = get_slice_psize(mm, addr); 65 psize = get_slice_psize(mm, addr);
66 /* Mask the address for the correct page size */
67 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
66#else 68#else
67 BUG(); 69 BUG();
68 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ 70 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
69#endif 71#endif
70 } else 72 } else {
71 psize = pte_pagesize_index(mm, addr, pte); 73 psize = pte_pagesize_index(mm, addr, pte);
74 /* Mask the address for the standard page size. If we
75 * have a 64k page kernel, but the hardware does not
76 * support 64k pages, this might be different from the
77 * hardware page size encoded in the slice table. */
78 addr &= PAGE_MASK;
79 }
72 80
73 /* Mask the address for the correct page size */
74 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
75 81
76 /* Build full vaddr */ 82 /* Build full vaddr */
77 if (!is_kernel_addr(addr)) { 83 if (!is_kernel_addr(addr)) {
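
The hunk moves the rounding of addr into each branch so it uses the page size that branch actually selected: huge pages keep masking with the slice's page shift, while the normal path now masks with PAGE_MASK, since (as the new comment notes) a 64k-page kernel on hardware without 64k support can have PAGE_SIZE differ from the size encoded in the slice table. The mask itself is the usual power-of-two construction — for a page shift s, ~((1UL << s) - 1) clears the low s bits:

    #include <stdio.h>

    int main(void)
    {
            unsigned long addr = 0x12345678UL;
            unsigned int shift_4k = 12, shift_64k = 16;

            /* Round down to the containing page for each page size. */
            printf("%#lx\n", addr & ~((1UL << shift_4k) - 1));   /* 0x12345000 */
            printf("%#lx\n", addr & ~((1UL << shift_64k) - 1));  /* 0x12340000 */
            return 0;
    }
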
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8c445b..cc29c0f5300d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
338 } 338 }
339 339
340 mpic = mpic_alloc(np, r.start, 340 mpic = mpic_alloc(np, r.start,
341 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 341 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
342 MPIC_BROKEN_FRR_NIRQS,
342 0, 256, " OpenPIC "); 343 0, 256, " OpenPIC ");
343 BUG_ON(mpic == NULL); 344 BUG_ON(mpic == NULL);
344 of_node_put(np); 345 of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 04160a4cc699..a15f582300d8 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
46 __iomem u32 *bptr_vaddr; 46 __iomem u32 *bptr_vaddr;
47 struct device_node *np; 47 struct device_node *np;
48 int n = 0; 48 int n = 0;
49 int ioremappable;
49 50
50 WARN_ON (nr < 0 || nr >= NR_CPUS); 51 WARN_ON (nr < 0 || nr >= NR_CPUS);
51 52
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
59 return; 60 return;
60 } 61 }
61 62
63 /*
64 * A secondary core could be in a spinloop in the bootpage
65 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
66 * The bootpage and highmem can be accessed via ioremap(), but
 67 * we need to directly access the spinloop if it's in lowmem.
68 */
69 ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
70
62 /* Map the spin table */ 71 /* Map the spin table */
63 bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); 72 if (ioremappable)
73 bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
74 else
75 bptr_vaddr = phys_to_virt(*cpu_rel_addr);
64 76
65 local_irq_save(flags); 77 local_irq_save(flags);
66 78
67 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); 79 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
68 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); 80 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
69 81
82 if (!ioremappable)
83 flush_dcache_range((ulong)bptr_vaddr,
84 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
85
70 /* Wait a bit for the CPU to ack. */ 86 /* Wait a bit for the CPU to ack. */
71 while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) 87 while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
72 mdelay(1); 88 mdelay(1);
73 89
74 local_irq_restore(flags); 90 local_irq_restore(flags);
75 91
76 iounmap(bptr_vaddr); 92 if (ioremappable)
93 iounmap(bptr_vaddr);
77 94
78 pr_debug("waited %d msecs for CPU #%d.\n", n, nr); 95 pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
79} 96}
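
The kick path above now distinguishes where the secondary's spin table lives: a physical address past the end of the direct-mapped lowmem needs an explicit ioremap(), while a lowmem address is reached through the existing linear map — and, because the spinning core is not yet cache-coherent, a write through the linear map must be followed by the dcache flush added above. The decision itself is only a comparison; a toy model with an illustrative lowmem boundary:

    #include <stdio.h>

    int main(void)
    {
            unsigned long lowmem_top = 0x30000000UL;  /* illustrative phys end of direct map */
            unsigned long tables[] = { 0x0ffff000UL, 0xfffff000UL };

            for (int i = 0; i < 2; i++)
                    printf("%#lx -> %s\n", tables[i],
                           tables[i] > lowmem_top ? "ioremap" : "phys_to_virt");
            return 0;
    }
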
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1ee66db003be..f5f79196721c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
784{ 784{
785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
786 786
787 BUG_ON(os_cppr->index != 0); 787 /*
788 * we only really want to set the priority when there's
789 * just one cppr value on the stack
790 */
791 WARN_ON(os_cppr->index != 0);
788 792
789 os_cppr->stack[os_cppr->index] = cppr; 793 os_cppr->stack[0] = cppr;
790 794
791 if (firmware_has_feature(FW_FEATURE_LPAR)) 795 if (firmware_has_feature(FW_FEATURE_LPAR))
792 lpar_cppr_info(cppr); 796 lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
821 825
822void xics_teardown_cpu(void) 826void xics_teardown_cpu(void)
823{ 827{
828 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
824 int cpu = smp_processor_id(); 829 int cpu = smp_processor_id();
825 830
831 /*
832 * we have to reset the cppr index to 0 because we're
833 * not going to return from the IPI
834 */
835 os_cppr->index = 0;
826 xics_set_cpu_priority(0); 836 xics_set_cpu_priority(0);
827 837
828 /* Clear any pending IPI request */ 838 /* Clear any pending IPI request */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f2ef4b619ce1..c25dfac7dd76 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -293,12 +293,12 @@ struct _lowcore
293 __u64 clock_comparator; /* 0x02d0 */ 293 __u64 clock_comparator; /* 0x02d0 */
294 __u32 machine_flags; /* 0x02d8 */ 294 __u32 machine_flags; /* 0x02d8 */
295 __u32 ftrace_func; /* 0x02dc */ 295 __u32 ftrace_func; /* 0x02dc */
296 __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ 296 __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
297 297
298 /* Interrupt response block */ 298 /* Interrupt response block */
299 __u8 irb[64]; /* 0x0300 */ 299 __u8 irb[64]; /* 0x0300 */
300 300
301 __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ 301 __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
302 302
303 /* 303 /*
304 * 0xe00 contains the address of the IPL Parameter Information 304 * 0xe00 contains the address of the IPL Parameter Information
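
The two renames follow the lowcore padding convention: a pad array is named after its own starting offset and sized as the difference to the next architected offset. The old pads (pad_0x02f0, pad_0x0400) were computed for start offsets the struct no longer had, which displaced every field after them; the fix recomputes both name and length from the actual offsets. A reduced model of the convention — not the real lowcore layout — with the invariant pinned by a C11 static assert:

    #include <stddef.h>

    struct lowcore_model {
            unsigned long long clock_comparator;    /* 0x00 */
            unsigned int machine_flags;             /* 0x08 */
            unsigned int ftrace_func;               /* 0x0c */
            unsigned char pad_0x10[0x40 - 0x10];    /* 0x10: pad up to irb */
            unsigned char irb[64];                  /* 0x40 */
    };

    /* If a field is added above without resizing the pad, this fails to build. */
    _Static_assert(offsetof(struct lowcore_model, irb) == 0x40,
                   "irb must sit at its architected offset");

    int main(void) { return 0; }
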
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3f7e2a22c7c2..f6a389c996cb 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
132 mov #1, r5 132 mov #1, r5
133 133
134call_handle_tlbmiss: 134call_handle_tlbmiss:
135 setup_frame_reg
136 mov.l 1f, r0 135 mov.l 1f, r0
137 mov r5, r8 136 mov r5, r8
138 mov.l @r0, r6 137 mov.l @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
365 mov.l @k2, k2 ! read out vector and keep in k2 364 mov.l @k2, k2 ! read out vector and keep in k2
366 365
367handle_exception_special: 366handle_exception_special:
367 setup_frame_reg
368
368 ! Setup return address and jump to exception handler 369 ! Setup return address and jump to exception handler
369 mov.l 7f, r9 ! fetch return address 370 mov.l 7f, r9 ! fetch return address
370 stc r2_bank, r0 ! k2 (vector) 371 stc r2_bank, r0 ! k2 (vector)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 88d28ec3780a..e51168064e56 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
540 mempool_free(frame, dwarf_frame_pool); 540 mempool_free(frame, dwarf_frame_pool);
541} 541}
542 542
543extern void ret_from_irq(void);
544
543/** 545/**
544 * dwarf_unwind_stack - unwind the stack 546 * dwarf_unwind_stack - unwind the stack
545 * 547 *
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
678 addr = frame->cfa + reg->addr; 680 addr = frame->cfa + reg->addr;
679 frame->return_addr = __raw_readl(addr); 681 frame->return_addr = __raw_readl(addr);
680 682
683 /*
684 * Ah, the joys of unwinding through interrupts.
685 *
686 * Interrupts are tricky - the DWARF info needs to be _really_
687 * accurate and unfortunately I'm seeing a lot of bogus DWARF
688 * info. For example, I've seen interrupts occur in epilogues
689 * just after the frame pointer (r14) had been restored. The
690 * problem was that the DWARF info claimed that the CFA could be
691 * reached by using the value of the frame pointer before it was
692 * restored.
693 *
694 * So until the compiler can be trusted to produce reliable
695 * DWARF info when it really matters, let's stop unwinding once
696 * we've calculated the function that was interrupted.
697 */
698 if (prev && prev->pc == (unsigned long)ret_from_irq)
699 frame->return_addr = 0;
700
681 return frame; 701 return frame;
682 702
683bail: 703bail:
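
The long comment explains the policy; mechanically, the unwinder now plants a 0 return address once it reaches the frame of ret_from_irq, and a zero return address is the conventional sentinel that ends a frame walk. A generic sketch of such a loop (the frame layout here is hypothetical, not the sh dwarf_frame):

    #include <stdio.h>

    struct frame { unsigned long return_addr; };

    static void walk(const struct frame *f, int n)
    {
            for (int i = 0; i < n && f[i].return_addr; i++)
                    printf("frame %d: ra=%#lx\n", i, f[i].return_addr);
    }

    int main(void)
    {
            /* The third frame carries the sentinel; the fourth is never visited. */
            struct frame stack[] = { { 0x8010 }, { 0x8020 }, { 0 }, { 0xdead } };

            walk(stack, 4);
            return 0;
    }
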
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f0abd58c3a69..2b15ae60c3a0 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -70,8 +70,14 @@ ret_from_exception:
70 CFI_STARTPROC simple 70 CFI_STARTPROC simple
71 CFI_DEF_CFA r14, 0 71 CFI_DEF_CFA r14, 0
72 CFI_REL_OFFSET 17, 64 72 CFI_REL_OFFSET 17, 64
73 CFI_REL_OFFSET 15, 0 73 CFI_REL_OFFSET 15, 60
74 CFI_REL_OFFSET 14, 56 74 CFI_REL_OFFSET 14, 56
75 CFI_REL_OFFSET 13, 52
76 CFI_REL_OFFSET 12, 48
77 CFI_REL_OFFSET 11, 44
78 CFI_REL_OFFSET 10, 40
79 CFI_REL_OFFSET 9, 36
80 CFI_REL_OFFSET 8, 32
75 preempt_stop() 81 preempt_stop()
76ENTRY(ret_from_irq) 82ENTRY(ret_from_irq)
77 ! 83 !
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index 24ea837eac5b..a9dd3abde28e 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -68,9 +68,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
68 68
69 is_user = user_mode(regs); 69 is_user = user_mode(regs);
70 70
71 if (!current || current->pid == 0)
72 return;
73
74 if (is_user && current->state != TASK_RUNNING) 71 if (is_user && current->state != TASK_RUNNING)
75 return; 72 return;
76 73
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 873ebdc4f98e..b063eb8b18e3 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -133,6 +133,8 @@ void user_enable_single_step(struct task_struct *child)
133 struct pt_regs *regs = child->thread.uregs; 133 struct pt_regs *regs = child->thread.uregs;
134 134
135 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */ 135 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
136
137 set_tsk_thread_flag(child, TIF_SINGLESTEP);
136} 138}
137 139
138void user_disable_single_step(struct task_struct *child) 140void user_disable_single_step(struct task_struct *child)
@@ -140,6 +142,8 @@ void user_disable_single_step(struct task_struct *child)
140 struct pt_regs *regs = child->thread.uregs; 142 struct pt_regs *regs = child->thread.uregs;
141 143
142 regs->sr &= ~SR_SSTEP; 144 regs->sr &= ~SR_SSTEP;
145
146 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
143} 147}
144 148
145static int genregs_get(struct task_struct *target, 149static int genregs_get(struct task_struct *target,
@@ -454,6 +458,8 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
454 458
455asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) 459asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
456{ 460{
461 int step;
462
457 if (unlikely(current->audit_context)) 463 if (unlikely(current->audit_context))
458 audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]), 464 audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
459 regs->regs[9]); 465 regs->regs[9]);
@@ -461,8 +467,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
461 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 467 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
462 trace_sys_exit(regs, regs->regs[9]); 468 trace_sys_exit(regs, regs->regs[9]);
463 469
464 if (test_thread_flag(TIF_SYSCALL_TRACE)) 470 step = test_thread_flag(TIF_SINGLESTEP);
465 tracehook_report_syscall_exit(regs, 0); 471 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
472 tracehook_report_syscall_exit(regs, step);
466} 473}
467 474
468/* Called with interrupts disabled */ 475/* Called with interrupts disabled */
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index ce76dbdef294..580e97d46ca5 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -118,7 +118,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
118 * clear the TS_RESTORE_SIGMASK flag. 118 * clear the TS_RESTORE_SIGMASK flag.
119 */ 119 */
120 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 120 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
121 tracehook_signal_handler(signr, &info, &ka, regs, 0); 121
122 tracehook_signal_handler(signr, &info, &ka, regs,
123 test_thread_flag(TIF_SINGLESTEP));
122 return 1; 124 return 1;
123 } 125 }
124 } 126 }
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
index 55db5eca08e2..39327d6a57eb 100644
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@ struct stat {
53 ino_t st_ino; 53 ino_t st_ino;
54 mode_t st_mode; 54 mode_t st_mode;
55 short st_nlink; 55 short st_nlink;
56 uid_t st_uid; 56 uid16_t st_uid;
57 gid_t st_gid; 57 gid16_t st_gid;
58 unsigned short st_rdev; 58 unsigned short st_rdev;
59 off_t st_size; 59 off_t st_size;
60 time_t st_atime; 60 time_t st_atime;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 4248d969272f..5247283d1c03 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
11{ 11{
12 unsigned long base = (unsigned long) tp; 12 unsigned long base = (unsigned long) tp;
13 13
14 /* Stack pointer must be 16-byte aligned. */
15 if (sp & (16UL - 1))
16 return false;
17
14 if (sp >= (base + sizeof(struct thread_info)) && 18 if (sp >= (base + sizeof(struct thread_info)) &&
15 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) 19 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
16 return true; 20 return true;
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 4c26eb59e742..53a58b349849 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
105 105
106static int of_bus_ambapp_match(struct device_node *np) 106static int of_bus_ambapp_match(struct device_node *np)
107{ 107{
108 return !strcmp(np->name, "ambapp"); 108 return !strcmp(np->type, "ambapp");
109} 109}
110 110
111static void of_bus_ambapp_count_cells(struct device_node *child, 111static void of_bus_ambapp_count_cells(struct device_node *child,
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 539e83f8e087..592b03d85167 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
247 struct pci_bus *bus, int devfn) 247 struct pci_bus *bus, int devfn)
248{ 248{
249 struct dev_archdata *sd; 249 struct dev_archdata *sd;
250 struct pci_slot *slot;
250 struct of_device *op; 251 struct of_device *op;
251 struct pci_dev *dev; 252 struct pci_dev *dev;
252 const char *type; 253 const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
286 dev->dev.bus = &pci_bus_type; 287 dev->dev.bus = &pci_bus_type;
287 dev->devfn = devfn; 288 dev->devfn = devfn;
288 dev->multifunction = 0; /* maybe a lie? */ 289 dev->multifunction = 0; /* maybe a lie? */
290 set_pcie_port_type(dev);
291
292 list_for_each_entry(slot, &dev->bus->slots, list)
293 if (PCI_SLOT(dev->devfn) == slot->number)
294 dev->slot = slot;
289 295
290 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); 296 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
291 dev->device = of_getintprop_default(node, "device-id", 0xffff); 297 dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
322 328
323 dev->current_state = 4; /* unknown power state */ 329 dev->current_state = 4; /* unknown power state */
324 dev->error_state = pci_channel_io_normal; 330 dev->error_state = pci_channel_io_normal;
331 dev->dma_mask = 0xffffffff;
325 332
326 if (!strcmp(node->name, "pci")) { 333 if (!strcmp(node->name, "pci")) {
327 /* a PCI-PCI bridge */ 334 /* a PCI-PCI bridge */
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e856456ec02f..9f2b2bac8b2b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
980 return n; 980 return n;
981} 981}
982 982
983static void event_sched_in(struct perf_event *event, int cpu) 983static void event_sched_in(struct perf_event *event)
984{ 984{
985 event->state = PERF_EVENT_STATE_ACTIVE; 985 event->state = PERF_EVENT_STATE_ACTIVE;
986 event->oncpu = cpu; 986 event->oncpu = smp_processor_id();
987 event->tstamp_running += event->ctx->time - event->tstamp_stopped; 987 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
988 if (is_software_event(event)) 988 if (is_software_event(event))
989 event->pmu->enable(event); 989 event->pmu->enable(event);
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
991 991
992int hw_perf_group_sched_in(struct perf_event *group_leader, 992int hw_perf_group_sched_in(struct perf_event *group_leader,
993 struct perf_cpu_context *cpuctx, 993 struct perf_cpu_context *cpuctx,
994 struct perf_event_context *ctx, int cpu) 994 struct perf_event_context *ctx)
995{ 995{
996 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 996 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
997 struct perf_event *sub; 997 struct perf_event *sub;
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
1015 1015
1016 cpuctx->active_oncpu += n; 1016 cpuctx->active_oncpu += n;
1017 n = 1; 1017 n = 1;
1018 event_sched_in(group_leader, cpu); 1018 event_sched_in(group_leader);
1019 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { 1019 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
1020 if (sub->state != PERF_EVENT_STATE_OFF) { 1020 if (sub->state != PERF_EVENT_STATE_OFF) {
1021 event_sched_in(sub, cpu); 1021 event_sched_in(sub);
1022 n++; 1022 n++;
1023 } 1023 }
1024 } 1024 }
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 2830b415e214..c49865b30719 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
526 * Set some valid stack frames to give to the child. 526 * Set some valid stack frames to give to the child.
527 */ 527 */
528 childstack = (struct sparc_stackf __user *) 528 childstack = (struct sparc_stackf __user *)
529 (sp & ~0x7UL); 529 (sp & ~0xfUL);
530 parentstack = (struct sparc_stackf __user *) 530 parentstack = (struct sparc_stackf __user *)
531 regs->u_regs[UREG_FP]; 531 regs->u_regs[UREG_FP];
532 532
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index c3f1cce0e95e..cb70476bd8f5 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
398 } else 398 } else
399 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); 399 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
400 400
401 /* Now 8-byte align the stack as this is mandatory in the 401 /* Now align the stack as this is mandatory in the Sparc ABI
402 * Sparc ABI due to how register windows work. This hides 402 * due to how register windows work. This hides the
403 * the restriction from thread libraries etc. -DaveM 403 * restriction from thread libraries etc.
404 */ 404 */
405 csp &= ~7UL; 405 csp &= ~15UL;
406 406
407 distance = fp - psp; 407 distance = fp - psp;
408 rval = (csp - distance); 408 rval = (csp - distance);
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ba5b09ad6666..ea22cd373c64 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
120}; 120};
121 121
122/* Align macros */ 122/* Align macros */
123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) 123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) 124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
125 125
126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
127{ 127{
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
420 sp = current->sas_ss_sp + current->sas_ss_size; 420 sp = current->sas_ss_sp + current->sas_ss_size;
421 } 421 }
422 422
423 sp -= framesize;
424
423 /* Always align the stack frame. This handles two cases. First, 425 /* Always align the stack frame. This handles two cases. First,
424 * sigaltstack need not be mindful of platform specific stack 426 * sigaltstack need not be mindful of platform specific stack
425 * alignment. Second, if we took this signal because the stack 427 * alignment. Second, if we took this signal because the stack
426 * is not aligned properly, we'd like to take the signal cleanly 428 * is not aligned properly, we'd like to take the signal cleanly
427 * and report that. 429 * and report that.
428 */ 430 */
429 sp &= ~7UL; 431 sp &= ~15UL;
430 432
431 return (void __user *)(sp - framesize); 433 return (void __user *) sp;
432} 434}
433 435
434static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) 436static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
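
All three sparc signal paths (this one and the two below) get the same pair of fixes: alignment widens from 8 to 16 bytes to match the ABI, and the frame size is subtracted *before* masking rather than after. The order matters whenever framesize is not itself a multiple of the alignment — subtracting after the mask can re-misalign the pointer:

    #include <assert.h>

    int main(void)
    {
            unsigned long sp = 0xffff1234UL;
            unsigned long framesize = 0x58;   /* not a multiple of 16 */

            unsigned long before = (sp & ~15UL) - framesize;  /* 0xffff11d8: misaligned */
            unsigned long after  = (sp - framesize) & ~15UL;  /* 0xffff11d0: aligned */

            assert(before % 16 != 0);
            assert(after % 16 == 0);
            return 0;
    }
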
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 7ce1a1005b1d..9882df92ba0a 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
267 sp = current->sas_ss_sp + current->sas_ss_size; 267 sp = current->sas_ss_sp + current->sas_ss_size;
268 } 268 }
269 269
270 sp -= framesize;
271
270 /* Always align the stack frame. This handles two cases. First, 272 /* Always align the stack frame. This handles two cases. First,
271 * sigaltstack need not be mindful of platform specific stack 273 * sigaltstack need not be mindful of platform specific stack
272 * alignment. Second, if we took this signal because the stack 274 * alignment. Second, if we took this signal because the stack
273 * is not aligned properly, we'd like to take the signal cleanly 275 * is not aligned properly, we'd like to take the signal cleanly
274 * and report that. 276 * and report that.
275 */ 277 */
276 sp &= ~7UL; 278 sp &= ~15UL;
277 279
278 return (void __user *)(sp - framesize); 280 return (void __user *) sp;
279} 281}
280 282
281static inline int 283static inline int
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 647afbda7ae1..9fa48c30037e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -353,7 +353,7 @@ segv:
353/* Checks if the fp is valid */ 353/* Checks if the fp is valid */
354static int invalid_frame_pointer(void __user *fp, int fplen) 354static int invalid_frame_pointer(void __user *fp, int fplen)
355{ 355{
356 if (((unsigned long) fp) & 7) 356 if (((unsigned long) fp) & 15)
357 return 1; 357 return 1;
358 return 0; 358 return 0;
359} 359}
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
396 sp = current->sas_ss_sp + current->sas_ss_size; 396 sp = current->sas_ss_sp + current->sas_ss_size;
397 } 397 }
398 398
399 sp -= framesize;
400
399 /* Always align the stack frame. This handles two cases. First, 401 /* Always align the stack frame. This handles two cases. First,
400 * sigaltstack need not be mindful of platform specific stack 402 * sigaltstack need not be mindful of platform specific stack
401 * alignment. Second, if we took this signal because the stack 403 * alignment. Second, if we took this signal because the stack
402 * is not aligned properly, we'd like to take the signal cleanly 404 * is not aligned properly, we'd like to take the signal cleanly
403 * and report that. 405 * and report that.
404 */ 406 */
405 sp &= ~7UL; 407 sp &= ~15UL;
406 408
407 return (void __user *)(sp - framesize); 409 return (void __user *) sp;
408} 410}
409 411
410static inline void 412static inline void
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 8c91d9b29a2f..db15d123f054 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -191,10 +191,12 @@ tsb_dtlb_load:
191 191
192tsb_itlb_load: 192tsb_itlb_load:
193 /* Executable bit must be set. */ 193 /* Executable bit must be set. */
194661: andcc %g5, _PAGE_EXEC_4U, %g0 194661: sethi %hi(_PAGE_EXEC_4U), %g4
195 .section .sun4v_1insn_patch, "ax" 195 andcc %g5, %g4, %g0
196 .section .sun4v_2insn_patch, "ax"
196 .word 661b 197 .word 661b
197 andcc %g5, _PAGE_EXEC_4V, %g0 198 andcc %g5, _PAGE_EXEC_4V, %g0
199 nop
198 .previous 200 .previous
199 201
200 be,pn %xcc, tsb_do_fault 202 be,pn %xcc, tsb_do_fault
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 69b74a7b877f..ac80b7d70014 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -65,12 +65,17 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
65 void *text, void *text_end); 65 void *text, void *text_end);
66extern void alternatives_smp_module_del(struct module *mod); 66extern void alternatives_smp_module_del(struct module *mod);
67extern void alternatives_smp_switch(int smp); 67extern void alternatives_smp_switch(int smp);
68extern int alternatives_text_reserved(void *start, void *end);
68#else 69#else
69static inline void alternatives_smp_module_add(struct module *mod, char *name, 70static inline void alternatives_smp_module_add(struct module *mod, char *name,
70 void *locks, void *locks_end, 71 void *locks, void *locks_end,
71 void *text, void *text_end) {} 72 void *text, void *text_end) {}
72static inline void alternatives_smp_module_del(struct module *mod) {} 73static inline void alternatives_smp_module_del(struct module *mod) {}
73static inline void alternatives_smp_switch(int smp) {} 74static inline void alternatives_smp_switch(int smp) {}
75static inline int alternatives_text_reserved(void *start, void *end)
76{
77 return 0;
78}
74#endif /* CONFIG_SMP */ 79#endif /* CONFIG_SMP */
75 80
76/* alternative assembly primitive: */ 81/* alternative assembly primitive: */
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 8240f76b531e..b81002f23614 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -14,6 +14,9 @@
14 which debugging register was responsible for the trap. The other bits 14 which debugging register was responsible for the trap. The other bits
15 are either reserved or not of interest to us. */ 15 are either reserved or not of interest to us. */
16 16
17/* Define reserved bits in DR6 which are always set to 1 */
18#define DR6_RESERVED (0xFFFF0FF0)
19
17#define DR_TRAP0 (0x1) /* db0 */ 20#define DR_TRAP0 (0x1) /* db0 */
18#define DR_TRAP1 (0x2) /* db1 */ 21#define DR_TRAP1 (0x2) /* db1 */
19#define DR_TRAP2 (0x4) /* db2 */ 22#define DR_TRAP2 (0x4) /* db2 */
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1994d3f58443..f2ad2163109d 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -170,10 +170,7 @@ static inline void elf_common_init(struct thread_struct *t,
170} 170}
171 171
172#define ELF_PLAT_INIT(_r, load_addr) \ 172#define ELF_PLAT_INIT(_r, load_addr) \
173do { \ 173 elf_common_init(&current->thread, _r, 0)
174 elf_common_init(&current->thread, _r, 0); \
175 clear_thread_flag(TIF_IA32); \
176} while (0)
177 174
178#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ 175#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
179 elf_common_init(&current->thread, regs, __USER_DS) 176 elf_common_init(&current->thread, regs, __USER_DS)
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 0675a7c4c20e..2a1bd8f4f23a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
10 * (display/resolving) 10 * (display/resolving)
11 */ 11 */
12struct arch_hw_breakpoint { 12struct arch_hw_breakpoint {
13 char *name; /* Contains name of the symbol to set bkpt */
14 unsigned long address; 13 unsigned long address;
15 u8 len; 14 u8 len;
16 u8 type; 15 u8 type;
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 139d4c1a33a7..93da9c3f3341 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -19,7 +19,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
19extern int check_nmi_watchdog(void); 19extern int check_nmi_watchdog(void);
20extern int nmi_watchdog_enabled; 20extern int nmi_watchdog_enabled;
21extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); 21extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
22extern int avail_to_resrv_perfctr_nmi(unsigned int);
23extern int reserve_perfctr_nmi(unsigned int); 22extern int reserve_perfctr_nmi(unsigned int);
24extern void release_perfctr_nmi(unsigned int); 23extern void release_perfctr_nmi(unsigned int);
25extern int reserve_evntsel_nmi(unsigned int); 24extern int reserve_evntsel_nmi(unsigned int);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1380367dabd9..db6109a885a7 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -18,7 +18,7 @@
18#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 18#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
19#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 19#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
20 20
21#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) 21#define ARCH_PERFMON_EVENTSEL_ENABLE (1 << 22)
22#define ARCH_PERFMON_EVENTSEL_ANY (1 << 21) 22#define ARCH_PERFMON_EVENTSEL_ANY (1 << 21)
23#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) 23#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
24#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) 24#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
@@ -27,7 +27,14 @@
27/* 27/*
28 * Includes eventsel and unit mask as well: 28 * Includes eventsel and unit mask as well:
29 */ 29 */
30#define ARCH_PERFMON_EVENT_MASK 0xffff 30
31
32#define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL
33#define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL
34#define INTEL_ARCH_EDGE_MASK 0x00040000ULL
35#define INTEL_ARCH_INV_MASK 0x00800000ULL
36#define INTEL_ARCH_CNT_MASK 0xFF000000ULL
37#define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
31 38
32/* 39/*
33 * filter mask to validate fixed counter events. 40 * filter mask to validate fixed counter events.
@@ -38,7 +45,12 @@
38 * The other filters are supported by fixed counters. 45 * The other filters are supported by fixed counters.
39 * The any-thread option is supported starting with v3. 46 * The any-thread option is supported starting with v3.
40 */ 47 */
41#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 48#define INTEL_ARCH_FIXED_MASK \
49 (INTEL_ARCH_CNT_MASK| \
50 INTEL_ARCH_INV_MASK| \
51 INTEL_ARCH_EDGE_MASK|\
52 INTEL_ARCH_UNIT_MASK|\
53 INTEL_ARCH_EVENT_MASK)
42 54
43#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c 55#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
44#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) 56#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -105,6 +117,18 @@ union cpuid10_edx {
105 */ 117 */
106#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) 118#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
107 119
120/* IbsFetchCtl bits/masks */
121#define IBS_FETCH_RAND_EN (1ULL<<57)
122#define IBS_FETCH_VAL (1ULL<<49)
123#define IBS_FETCH_ENABLE (1ULL<<48)
124#define IBS_FETCH_CNT 0xFFFF0000ULL
125#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
126
127/* IbsOpCtl bits */
128#define IBS_OP_CNT_CTL (1ULL<<19)
129#define IBS_OP_VAL (1ULL<<18)
130#define IBS_OP_ENABLE (1ULL<<17)
131#define IBS_OP_MAX_CNT 0x0000FFFFULL
108 132
109#ifdef CONFIG_PERF_EVENTS 133#ifdef CONFIG_PERF_EVENTS
110extern void init_hw_perf_events(void); 134extern void init_hw_perf_events(void);
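
The new INTEL_ARCH_* constants carve the event-select register into its fields, replacing the old lumped ARCH_PERFMON_EVENT_MASK; a raw config value can then be decoded (or validated, as INTEL_ARCH_FIXED_MASK does) by ANDing with the field masks. For example, with an illustrative raw value:

    #include <stdint.h>
    #include <stdio.h>

    #define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL
    #define INTEL_ARCH_UNIT_MASK   0x0000FF00ULL

    int main(void)
    {
            uint64_t config = 0x004101c4ULL;  /* illustrative raw event */

            printf("event 0x%02llx, umask 0x%02llx\n",
                   (unsigned long long)(config & INTEL_ARCH_EVTSEL_MASK),
                   (unsigned long long)((config & INTEL_ARCH_UNIT_MASK) >> 8));
            return 0;
    }
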
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 9d369f680321..20102808b191 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -274,10 +274,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
274 return 0; 274 return 0;
275} 275}
276 276
277/* Get Nth argument at function call */
278extern unsigned long regs_get_argument_nth(struct pt_regs *regs,
279 unsigned int n);
280
281/* 277/*
282 * These are defined as per linux/ptrace.h, which see. 278 * These are defined as per linux/ptrace.h, which see.
283 */ 279 */
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 35e89122a42f..4dab78edbad9 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -3,8 +3,6 @@
3 3
4extern int kstack_depth_to_print; 4extern int kstack_depth_to_print;
5 5
6int x86_is_stack_id(int id, char *name);
7
8struct thread_info; 6struct thread_info;
9struct stacktrace_ops; 7struct stacktrace_ops;
10 8
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index ecb544e65382..e04740f7a0bb 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -11,9 +11,9 @@
11#include <linux/irqflags.h> 11#include <linux/irqflags.h>
12 12
13/* entries in ARCH_DLINFO: */ 13/* entries in ARCH_DLINFO: */
14#ifdef CONFIG_IA32_EMULATION 14#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
15# define AT_VECTOR_SIZE_ARCH 2 15# define AT_VECTOR_SIZE_ARCH 2
16#else 16#else /* else it's non-compat x86-64 */
17# define AT_VECTOR_SIZE_ARCH 1 17# define AT_VECTOR_SIZE_ARCH 1
18#endif 18#endif
19 19
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 036d28adf59d..af1c5833ff23 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void)
1185 if (!error) { 1185 if (!error) {
1186 acpi_lapic = 1; 1186 acpi_lapic = 1;
1187 1187
1188#ifdef CONFIG_X86_BIGSMP
1189 generic_bigsmp_probe();
1190#endif
1191 /* 1188 /*
1192 * Parse MADT IO-APIC entries 1189 * Parse MADT IO-APIC entries
1193 */ 1190 */
@@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void)
1197 acpi_ioapic = 1; 1194 acpi_ioapic = 1;
1198 1195
1199 smp_found_config = 1; 1196 smp_found_config = 1;
1200 if (apic->setup_apic_routing)
1201 apic->setup_apic_routing();
1202 } 1197 }
1203 } 1198 }
1204 if (error == -EINVAL) { 1199 if (error == -EINVAL) {
@@ -1349,14 +1344,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
1349 }, 1344 },
1350 { 1345 {
1351 .callback = force_acpi_ht, 1346 .callback = force_acpi_ht,
1352 .ident = "ASUS P2B-DS",
1353 .matches = {
1354 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1355 DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
1356 },
1357 },
1358 {
1359 .callback = force_acpi_ht,
1360 .ident = "ASUS CUR-DLS", 1347 .ident = "ASUS CUR-DLS",
1361 .matches = { 1348 .matches = {
1362 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 1349 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index de7353c0ce9c..e63b80e5861c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -390,6 +390,24 @@ void alternatives_smp_switch(int smp)
390 mutex_unlock(&smp_alt); 390 mutex_unlock(&smp_alt);
391} 391}
392 392
393/* Return 1 if the address range is reserved for smp-alternatives */
394int alternatives_text_reserved(void *start, void *end)
395{
396 struct smp_alt_module *mod;
397 u8 **ptr;
398 u8 *text_start = start;
399 u8 *text_end = end;
400
401 list_for_each_entry(mod, &smp_alt_modules, next) {
402 if (mod->text > text_end || mod->text_end < text_start)
403 continue;
404 for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
405 if (text_start <= *ptr && text_end >= *ptr)
406 return 1;
407 }
408
409 return 0;
410}
393#endif 411#endif
394 412
395#ifdef CONFIG_PARAVIRT 413#ifdef CONFIG_PARAVIRT
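
alternatives_text_reserved() above first discards modules whose [text, text_end] range is disjoint from the queried one, then checks whether any recorded lock byte falls inside it. The disjointness test is the standard inclusive-interval overlap check, isolated here:

    #include <assert.h>

    /* Inclusive ranges [a0,a1] and [b0,b1] overlap iff neither lies wholly
     * before the other -- the negation of the 'continue' condition above. */
    static int overlaps(unsigned long a0, unsigned long a1,
                        unsigned long b0, unsigned long b1)
    {
            return !(a0 > b1 || a1 < b0);
    }

    int main(void)
    {
            assert(overlaps(10, 20, 15, 30));   /* partial overlap */
            assert(overlaps(10, 20, 20, 30));   /* touching endpoints count */
            assert(!overlaps(10, 20, 21, 30));  /* disjoint */
            return 0;
    }
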
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3987e4408f75..dfca210f6a10 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
1641#endif 1641#endif
1642 1642
1643 enable_IR_x2apic(); 1643 enable_IR_x2apic();
1644#ifdef CONFIG_X86_64
1645 default_setup_apic_routing(); 1644 default_setup_apic_routing();
1646#endif
1647 1645
1648 verify_local_APIC(); 1646 verify_local_APIC();
1649 connect_bsp_APIC(); 1647 connect_bsp_APIC();
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
1891 if (apicid > max_physical_apicid) 1889 if (apicid > max_physical_apicid)
1892 max_physical_apicid = apicid; 1890 max_physical_apicid = apicid;
1893 1891
1894#ifdef CONFIG_X86_32
1895 if (num_processors > 8) {
1896 switch (boot_cpu_data.x86_vendor) {
1897 case X86_VENDOR_INTEL:
1898 if (!APIC_XAPIC(version)) {
1899 def_to_bigsmp = 0;
1900 break;
1901 }
1902 /* If P4 and above fall through */
1903 case X86_VENDOR_AMD:
1904 def_to_bigsmp = 1;
1905 }
1906 }
1907#endif
1908
1909#if defined(CONFIG_SMP) || defined(CONFIG_X86_64) 1892#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
1910 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; 1893 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1911 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; 1894 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1a6559f6768c..99d2fe016084 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
52} 52}
53late_initcall(print_ipi_mode); 53late_initcall(print_ipi_mode);
54 54
55void default_setup_apic_routing(void) 55void __init default_setup_apic_routing(void)
56{
57 int version = apic_version[boot_cpu_physical_apicid];
58
59 if (num_possible_cpus() > 8) {
60 switch (boot_cpu_data.x86_vendor) {
61 case X86_VENDOR_INTEL:
62 if (!APIC_XAPIC(version)) {
63 def_to_bigsmp = 0;
64 break;
65 }
66 /* If P4 and above fall through */
67 case X86_VENDOR_AMD:
68 def_to_bigsmp = 1;
69 }
70 }
71
72#ifdef CONFIG_X86_BIGSMP
73 generic_bigsmp_probe();
74#endif
75
76 if (apic->setup_apic_routing)
77 apic->setup_apic_routing();
78}
79
80static void setup_apic_flat_routing(void)
56{ 81{
57#ifdef CONFIG_X86_IO_APIC 82#ifdef CONFIG_X86_IO_APIC
58 printk(KERN_INFO 83 printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
103 .init_apic_ldr = default_init_apic_ldr, 128 .init_apic_ldr = default_init_apic_ldr,
104 129
105 .ioapic_phys_id_map = default_ioapic_phys_id_map, 130 .ioapic_phys_id_map = default_ioapic_phys_id_map,
106 .setup_apic_routing = default_setup_apic_routing, 131 .setup_apic_routing = setup_apic_flat_routing,
107 .multi_timer_check = NULL, 132 .multi_timer_check = NULL,
108 .apicid_to_node = default_apicid_to_node, 133 .apicid_to_node = default_apicid_to_node,
109 .cpu_to_logical_apicid = default_cpu_to_logical_apicid, 134 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 450fe2064a14..83e9be4778e2 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void)
67 } 67 }
68#endif 68#endif
69 69
70 if (apic == &apic_flat && num_processors > 8) 70 if (apic == &apic_flat && num_possible_cpus() > 8)
71 apic = &apic_physflat; 71 apic = &apic_physflat;
72 72
73 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); 73 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0..6e44519960c8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
1356 1356
1357 kfree(data->powernow_table); 1357 kfree(data->powernow_table);
1358 kfree(data); 1358 kfree(data);
1359 per_cpu(powernow_data, pol->cpu) = NULL;
1359 1360
1360 return 0; 1361 return 0;
1361} 1362}
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
1375 int err; 1376 int err;
1376 1377
1377 if (!data) 1378 if (!data)
1378 return -EINVAL; 1379 return 0;
1379 1380
1380 smp_call_function_single(cpu, query_values_on_cpu, &err, true); 1381 smp_call_function_single(cpu, query_values_on_cpu, &err, true);
1381 if (err) 1382 if (err)
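
Two fixes here: the exit path clears the per-CPU pointer it just freed, and powernowk8_get() stops returning -EINVAL — its return type is unsigned int, so a negative errno would be reported as an enormous frequency rather than an error. The pitfall in isolation:

    #include <stdio.h>

    /* An unsigned return type silently converts a negative errno... */
    static unsigned int get_freq_broken(void)
    {
            return -22;  /* -EINVAL */
    }

    int main(void)
    {
            printf("%u kHz\n", get_freq_broken());  /* 4294967274, not an error */
            return 0;
    }
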
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8c1c07073ccc..bfc43fa208bc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -7,6 +7,7 @@
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
10 * 11 *
11 * For licencing details see kernel-base/COPYING 12 * For licencing details see kernel-base/COPYING
12 */ 13 */
@@ -22,6 +23,7 @@
22#include <linux/uaccess.h> 23#include <linux/uaccess.h>
23#include <linux/highmem.h> 24#include <linux/highmem.h>
24#include <linux/cpu.h> 25#include <linux/cpu.h>
26#include <linux/bitops.h>
25 27
26#include <asm/apic.h> 28#include <asm/apic.h>
27#include <asm/stacktrace.h> 29#include <asm/stacktrace.h>
@@ -68,26 +70,59 @@ struct debug_store {
68 u64 pebs_event_reset[MAX_PEBS_EVENTS]; 70 u64 pebs_event_reset[MAX_PEBS_EVENTS];
69}; 71};
70 72
73struct event_constraint {
74 union {
75 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76 u64 idxmsk64;
77 };
78 u64 code;
79 u64 cmask;
80 int weight;
81};
82
83struct amd_nb {
84 int nb_id; /* NorthBridge id */
85 int refcnt; /* reference count */
86 struct perf_event *owners[X86_PMC_IDX_MAX];
87 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
88};
89
71struct cpu_hw_events { 90struct cpu_hw_events {
72 struct perf_event *events[X86_PMC_IDX_MAX]; 91 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 92 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 unsigned long interrupts; 93 unsigned long interrupts;
76 int enabled; 94 int enabled;
77 struct debug_store *ds; 95 struct debug_store *ds;
78};
79 96
80struct event_constraint { 97 int n_events;
81 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 98 int n_added;
82 int code; 99 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
100 u64 tags[X86_PMC_IDX_MAX];
101 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
102 struct amd_nb *amd_nb;
83}; 103};
84 104
85#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) } 105#define __EVENT_CONSTRAINT(c, n, m, w) {\
86#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 } 106 { .idxmsk64 = (n) }, \
107 .code = (c), \
108 .cmask = (m), \
109 .weight = (w), \
110}
111
112#define EVENT_CONSTRAINT(c, n, m) \
113 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
87 114
88#define for_each_event_constraint(e, c) \ 115#define INTEL_EVENT_CONSTRAINT(c, n) \
89 for ((e) = (c); (e)->idxmsk[0]; (e)++) 116 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
90 117
118#define FIXED_EVENT_CONSTRAINT(c, n) \
119 EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
120
121#define EVENT_CONSTRAINT_END \
122 EVENT_CONSTRAINT(0, 0, 0)
123
124#define for_each_event_constraint(e, c) \
125 for ((e) = (c); (e)->cmask; (e)++)
91 126
92/* 127/*
93 * struct x86_pmu - generic x86 pmu 128 * struct x86_pmu - generic x86 pmu
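The reworked event_constraint is the heart of this patch: idxmsk64 is a bitmask of the counters an event may occupy, and weight caches its population count so the scheduler can order events by how constrained they are. EVENT_CONSTRAINT() fills weight at compile time via HWEIGHT(n); FIXED_EVENT_CONSTRAINT() sets bit 32+n because fixed counters start at index X86_PMC_IDX_FIXED (32 in this kernel). A small user-space sketch of how one entry is read, using a constraint taken from the old Nehalem table removed a little further down (event 0x40, counters 0-1):

#include <stdint.h>
#include <stdio.h>

struct constraint {
	uint64_t idxmsk;	/* legal counters, one bit each */
	uint64_t code;		/* event select code */
	int	 weight;	/* popcount(idxmsk) */
};

int main(void)
{
	struct constraint c = { .idxmsk = 0x3, .code = 0x40 };

	/* the kernel precomputes this with HWEIGHT() */
	c.weight = __builtin_popcountll(c.idxmsk);
	printf("event %#llx: %d legal counter(s)\n",
	       (unsigned long long)c.code, c.weight);
	return 0;
}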
@@ -114,8 +149,14 @@ struct x86_pmu {
114 u64 intel_ctrl; 149 u64 intel_ctrl;
115 void (*enable_bts)(u64 config); 150 void (*enable_bts)(u64 config);
116 void (*disable_bts)(void); 151 void (*disable_bts)(void);
117 int (*get_event_idx)(struct cpu_hw_events *cpuc, 152
118 struct hw_perf_event *hwc); 153 struct event_constraint *
154 (*get_event_constraints)(struct cpu_hw_events *cpuc,
155 struct perf_event *event);
156
157 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
158 struct perf_event *event);
159 struct event_constraint *event_constraints;
119}; 160};
120 161
121static struct x86_pmu x86_pmu __read_mostly; 162static struct x86_pmu x86_pmu __read_mostly;
@@ -124,111 +165,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
124 .enabled = 1, 165 .enabled = 1,
125}; 166};
126 167
127static const struct event_constraint *event_constraints; 168static int x86_perf_event_set_period(struct perf_event *event,
128 169 struct hw_perf_event *hwc, int idx);
129/*
130 * Not sure about some of these
131 */
132static const u64 p6_perfmon_event_map[] =
133{
134 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
135 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
136 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
137 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
138 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
139 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
140 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
141};
142
143static u64 p6_pmu_event_map(int hw_event)
144{
145 return p6_perfmon_event_map[hw_event];
146}
147
148/*
149 * Event setting that is specified not to count anything.
150 * We use this to effectively disable a counter.
151 *
152 * L2_RQSTS with 0 MESI unit mask.
153 */
154#define P6_NOP_EVENT 0x0000002EULL
155
156static u64 p6_pmu_raw_event(u64 hw_event)
157{
158#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
159#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
160#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
161#define P6_EVNTSEL_INV_MASK 0x00800000ULL
162#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
163
164#define P6_EVNTSEL_MASK \
165 (P6_EVNTSEL_EVENT_MASK | \
166 P6_EVNTSEL_UNIT_MASK | \
167 P6_EVNTSEL_EDGE_MASK | \
168 P6_EVNTSEL_INV_MASK | \
169 P6_EVNTSEL_REG_MASK)
170
171 return hw_event & P6_EVNTSEL_MASK;
172}
173
174static const struct event_constraint intel_p6_event_constraints[] =
175{
176 EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
177 EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
178 EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
179 EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
180 EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
181 EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
182 EVENT_CONSTRAINT_END
183};
184
185/*
186 * Intel PerfMon v3. Used on Core2 and later.
187 */
188static const u64 intel_perfmon_event_map[] =
189{
190 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
191 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
192 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
193 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
194 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
195 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
196 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
197};
198
199static const struct event_constraint intel_core_event_constraints[] =
200{
201 EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
202 EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
203 EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
204 EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
205 EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
206 EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
207 EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
208 EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
209 EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
210 EVENT_CONSTRAINT_END
211};
212
213static const struct event_constraint intel_nehalem_event_constraints[] =
214{
215 EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
216 EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
217 EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
218 EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
219 EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
220 EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
221 EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
222 EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
223 EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
224 EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
225 EVENT_CONSTRAINT_END
226};
227
228static u64 intel_pmu_event_map(int hw_event)
229{
230 return intel_perfmon_event_map[hw_event];
231}
232 170
233/* 171/*
234 * Generalized hw caching related hw_event table, filled 172 * Generalized hw caching related hw_event table, filled
@@ -245,424 +183,6 @@ static u64 __read_mostly hw_cache_event_ids
245 [PERF_COUNT_HW_CACHE_OP_MAX] 183 [PERF_COUNT_HW_CACHE_OP_MAX]
246 [PERF_COUNT_HW_CACHE_RESULT_MAX]; 184 [PERF_COUNT_HW_CACHE_RESULT_MAX];
247 185
248static __initconst u64 nehalem_hw_cache_event_ids
249 [PERF_COUNT_HW_CACHE_MAX]
250 [PERF_COUNT_HW_CACHE_OP_MAX]
251 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
252{
253 [ C(L1D) ] = {
254 [ C(OP_READ) ] = {
255 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
256 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
257 },
258 [ C(OP_WRITE) ] = {
259 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
260 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
261 },
262 [ C(OP_PREFETCH) ] = {
263 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
264 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
265 },
266 },
267 [ C(L1I ) ] = {
268 [ C(OP_READ) ] = {
269 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
270 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
271 },
272 [ C(OP_WRITE) ] = {
273 [ C(RESULT_ACCESS) ] = -1,
274 [ C(RESULT_MISS) ] = -1,
275 },
276 [ C(OP_PREFETCH) ] = {
277 [ C(RESULT_ACCESS) ] = 0x0,
278 [ C(RESULT_MISS) ] = 0x0,
279 },
280 },
281 [ C(LL ) ] = {
282 [ C(OP_READ) ] = {
283 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
284 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
285 },
286 [ C(OP_WRITE) ] = {
287 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
288 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
289 },
290 [ C(OP_PREFETCH) ] = {
291 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
292 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
293 },
294 },
295 [ C(DTLB) ] = {
296 [ C(OP_READ) ] = {
297 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
298 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
299 },
300 [ C(OP_WRITE) ] = {
301 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
302 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
303 },
304 [ C(OP_PREFETCH) ] = {
305 [ C(RESULT_ACCESS) ] = 0x0,
306 [ C(RESULT_MISS) ] = 0x0,
307 },
308 },
309 [ C(ITLB) ] = {
310 [ C(OP_READ) ] = {
311 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
312 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
313 },
314 [ C(OP_WRITE) ] = {
315 [ C(RESULT_ACCESS) ] = -1,
316 [ C(RESULT_MISS) ] = -1,
317 },
318 [ C(OP_PREFETCH) ] = {
319 [ C(RESULT_ACCESS) ] = -1,
320 [ C(RESULT_MISS) ] = -1,
321 },
322 },
323 [ C(BPU ) ] = {
324 [ C(OP_READ) ] = {
325 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
326 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
327 },
328 [ C(OP_WRITE) ] = {
329 [ C(RESULT_ACCESS) ] = -1,
330 [ C(RESULT_MISS) ] = -1,
331 },
332 [ C(OP_PREFETCH) ] = {
333 [ C(RESULT_ACCESS) ] = -1,
334 [ C(RESULT_MISS) ] = -1,
335 },
336 },
337};
338
339static __initconst u64 core2_hw_cache_event_ids
340 [PERF_COUNT_HW_CACHE_MAX]
341 [PERF_COUNT_HW_CACHE_OP_MAX]
342 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
343{
344 [ C(L1D) ] = {
345 [ C(OP_READ) ] = {
346 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
347 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
348 },
349 [ C(OP_WRITE) ] = {
350 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
351 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
352 },
353 [ C(OP_PREFETCH) ] = {
354 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
355 [ C(RESULT_MISS) ] = 0,
356 },
357 },
358 [ C(L1I ) ] = {
359 [ C(OP_READ) ] = {
360 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
361 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
362 },
363 [ C(OP_WRITE) ] = {
364 [ C(RESULT_ACCESS) ] = -1,
365 [ C(RESULT_MISS) ] = -1,
366 },
367 [ C(OP_PREFETCH) ] = {
368 [ C(RESULT_ACCESS) ] = 0,
369 [ C(RESULT_MISS) ] = 0,
370 },
371 },
372 [ C(LL ) ] = {
373 [ C(OP_READ) ] = {
374 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
375 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
376 },
377 [ C(OP_WRITE) ] = {
378 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
379 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
380 },
381 [ C(OP_PREFETCH) ] = {
382 [ C(RESULT_ACCESS) ] = 0,
383 [ C(RESULT_MISS) ] = 0,
384 },
385 },
386 [ C(DTLB) ] = {
387 [ C(OP_READ) ] = {
388 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
389 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
390 },
391 [ C(OP_WRITE) ] = {
392 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
393 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
394 },
395 [ C(OP_PREFETCH) ] = {
396 [ C(RESULT_ACCESS) ] = 0,
397 [ C(RESULT_MISS) ] = 0,
398 },
399 },
400 [ C(ITLB) ] = {
401 [ C(OP_READ) ] = {
402 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
403 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
404 },
405 [ C(OP_WRITE) ] = {
406 [ C(RESULT_ACCESS) ] = -1,
407 [ C(RESULT_MISS) ] = -1,
408 },
409 [ C(OP_PREFETCH) ] = {
410 [ C(RESULT_ACCESS) ] = -1,
411 [ C(RESULT_MISS) ] = -1,
412 },
413 },
414 [ C(BPU ) ] = {
415 [ C(OP_READ) ] = {
416 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
417 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
418 },
419 [ C(OP_WRITE) ] = {
420 [ C(RESULT_ACCESS) ] = -1,
421 [ C(RESULT_MISS) ] = -1,
422 },
423 [ C(OP_PREFETCH) ] = {
424 [ C(RESULT_ACCESS) ] = -1,
425 [ C(RESULT_MISS) ] = -1,
426 },
427 },
428};
429
430static __initconst u64 atom_hw_cache_event_ids
431 [PERF_COUNT_HW_CACHE_MAX]
432 [PERF_COUNT_HW_CACHE_OP_MAX]
433 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
434{
435 [ C(L1D) ] = {
436 [ C(OP_READ) ] = {
437 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
438 [ C(RESULT_MISS) ] = 0,
439 },
440 [ C(OP_WRITE) ] = {
441 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
442 [ C(RESULT_MISS) ] = 0,
443 },
444 [ C(OP_PREFETCH) ] = {
445 [ C(RESULT_ACCESS) ] = 0x0,
446 [ C(RESULT_MISS) ] = 0,
447 },
448 },
449 [ C(L1I ) ] = {
450 [ C(OP_READ) ] = {
451 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
452 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
453 },
454 [ C(OP_WRITE) ] = {
455 [ C(RESULT_ACCESS) ] = -1,
456 [ C(RESULT_MISS) ] = -1,
457 },
458 [ C(OP_PREFETCH) ] = {
459 [ C(RESULT_ACCESS) ] = 0,
460 [ C(RESULT_MISS) ] = 0,
461 },
462 },
463 [ C(LL ) ] = {
464 [ C(OP_READ) ] = {
465 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
466 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
467 },
468 [ C(OP_WRITE) ] = {
469 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
470 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
471 },
472 [ C(OP_PREFETCH) ] = {
473 [ C(RESULT_ACCESS) ] = 0,
474 [ C(RESULT_MISS) ] = 0,
475 },
476 },
477 [ C(DTLB) ] = {
478 [ C(OP_READ) ] = {
479 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
480 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
481 },
482 [ C(OP_WRITE) ] = {
483 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
484 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
485 },
486 [ C(OP_PREFETCH) ] = {
487 [ C(RESULT_ACCESS) ] = 0,
488 [ C(RESULT_MISS) ] = 0,
489 },
490 },
491 [ C(ITLB) ] = {
492 [ C(OP_READ) ] = {
493 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
494 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
495 },
496 [ C(OP_WRITE) ] = {
497 [ C(RESULT_ACCESS) ] = -1,
498 [ C(RESULT_MISS) ] = -1,
499 },
500 [ C(OP_PREFETCH) ] = {
501 [ C(RESULT_ACCESS) ] = -1,
502 [ C(RESULT_MISS) ] = -1,
503 },
504 },
505 [ C(BPU ) ] = {
506 [ C(OP_READ) ] = {
507 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
508 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
509 },
510 [ C(OP_WRITE) ] = {
511 [ C(RESULT_ACCESS) ] = -1,
512 [ C(RESULT_MISS) ] = -1,
513 },
514 [ C(OP_PREFETCH) ] = {
515 [ C(RESULT_ACCESS) ] = -1,
516 [ C(RESULT_MISS) ] = -1,
517 },
518 },
519};
520
521static u64 intel_pmu_raw_event(u64 hw_event)
522{
523#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
524#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
525#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
526#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
527#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
528
529#define CORE_EVNTSEL_MASK \
530 (CORE_EVNTSEL_EVENT_MASK | \
531 CORE_EVNTSEL_UNIT_MASK | \
532 CORE_EVNTSEL_EDGE_MASK | \
533 CORE_EVNTSEL_INV_MASK | \
534 CORE_EVNTSEL_REG_MASK)
535
536 return hw_event & CORE_EVNTSEL_MASK;
537}
538
539static __initconst u64 amd_hw_cache_event_ids
540 [PERF_COUNT_HW_CACHE_MAX]
541 [PERF_COUNT_HW_CACHE_OP_MAX]
542 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
543{
544 [ C(L1D) ] = {
545 [ C(OP_READ) ] = {
546 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
547 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
548 },
549 [ C(OP_WRITE) ] = {
550 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
551 [ C(RESULT_MISS) ] = 0,
552 },
553 [ C(OP_PREFETCH) ] = {
554 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
555 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
556 },
557 },
558 [ C(L1I ) ] = {
559 [ C(OP_READ) ] = {
560 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
561 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
562 },
563 [ C(OP_WRITE) ] = {
564 [ C(RESULT_ACCESS) ] = -1,
565 [ C(RESULT_MISS) ] = -1,
566 },
567 [ C(OP_PREFETCH) ] = {
568 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
569 [ C(RESULT_MISS) ] = 0,
570 },
571 },
572 [ C(LL ) ] = {
573 [ C(OP_READ) ] = {
574 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
575 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
576 },
577 [ C(OP_WRITE) ] = {
578 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
579 [ C(RESULT_MISS) ] = 0,
580 },
581 [ C(OP_PREFETCH) ] = {
582 [ C(RESULT_ACCESS) ] = 0,
583 [ C(RESULT_MISS) ] = 0,
584 },
585 },
586 [ C(DTLB) ] = {
587 [ C(OP_READ) ] = {
588 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
 589 [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
590 },
591 [ C(OP_WRITE) ] = {
592 [ C(RESULT_ACCESS) ] = 0,
593 [ C(RESULT_MISS) ] = 0,
594 },
595 [ C(OP_PREFETCH) ] = {
596 [ C(RESULT_ACCESS) ] = 0,
597 [ C(RESULT_MISS) ] = 0,
598 },
599 },
600 [ C(ITLB) ] = {
601 [ C(OP_READ) ] = {
 602 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
603 [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
604 },
605 [ C(OP_WRITE) ] = {
606 [ C(RESULT_ACCESS) ] = -1,
607 [ C(RESULT_MISS) ] = -1,
608 },
609 [ C(OP_PREFETCH) ] = {
610 [ C(RESULT_ACCESS) ] = -1,
611 [ C(RESULT_MISS) ] = -1,
612 },
613 },
614 [ C(BPU ) ] = {
615 [ C(OP_READ) ] = {
616 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
617 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
618 },
619 [ C(OP_WRITE) ] = {
620 [ C(RESULT_ACCESS) ] = -1,
621 [ C(RESULT_MISS) ] = -1,
622 },
623 [ C(OP_PREFETCH) ] = {
624 [ C(RESULT_ACCESS) ] = -1,
625 [ C(RESULT_MISS) ] = -1,
626 },
627 },
628};
629
630/*
631 * AMD Performance Monitor K7 and later.
632 */
633static const u64 amd_perfmon_event_map[] =
634{
635 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
636 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
637 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
638 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
639 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
640 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
641};
642
643static u64 amd_pmu_event_map(int hw_event)
644{
645 return amd_perfmon_event_map[hw_event];
646}
647
648static u64 amd_pmu_raw_event(u64 hw_event)
649{
650#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
651#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
652#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
653#define K7_EVNTSEL_INV_MASK 0x000800000ULL
654#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
655
656#define K7_EVNTSEL_MASK \
657 (K7_EVNTSEL_EVENT_MASK | \
658 K7_EVNTSEL_UNIT_MASK | \
659 K7_EVNTSEL_EDGE_MASK | \
660 K7_EVNTSEL_INV_MASK | \
661 K7_EVNTSEL_REG_MASK)
662
663 return hw_event & K7_EVNTSEL_MASK;
664}
665
666/* 186/*
667 * Propagate event elapsed time into the generic event. 187 * Propagate event elapsed time into the generic event.
668 * Can only be executed on the CPU where the event is active. 188 * Can only be executed on the CPU where the event is active.
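Nothing in the large deletions above is lost: the P6, Core/Nehalem/Atom and AMD event maps, cache tables, constraint tables and raw_event filters all resurface in perf_event_p6.c, perf_event_intel.c and perf_event_amd.c, which this file #includes near the end of the diff. The raw_event filters the vendors share follow one pattern, sketched here with mask values copied from the deleted CORE_EVNTSEL_* definitions:

#include <stdint.h>

/* Keep only the evtsel fields user space may set in a raw config;
 * everything else (USR/OS, INT, EN, ...) is stripped. */
#define EVNTSEL_EVENT_MASK	0x000000FFull
#define EVNTSEL_UNIT_MASK	0x0000FF00ull
#define EVNTSEL_EDGE_MASK	0x00040000ull
#define EVNTSEL_INV_MASK	0x00800000ull
#define EVNTSEL_REG_MASK	0xFF000000ull

static uint64_t sanitize_raw_event(uint64_t hw_event)
{
	return hw_event & (EVNTSEL_EVENT_MASK | EVNTSEL_UNIT_MASK |
			   EVNTSEL_EDGE_MASK | EVNTSEL_INV_MASK |
			   EVNTSEL_REG_MASK);
}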
@@ -914,42 +434,6 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
914 return 0; 434 return 0;
915} 435}
916 436
917static void intel_pmu_enable_bts(u64 config)
918{
919 unsigned long debugctlmsr;
920
921 debugctlmsr = get_debugctlmsr();
922
923 debugctlmsr |= X86_DEBUGCTL_TR;
924 debugctlmsr |= X86_DEBUGCTL_BTS;
925 debugctlmsr |= X86_DEBUGCTL_BTINT;
926
927 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
928 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
929
930 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
931 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
932
933 update_debugctlmsr(debugctlmsr);
934}
935
936static void intel_pmu_disable_bts(void)
937{
938 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
939 unsigned long debugctlmsr;
940
941 if (!cpuc->ds)
942 return;
943
944 debugctlmsr = get_debugctlmsr();
945
946 debugctlmsr &=
947 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
948 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
949
950 update_debugctlmsr(debugctlmsr);
951}
952
953/* 437/*
954 * Setup the hardware configuration for a given attr_type 438 * Setup the hardware configuration for a given attr_type
955 */ 439 */
@@ -988,6 +472,8 @@ static int __hw_perf_event_init(struct perf_event *event)
988 hwc->config = ARCH_PERFMON_EVENTSEL_INT; 472 hwc->config = ARCH_PERFMON_EVENTSEL_INT;
989 473
990 hwc->idx = -1; 474 hwc->idx = -1;
475 hwc->last_cpu = -1;
476 hwc->last_tag = ~0ULL;
991 477
992 /* 478 /*
993 * Count user and OS events unless requested not to. 479 * Count user and OS events unless requested not to.
@@ -1017,6 +503,9 @@ static int __hw_perf_event_init(struct perf_event *event)
1017 */ 503 */
1018 if (attr->type == PERF_TYPE_RAW) { 504 if (attr->type == PERF_TYPE_RAW) {
1019 hwc->config |= x86_pmu.raw_event(attr->config); 505 hwc->config |= x86_pmu.raw_event(attr->config);
506 if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
507 perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
508 return -EACCES;
1020 return 0; 509 return 0;
1021 } 510 }
1022 511
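The new PERF_TYPE_RAW guard above closes a hole: ARCH_PERFMON_EVENTSEL_ANY (the AnyThread bit, bit 21 of IA32_PERFEVTSELx) makes a counter count events from both hardware threads of a HyperThreaded core, so an unprivileged raw config could observe a sibling thread. Under perf_paranoid_cpu() the bit now requires CAP_SYS_ADMIN. A sketch of the check's logic, outside the kernel:

#include <stdint.h>

#define EVENTSEL_ANY	(1ull << 21)	/* AnyThread */

static int raw_config_allowed(uint64_t config, int paranoid, int sys_admin)
{
	if ((config & EVENTSEL_ANY) && paranoid && !sys_admin)
		return 0;	/* the kernel returns -EACCES here */
	return 1;
}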
@@ -1056,216 +545,323 @@ static int __hw_perf_event_init(struct perf_event *event)
1056 return 0; 545 return 0;
1057} 546}
1058 547
1059static void p6_pmu_disable_all(void) 548static void x86_pmu_disable_all(void)
1060{ 549{
1061 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 550 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1062 u64 val; 551 int idx;
1063
1064 if (!cpuc->enabled)
1065 return;
1066 552
1067 cpuc->enabled = 0; 553 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1068 barrier(); 554 u64 val;
1069 555
1070 /* p6 only has one enable register */ 556 if (!test_bit(idx, cpuc->active_mask))
1071 rdmsrl(MSR_P6_EVNTSEL0, val); 557 continue;
1072 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; 558 rdmsrl(x86_pmu.eventsel + idx, val);
1073 wrmsrl(MSR_P6_EVNTSEL0, val); 559 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
560 continue;
561 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
562 wrmsrl(x86_pmu.eventsel + idx, val);
563 }
1074} 564}
1075 565
1076static void intel_pmu_disable_all(void) 566void hw_perf_disable(void)
1077{ 567{
1078 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 568 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1079 569
570 if (!x86_pmu_initialized())
571 return;
572
1080 if (!cpuc->enabled) 573 if (!cpuc->enabled)
1081 return; 574 return;
1082 575
576 cpuc->n_added = 0;
1083 cpuc->enabled = 0; 577 cpuc->enabled = 0;
1084 barrier(); 578 barrier();
1085 579
1086 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 580 x86_pmu.disable_all();
1087
1088 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1089 intel_pmu_disable_bts();
1090} 581}
1091 582
1092static void amd_pmu_disable_all(void) 583static void x86_pmu_enable_all(void)
1093{ 584{
1094 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 585 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1095 int idx; 586 int idx;
1096 587
1097 if (!cpuc->enabled)
1098 return;
1099
1100 cpuc->enabled = 0;
1101 /*
1102 * ensure we write the disable before we start disabling the
1103 * events proper, so that amd_pmu_enable_event() does the
1104 * right thing.
1105 */
1106 barrier();
1107
1108 for (idx = 0; idx < x86_pmu.num_events; idx++) { 588 for (idx = 0; idx < x86_pmu.num_events; idx++) {
589 struct perf_event *event = cpuc->events[idx];
1109 u64 val; 590 u64 val;
1110 591
1111 if (!test_bit(idx, cpuc->active_mask)) 592 if (!test_bit(idx, cpuc->active_mask))
1112 continue; 593 continue;
1113 rdmsrl(MSR_K7_EVNTSEL0 + idx, val); 594
1114 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) 595 val = event->hw.config;
1115 continue; 596 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
1116 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; 597 wrmsrl(x86_pmu.eventsel + idx, val);
1117 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1118 } 598 }
1119} 599}
1120 600
1121void hw_perf_disable(void) 601static const struct pmu pmu;
602
603static inline int is_x86_event(struct perf_event *event)
1122{ 604{
1123 if (!x86_pmu_initialized()) 605 return event->pmu == &pmu;
1124 return;
1125 return x86_pmu.disable_all();
1126} 606}
1127 607
1128static void p6_pmu_enable_all(void) 608static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1129{ 609{
1130 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 610 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1131 unsigned long val; 611 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
612 int i, j, w, wmax, num = 0;
613 struct hw_perf_event *hwc;
1132 614
1133 if (cpuc->enabled) 615 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1134 return;
1135 616
1136 cpuc->enabled = 1; 617 for (i = 0; i < n; i++) {
1137 barrier(); 618 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
619 constraints[i] = c;
620 }
1138 621
1139 /* p6 only has one enable register */ 622 /*
1140 rdmsrl(MSR_P6_EVNTSEL0, val); 623 * fastpath, try to reuse previous register
1141 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 624 */
1142 wrmsrl(MSR_P6_EVNTSEL0, val); 625 for (i = 0; i < n; i++) {
1143} 626 hwc = &cpuc->event_list[i]->hw;
627 c = constraints[i];
1144 628
1145static void intel_pmu_enable_all(void) 629 /* never assigned */
1146{ 630 if (hwc->idx == -1)
1147 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 631 break;
1148 632
1149 if (cpuc->enabled) 633 /* constraint still honored */
1150 return; 634 if (!test_bit(hwc->idx, c->idxmsk))
635 break;
1151 636
1152 cpuc->enabled = 1; 637 /* not already used */
1153 barrier(); 638 if (test_bit(hwc->idx, used_mask))
639 break;
640
641 set_bit(hwc->idx, used_mask);
642 if (assign)
643 assign[i] = hwc->idx;
644 }
645 if (i == n)
646 goto done;
647
648 /*
649 * begin slow path
650 */
651
652 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1154 653
1155 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 654 /*
655 * weight = number of possible counters
656 *
657 * 1 = most constrained, only works on one counter
658 * wmax = least constrained, works on any counter
659 *
660 * assign events to counters starting with most
661 * constrained events.
662 */
663 wmax = x86_pmu.num_events;
664
665 /*
666 * when fixed event counters are present,
667 * wmax is incremented by 1 to account
668 * for one more choice
669 */
670 if (x86_pmu.num_events_fixed)
671 wmax++;
672
673 for (w = 1, num = n; num && w <= wmax; w++) {
674 /* for each event */
675 for (i = 0; num && i < n; i++) {
676 c = constraints[i];
677 hwc = &cpuc->event_list[i]->hw;
678
679 if (c->weight != w)
680 continue;
681
682 for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
683 if (!test_bit(j, used_mask))
684 break;
685 }
1156 686
1157 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 687 if (j == X86_PMC_IDX_MAX)
1158 struct perf_event *event = 688 break;
1159 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1160 689
1161 if (WARN_ON_ONCE(!event)) 690 set_bit(j, used_mask);
1162 return;
1163 691
1164 intel_pmu_enable_bts(event->hw.config); 692 if (assign)
693 assign[i] = j;
694 num--;
695 }
696 }
697done:
698 /*
699 * scheduling failed or is just a simulation,
700 * free resources if necessary
701 */
702 if (!assign || num) {
703 for (i = 0; i < n; i++) {
704 if (x86_pmu.put_event_constraints)
705 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
706 }
1165 } 707 }
708 return num ? -ENOSPC : 0;
1166} 709}
1167 710
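x86_schedule_events() above replaces the old per-event get_event_idx() allocators with whole-set scheduling: a fast path that keeps every event on its previous counter when constraints still hold, and a slow path that places events in order of increasing weight, so an event that fits only one counter is never crowded out by an unconstrained one. A standalone sketch of the slow path with hypothetical constraints (4 generic counters assumed, error handling simplified):

#include <stdint.h>
#include <stdio.h>

#define NCTR 4

struct ev {
	uint64_t idxmsk;	/* legal counters */
	int weight;		/* popcount(idxmsk) */
	int assigned;
};

static int schedule(struct ev *ev, int n)
{
	uint64_t used = 0;
	int w, i, j, num = n;

	/* most constrained events (lowest weight) first */
	for (w = 1; num && w <= NCTR; w++) {
		for (i = 0; num && i < n; i++) {
			if (ev[i].weight != w)
				continue;
			for (j = 0; j < NCTR; j++)
				if ((ev[i].idxmsk & (1ull << j)) &&
				    !(used & (1ull << j)))
					break;
			if (j == NCTR)
				return -1;	/* -ENOSPC in the kernel */
			used |= 1ull << j;
			ev[i].assigned = j;
			num--;
		}
	}
	return num ? -1 : 0;
}

int main(void)
{
	struct ev ev[3] = {
		{ 0xf, 4, -1 },	/* unconstrained */
		{ 0x1, 1, -1 },	/* counter 0 only */
		{ 0x3, 2, -1 },	/* counters 0-1 */
	};
	int i;

	if (schedule(ev, 3))
		return 1;
	for (i = 0; i < 3; i++)
		printf("event %d -> counter %d\n", i, ev[i].assigned);
	return 0;
}

This greedy most-constrained-first order is a heuristic, not an optimal solver, which is also why the code bumps wmax by one when fixed counters exist: they add one more placement choice per event.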
1168static void amd_pmu_enable_all(void) 711/*
712 * dogrp: true if must collect siblings events (group)
713 * returns total number of events and error code
714 */
715static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1169{ 716{
1170 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 717 struct perf_event *event;
1171 int idx; 718 int n, max_count;
1172 719
1173 if (cpuc->enabled) 720 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1174 return;
1175 721
1176 cpuc->enabled = 1; 722 /* current number of events already accepted */
1177 barrier(); 723 n = cpuc->n_events;
1178 724
1179 for (idx = 0; idx < x86_pmu.num_events; idx++) { 725 if (is_x86_event(leader)) {
1180 struct perf_event *event = cpuc->events[idx]; 726 if (n >= max_count)
1181 u64 val; 727 return -ENOSPC;
728 cpuc->event_list[n] = leader;
729 n++;
730 }
731 if (!dogrp)
732 return n;
1182 733
1183 if (!test_bit(idx, cpuc->active_mask)) 734 list_for_each_entry(event, &leader->sibling_list, group_entry) {
735 if (!is_x86_event(event) ||
736 event->state <= PERF_EVENT_STATE_OFF)
1184 continue; 737 continue;
1185 738
1186 val = event->hw.config; 739 if (n >= max_count)
1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 740 return -ENOSPC;
1188 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1189 }
1190}
1191 741
1192void hw_perf_enable(void) 742 cpuc->event_list[n] = event;
1193{ 743 n++;
1194 if (!x86_pmu_initialized()) 744 }
1195 return; 745 return n;
1196 x86_pmu.enable_all();
1197} 746}
1198 747
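collect_events() caps a group at max_count = x86_pmu.num_events + x86_pmu.num_events_fixed (for example 4 generic plus 3 fixed slots on a Nehalem-class PMU), and only hardware events, identified by is_x86_event(), consume those slots; software events mixed into the same group are skipped here and handled by their own pmu.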
1199static inline u64 intel_pmu_get_status(void) 748static inline void x86_assign_hw_event(struct perf_event *event,
749 struct cpu_hw_events *cpuc, int i)
1200{ 750{
1201 u64 status; 751 struct hw_perf_event *hwc = &event->hw;
1202 752
1203 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); 753 hwc->idx = cpuc->assign[i];
754 hwc->last_cpu = smp_processor_id();
755 hwc->last_tag = ++cpuc->tags[i];
1204 756
1205 return status; 757 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
758 hwc->config_base = 0;
759 hwc->event_base = 0;
760 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
761 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
762 /*
763 * We set it so that event_base + idx in wrmsr/rdmsr maps to
764 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
765 */
766 hwc->event_base =
767 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
768 } else {
769 hwc->config_base = x86_pmu.eventsel;
770 hwc->event_base = x86_pmu.perfctr;
771 }
1206} 772}
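The fixed-counter branch of x86_assign_hw_event() relies on an offset trick: the generic read/write paths always compute event_base + idx, so storing MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED as the base makes the same arithmetic land on the fixed-counter MSRs. A worked example, assuming X86_PMC_IDX_FIXED == 32 and MSR_ARCH_PERFMON_FIXED_CTR0 == 0x309:

#define X86_PMC_IDX_FIXED		32
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309

static unsigned int fixed_ctr_msr(int idx)
{
	unsigned int base = MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;

	/* idx == 33 (fixed counter 1): 0x309 - 32 + 33 == 0x30a */
	return base + idx;
}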
1207 773
1208static inline void intel_pmu_ack_status(u64 ack) 774static inline int match_prev_assignment(struct hw_perf_event *hwc,
775 struct cpu_hw_events *cpuc,
776 int i)
1209{ 777{
1210 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 778 return hwc->idx == cpuc->assign[i] &&
779 hwc->last_cpu == smp_processor_id() &&
780 hwc->last_tag == cpuc->tags[i];
1211} 781}
1212 782
1213static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) 783static void x86_pmu_stop(struct perf_event *event);
1214{
1215 (void)checking_wrmsrl(hwc->config_base + idx,
1216 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1217}
1218 784
1219static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) 785void hw_perf_enable(void)
1220{ 786{
1221 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); 787 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1222} 788 struct perf_event *event;
789 struct hw_perf_event *hwc;
790 int i;
1223 791
1224static inline void 792 if (!x86_pmu_initialized())
1225intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) 793 return;
1226{
1227 int idx = __idx - X86_PMC_IDX_FIXED;
1228 u64 ctrl_val, mask;
1229 794
1230 mask = 0xfULL << (idx * 4); 795 if (cpuc->enabled)
796 return;
1231 797
1232 rdmsrl(hwc->config_base, ctrl_val); 798 if (cpuc->n_added) {
1233 ctrl_val &= ~mask; 799 /*
1234 (void)checking_wrmsrl(hwc->config_base, ctrl_val); 800 * apply assignment obtained either from
1235} 801 * hw_perf_group_sched_in() or x86_pmu_enable()
802 *
803 * step1: save events moving to new counters
804 * step2: reprogram moved events into new counters
805 */
806 for (i = 0; i < cpuc->n_events; i++) {
1236 807
1237static inline void 808 event = cpuc->event_list[i];
1238p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) 809 hwc = &event->hw;
1239{
1240 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1241 u64 val = P6_NOP_EVENT;
1242 810
1243 if (cpuc->enabled) 811 /*
1244 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 812 * we can avoid reprogramming counter if:
813 * - assigned same counter as last time
814 * - running on same CPU as last time
815 * - no other event has used the counter since
816 */
817 if (hwc->idx == -1 ||
818 match_prev_assignment(hwc, cpuc, i))
819 continue;
1245 820
1246 (void)checking_wrmsrl(hwc->config_base + idx, val); 821 x86_pmu_stop(event);
1247}
1248 822
1249static inline void 823 hwc->idx = -1;
1250intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) 824 }
1251{
1252 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1253 intel_pmu_disable_bts();
1254 return;
1255 }
1256 825
1257 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 826 for (i = 0; i < cpuc->n_events; i++) {
1258 intel_pmu_disable_fixed(hwc, idx); 827
1259 return; 828 event = cpuc->event_list[i];
829 hwc = &event->hw;
830
831 if (hwc->idx == -1) {
832 x86_assign_hw_event(event, cpuc, i);
833 x86_perf_event_set_period(event, hwc, hwc->idx);
834 }
835 /*
836 * need to mark as active because x86_pmu_disable()
837 * clear active_mask and events[] yet it preserves
838 * idx
839 */
840 set_bit(hwc->idx, cpuc->active_mask);
841 cpuc->events[hwc->idx] = event;
842
843 x86_pmu.enable(hwc, hwc->idx);
844 perf_event_update_userpage(event);
845 }
846 cpuc->n_added = 0;
847 perf_events_lapic_init();
1260 } 848 }
1261 849
1262 x86_pmu_disable_event(hwc, idx); 850 cpuc->enabled = 1;
851 barrier();
852
853 x86_pmu.enable_all();
1263} 854}
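The two-step dance in hw_perf_enable() is not cosmetic. If event A moves from counter 0 to counter 1 while event B moves from 1 to 0, programming them in a single pass could overwrite a counter whose old contents have not yet been saved; stopping every moved event first, then reprogramming all of them, makes such swaps safe. The fast path (match_prev_assignment()) skips both steps entirely when an event keeps the counter, CPU and tag it had last time.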
1264 855
1265static inline void 856static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1266amd_pmu_disable_event(struct hw_perf_event *hwc, int idx) 857{
858 (void)checking_wrmsrl(hwc->config_base + idx,
859 hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
860}
861
862static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1267{ 863{
1268 x86_pmu_disable_event(hwc, idx); 864 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1269} 865}
1270 866
1271static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 867static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -1326,220 +922,60 @@ x86_perf_event_set_period(struct perf_event *event,
1326 return ret; 922 return ret;
1327} 923}
1328 924
1329static inline void 925static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1330intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1331{
1332 int idx = __idx - X86_PMC_IDX_FIXED;
1333 u64 ctrl_val, bits, mask;
1334 int err;
1335
1336 /*
1337 * Enable IRQ generation (0x8),
1338 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1339 * if requested:
1340 */
1341 bits = 0x8ULL;
1342 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1343 bits |= 0x2;
1344 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1345 bits |= 0x1;
1346
1347 /*
1348 * ANY bit is supported in v3 and up
1349 */
1350 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1351 bits |= 0x4;
1352
1353 bits <<= (idx * 4);
1354 mask = 0xfULL << (idx * 4);
1355
1356 rdmsrl(hwc->config_base, ctrl_val);
1357 ctrl_val &= ~mask;
1358 ctrl_val |= bits;
1359 err = checking_wrmsrl(hwc->config_base, ctrl_val);
1360}
1361
1362static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1363{ 926{
1364 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 927 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1365 u64 val;
1366
1367 val = hwc->config;
1368 if (cpuc->enabled) 928 if (cpuc->enabled)
1369 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 929 __x86_pmu_enable_event(hwc, idx);
1370
1371 (void)checking_wrmsrl(hwc->config_base + idx, val);
1372} 930}
1373 931
1374 932/*
1375static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) 933 * activate a single event
1376{ 934 *
1377 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 935 * The event is added to the group of enabled events
1378 if (!__get_cpu_var(cpu_hw_events).enabled) 936 * but only if it can be scheduled with existing events.
1379 return; 937 *
1380 938 * Called with PMU disabled. If successful, returns 1; the caller
1381 intel_pmu_enable_bts(hwc->config); 939 * is then guaranteed to call perf_enable() and hw_perf_enable().
1382 return; 940 */
1383 } 941static int x86_pmu_enable(struct perf_event *event)
1384
1385 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1386 intel_pmu_enable_fixed(hwc, idx);
1387 return;
1388 }
1389
1390 x86_pmu_enable_event(hwc, idx);
1391}
1392
1393static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1394{ 942{
1395 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 943 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
944 struct hw_perf_event *hwc;
945 int assign[X86_PMC_IDX_MAX];
946 int n, n0, ret;
1396 947
1397 if (cpuc->enabled) 948 hwc = &event->hw;
1398 x86_pmu_enable_event(hwc, idx);
1399}
1400
1401static int fixed_mode_idx(struct hw_perf_event *hwc)
1402{
1403 unsigned int hw_event;
1404
1405 hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
1406
1407 if (unlikely((hw_event ==
1408 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
1409 (hwc->sample_period == 1)))
1410 return X86_PMC_IDX_FIXED_BTS;
1411 949
1412 if (!x86_pmu.num_events_fixed) 950 n0 = cpuc->n_events;
1413 return -1; 951 n = collect_events(cpuc, event, false);
952 if (n < 0)
953 return n;
1414 954
955 ret = x86_schedule_events(cpuc, n, assign);
956 if (ret)
957 return ret;
1415 /* 958 /*
1416 * fixed counters do not take all possible filters 959 * copy new assignment, now we know it is possible
960 * will be used by hw_perf_enable()
1417 */ 961 */
1418 if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK) 962 memcpy(cpuc->assign, assign, n*sizeof(int));
1419 return -1;
1420
1421 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
1422 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
1423 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
1424 return X86_PMC_IDX_FIXED_CPU_CYCLES;
1425 if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
1426 return X86_PMC_IDX_FIXED_BUS_CYCLES;
1427
1428 return -1;
1429}
1430
1431/*
1432 * generic counter allocator: get next free counter
1433 */
1434static int
1435gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1436{
1437 int idx;
1438
1439 idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
1440 return idx == x86_pmu.num_events ? -1 : idx;
1441}
1442
1443/*
1444 * intel-specific counter allocator: check event constraints
1445 */
1446static int
1447intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1448{
1449 const struct event_constraint *event_constraint;
1450 int i, code;
1451
1452 if (!event_constraints)
1453 goto skip;
1454 963
1455 code = hwc->config & CORE_EVNTSEL_EVENT_MASK; 964 cpuc->n_events = n;
1456 965 cpuc->n_added = n - n0;
1457 for_each_event_constraint(event_constraint, event_constraints) {
1458 if (code == event_constraint->code) {
1459 for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
1460 if (!test_and_set_bit(i, cpuc->used_mask))
1461 return i;
1462 }
1463 return -1;
1464 }
1465 }
1466skip:
1467 return gen_get_event_idx(cpuc, hwc);
1468}
1469
1470static int
1471x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
1472{
1473 int idx;
1474 966
1475 idx = fixed_mode_idx(hwc); 967 return 0;
1476 if (idx == X86_PMC_IDX_FIXED_BTS) {
1477 /* BTS is already occupied. */
1478 if (test_and_set_bit(idx, cpuc->used_mask))
1479 return -EAGAIN;
1480
1481 hwc->config_base = 0;
1482 hwc->event_base = 0;
1483 hwc->idx = idx;
1484 } else if (idx >= 0) {
1485 /*
1486 * Try to get the fixed event, if that is already taken
1487 * then try to get a generic event:
1488 */
1489 if (test_and_set_bit(idx, cpuc->used_mask))
1490 goto try_generic;
1491
1492 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1493 /*
1494 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1495 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1496 */
1497 hwc->event_base =
1498 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1499 hwc->idx = idx;
1500 } else {
1501 idx = hwc->idx;
1502 /* Try to get the previous generic event again */
1503 if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
1504try_generic:
1505 idx = x86_pmu.get_event_idx(cpuc, hwc);
1506 if (idx == -1)
1507 return -EAGAIN;
1508
1509 set_bit(idx, cpuc->used_mask);
1510 hwc->idx = idx;
1511 }
1512 hwc->config_base = x86_pmu.eventsel;
1513 hwc->event_base = x86_pmu.perfctr;
1514 }
1515
1516 return idx;
1517} 968}
1518 969
1519/* 970static int x86_pmu_start(struct perf_event *event)
1520 * Find a PMC slot for the freshly enabled / scheduled in event:
1521 */
1522static int x86_pmu_enable(struct perf_event *event)
1523{ 971{
1524 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1525 struct hw_perf_event *hwc = &event->hw; 972 struct hw_perf_event *hwc = &event->hw;
1526 int idx;
1527
1528 idx = x86_schedule_event(cpuc, hwc);
1529 if (idx < 0)
1530 return idx;
1531
1532 perf_events_lapic_init();
1533
1534 x86_pmu.disable(hwc, idx);
1535 973
1536 cpuc->events[idx] = event; 974 if (hwc->idx == -1)
1537 set_bit(idx, cpuc->active_mask); 975 return -EAGAIN;
1538 976
1539 x86_perf_event_set_period(event, hwc, idx); 977 x86_perf_event_set_period(event, hwc, hwc->idx);
1540 x86_pmu.enable(hwc, idx); 978 x86_pmu.enable(hwc, hwc->idx);
1541
1542 perf_event_update_userpage(event);
1543 979
1544 return 0; 980 return 0;
1545} 981}
@@ -1583,7 +1019,7 @@ void perf_event_print_debug(void)
1583 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); 1019 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1584 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); 1020 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
1585 } 1021 }
1586 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); 1022 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1587 1023
1588 for (idx = 0; idx < x86_pmu.num_events; idx++) { 1024 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1589 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1025 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
@@ -1607,67 +1043,7 @@ void perf_event_print_debug(void)
1607 local_irq_restore(flags); 1043 local_irq_restore(flags);
1608} 1044}
1609 1045
1610static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) 1046static void x86_pmu_stop(struct perf_event *event)
1611{
1612 struct debug_store *ds = cpuc->ds;
1613 struct bts_record {
1614 u64 from;
1615 u64 to;
1616 u64 flags;
1617 };
1618 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1619 struct bts_record *at, *top;
1620 struct perf_output_handle handle;
1621 struct perf_event_header header;
1622 struct perf_sample_data data;
1623 struct pt_regs regs;
1624
1625 if (!event)
1626 return;
1627
1628 if (!ds)
1629 return;
1630
1631 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1632 top = (struct bts_record *)(unsigned long)ds->bts_index;
1633
1634 if (top <= at)
1635 return;
1636
1637 ds->bts_index = ds->bts_buffer_base;
1638
1639
1640 data.period = event->hw.last_period;
1641 data.addr = 0;
1642 data.raw = NULL;
1643 regs.ip = 0;
1644
1645 /*
1646 * Prepare a generic sample, i.e. fill in the invariant fields.
1647 * We will overwrite the from and to address before we output
1648 * the sample.
1649 */
1650 perf_prepare_sample(&header, &data, event, &regs);
1651
1652 if (perf_output_begin(&handle, event,
1653 header.size * (top - at), 1, 1))
1654 return;
1655
1656 for (; at < top; at++) {
1657 data.ip = at->from;
1658 data.addr = at->to;
1659
1660 perf_output_sample(&handle, &header, &data, event);
1661 }
1662
1663 perf_output_end(&handle);
1664
1665 /* There's new data available. */
1666 event->hw.interrupts++;
1667 event->pending_kill = POLL_IN;
1668}
1669
1670static void x86_pmu_disable(struct perf_event *event)
1671{ 1047{
1672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1048 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1673 struct hw_perf_event *hwc = &event->hw; 1049 struct hw_perf_event *hwc = &event->hw;
@@ -1681,183 +1057,38 @@ static void x86_pmu_disable(struct perf_event *event)
1681 x86_pmu.disable(hwc, idx); 1057 x86_pmu.disable(hwc, idx);
1682 1058
1683 /* 1059 /*
1684 * Make sure the cleared pointer becomes visible before we
1685 * (potentially) free the event:
1686 */
1687 barrier();
1688
1689 /*
1690 * Drain the remaining delta count out of a event 1060 * Drain the remaining delta count out of a event
1691 * that we are disabling: 1061 * that we are disabling:
1692 */ 1062 */
1693 x86_perf_event_update(event, hwc, idx); 1063 x86_perf_event_update(event, hwc, idx);
1694 1064
1695 /* Drain the remaining BTS records. */
1696 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1697 intel_pmu_drain_bts_buffer(cpuc);
1698
1699 cpuc->events[idx] = NULL; 1065 cpuc->events[idx] = NULL;
1700 clear_bit(idx, cpuc->used_mask);
1701
1702 perf_event_update_userpage(event);
1703}
1704
1705/*
1706 * Save and restart an expired event. Called by NMI contexts,
1707 * so it has to be careful about preempting normal event ops:
1708 */
1709static int intel_pmu_save_and_restart(struct perf_event *event)
1710{
1711 struct hw_perf_event *hwc = &event->hw;
1712 int idx = hwc->idx;
1713 int ret;
1714
1715 x86_perf_event_update(event, hwc, idx);
1716 ret = x86_perf_event_set_period(event, hwc, idx);
1717
1718 if (event->state == PERF_EVENT_STATE_ACTIVE)
1719 intel_pmu_enable_event(hwc, idx);
1720
1721 return ret;
1722}
1723
1724static void intel_pmu_reset(void)
1725{
1726 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1727 unsigned long flags;
1728 int idx;
1729
1730 if (!x86_pmu.num_events)
1731 return;
1732
1733 local_irq_save(flags);
1734
1735 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1736
1737 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1738 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1739 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1740 }
1741 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1742 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1743 }
1744 if (ds)
1745 ds->bts_index = ds->bts_buffer_base;
1746
1747 local_irq_restore(flags);
1748}
1749
1750static int p6_pmu_handle_irq(struct pt_regs *regs)
1751{
1752 struct perf_sample_data data;
1753 struct cpu_hw_events *cpuc;
1754 struct perf_event *event;
1755 struct hw_perf_event *hwc;
1756 int idx, handled = 0;
1757 u64 val;
1758
1759 data.addr = 0;
1760 data.raw = NULL;
1761
1762 cpuc = &__get_cpu_var(cpu_hw_events);
1763
1764 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1765 if (!test_bit(idx, cpuc->active_mask))
1766 continue;
1767
1768 event = cpuc->events[idx];
1769 hwc = &event->hw;
1770
1771 val = x86_perf_event_update(event, hwc, idx);
1772 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1773 continue;
1774
1775 /*
1776 * event overflow
1777 */
1778 handled = 1;
1779 data.period = event->hw.last_period;
1780
1781 if (!x86_perf_event_set_period(event, hwc, idx))
1782 continue;
1783
1784 if (perf_event_overflow(event, 1, &data, regs))
1785 p6_pmu_disable_event(hwc, idx);
1786 }
1787
1788 if (handled)
1789 inc_irq_stat(apic_perf_irqs);
1790
1791 return handled;
1792} 1066}
1793 1067
1794/* 1068static void x86_pmu_disable(struct perf_event *event)
1795 * This handler is triggered by the local APIC, so the APIC IRQ handling
1796 * rules apply:
1797 */
1798static int intel_pmu_handle_irq(struct pt_regs *regs)
1799{ 1069{
1800 struct perf_sample_data data; 1070 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1801 struct cpu_hw_events *cpuc; 1071 int i;
1802 int bit, loops;
1803 u64 ack, status;
1804
1805 data.addr = 0;
1806 data.raw = NULL;
1807
1808 cpuc = &__get_cpu_var(cpu_hw_events);
1809
1810 perf_disable();
1811 intel_pmu_drain_bts_buffer(cpuc);
1812 status = intel_pmu_get_status();
1813 if (!status) {
1814 perf_enable();
1815 return 0;
1816 }
1817
1818 loops = 0;
1819again:
1820 if (++loops > 100) {
1821 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1822 perf_event_print_debug();
1823 intel_pmu_reset();
1824 perf_enable();
1825 return 1;
1826 }
1827 1072
1828 inc_irq_stat(apic_perf_irqs); 1073 x86_pmu_stop(event);
1829 ack = status;
1830 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1831 struct perf_event *event = cpuc->events[bit];
1832 1074
1833 clear_bit(bit, (unsigned long *) &status); 1075 for (i = 0; i < cpuc->n_events; i++) {
1834 if (!test_bit(bit, cpuc->active_mask)) 1076 if (event == cpuc->event_list[i]) {
1835 continue;
1836 1077
1837 if (!intel_pmu_save_and_restart(event)) 1078 if (x86_pmu.put_event_constraints)
1838 continue; 1079 x86_pmu.put_event_constraints(cpuc, event);
1839 1080
1840 data.period = event->hw.last_period; 1081 while (++i < cpuc->n_events)
1082 cpuc->event_list[i-1] = cpuc->event_list[i];
1841 1083
1842 if (perf_event_overflow(event, 1, &data, regs)) 1084 --cpuc->n_events;
1843 intel_pmu_disable_event(&event->hw, bit); 1085 break;
1086 }
1844 } 1087 }
1845 1088 perf_event_update_userpage(event);
1846 intel_pmu_ack_status(ack);
1847
1848 /*
1849 * Repeat if there is more work to be done:
1850 */
1851 status = intel_pmu_get_status();
1852 if (status)
1853 goto again;
1854
1855 perf_enable();
1856
1857 return 1;
1858} 1089}
1859 1090
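x86_pmu_disable() deletes the event from event_list[] by shifting the tail down one slot rather than swapping in the last entry, preserving the declared "in enabled order" invariant from the struct comment near the top of this diff. The idiom on a plain array:

/* Remove list[pos] while preserving the order of the rest. */
static int remove_at(int *list, int n, int pos)
{
	int i;

	for (i = pos + 1; i < n; i++)
		list[i - 1] = list[i];
	return n - 1;	/* new element count */
}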
1860static int amd_pmu_handle_irq(struct pt_regs *regs) 1091static int x86_pmu_handle_irq(struct pt_regs *regs)
1861{ 1092{
1862 struct perf_sample_data data; 1093 struct perf_sample_data data;
1863 struct cpu_hw_events *cpuc; 1094 struct cpu_hw_events *cpuc;
@@ -1892,7 +1123,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
1892 continue; 1123 continue;
1893 1124
1894 if (perf_event_overflow(event, 1, &data, regs)) 1125 if (perf_event_overflow(event, 1, &data, regs))
1895 amd_pmu_disable_event(hwc, idx); 1126 x86_pmu.disable(hwc, idx);
1896 } 1127 }
1897 1128
1898 if (handled) 1129 if (handled)
@@ -1975,194 +1206,137 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1975 .priority = 1 1206 .priority = 1
1976}; 1207};
1977 1208
1978static __initconst struct x86_pmu p6_pmu = { 1209static struct event_constraint unconstrained;
1979 .name = "p6", 1210static struct event_constraint emptyconstraint;
1980 .handle_irq = p6_pmu_handle_irq,
1981 .disable_all = p6_pmu_disable_all,
1982 .enable_all = p6_pmu_enable_all,
1983 .enable = p6_pmu_enable_event,
1984 .disable = p6_pmu_disable_event,
1985 .eventsel = MSR_P6_EVNTSEL0,
1986 .perfctr = MSR_P6_PERFCTR0,
1987 .event_map = p6_pmu_event_map,
1988 .raw_event = p6_pmu_raw_event,
1989 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
1990 .apic = 1,
1991 .max_period = (1ULL << 31) - 1,
1992 .version = 0,
1993 .num_events = 2,
1994 /*
1995 * Events have 40 bits implemented. However they are designed such
1996 * that bits [32-39] are sign extensions of bit 31. As such the
1997 * effective width of a event for P6-like PMU is 32 bits only.
1998 *
1999 * See IA-32 Intel Architecture Software developer manual Vol 3B
2000 */
2001 .event_bits = 32,
2002 .event_mask = (1ULL << 32) - 1,
2003 .get_event_idx = intel_get_event_idx,
2004};
2005 1211
2006static __initconst struct x86_pmu intel_pmu = { 1212static struct event_constraint *
2007 .name = "Intel", 1213x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2008 .handle_irq = intel_pmu_handle_irq, 1214{
2009 .disable_all = intel_pmu_disable_all, 1215 struct event_constraint *c;
2010 .enable_all = intel_pmu_enable_all,
2011 .enable = intel_pmu_enable_event,
2012 .disable = intel_pmu_disable_event,
2013 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2014 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2015 .event_map = intel_pmu_event_map,
2016 .raw_event = intel_pmu_raw_event,
2017 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2018 .apic = 1,
2019 /*
2020 * Intel PMCs cannot be accessed sanely above 32 bit width,
2021 * so we install an artificial 1<<31 period regardless of
2022 * the generic event period:
2023 */
2024 .max_period = (1ULL << 31) - 1,
2025 .enable_bts = intel_pmu_enable_bts,
2026 .disable_bts = intel_pmu_disable_bts,
2027 .get_event_idx = intel_get_event_idx,
2028};
2029 1216
2030static __initconst struct x86_pmu amd_pmu = { 1217 if (x86_pmu.event_constraints) {
2031 .name = "AMD", 1218 for_each_event_constraint(c, x86_pmu.event_constraints) {
2032 .handle_irq = amd_pmu_handle_irq, 1219 if ((event->hw.config & c->cmask) == c->code)
2033 .disable_all = amd_pmu_disable_all, 1220 return c;
2034 .enable_all = amd_pmu_enable_all, 1221 }
2035 .enable = amd_pmu_enable_event, 1222 }
2036 .disable = amd_pmu_disable_event, 1223
2037 .eventsel = MSR_K7_EVNTSEL0, 1224 return &unconstrained;
2038 .perfctr = MSR_K7_PERFCTR0, 1225}
2039 .event_map = amd_pmu_event_map,
2040 .raw_event = amd_pmu_raw_event,
2041 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
2042 .num_events = 4,
2043 .event_bits = 48,
2044 .event_mask = (1ULL << 48) - 1,
2045 .apic = 1,
2046 /* use highest bit to detect overflow */
2047 .max_period = (1ULL << 47) - 1,
2048 .get_event_idx = gen_get_event_idx,
2049};
2050 1226
2051static __init int p6_pmu_init(void) 1227static int x86_event_sched_in(struct perf_event *event,
1228 struct perf_cpu_context *cpuctx)
2052{ 1229{
2053 switch (boot_cpu_data.x86_model) { 1230 int ret = 0;
2054 case 1:
2055 case 3: /* Pentium Pro */
2056 case 5:
2057 case 6: /* Pentium II */
2058 case 7:
2059 case 8:
2060 case 11: /* Pentium III */
2061 event_constraints = intel_p6_event_constraints;
2062 break;
2063 case 9:
2064 case 13:
2065 /* Pentium M */
2066 event_constraints = intel_p6_event_constraints;
2067 break;
2068 default:
2069 pr_cont("unsupported p6 CPU model %d ",
2070 boot_cpu_data.x86_model);
2071 return -ENODEV;
2072 }
2073 1231
2074 x86_pmu = p6_pmu; 1232 event->state = PERF_EVENT_STATE_ACTIVE;
1233 event->oncpu = smp_processor_id();
1234 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2075 1235
2076 return 0; 1236 if (!is_x86_event(event))
1237 ret = event->pmu->enable(event);
1238
1239 if (!ret && !is_software_event(event))
1240 cpuctx->active_oncpu++;
1241
1242 if (!ret && event->attr.exclusive)
1243 cpuctx->exclusive = 1;
1244
1245 return ret;
2077} 1246}
2078 1247
2079static __init int intel_pmu_init(void) 1248static void x86_event_sched_out(struct perf_event *event,
1249 struct perf_cpu_context *cpuctx)
2080{ 1250{
2081 union cpuid10_edx edx; 1251 event->state = PERF_EVENT_STATE_INACTIVE;
2082 union cpuid10_eax eax; 1252 event->oncpu = -1;
2083 unsigned int unused;
2084 unsigned int ebx;
2085 int version;
2086
2087 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2088 /* check for P6 processor family */
2089 if (boot_cpu_data.x86 == 6) {
2090 return p6_pmu_init();
2091 } else {
2092 return -ENODEV;
2093 }
2094 }
2095 1253
2096 /* 1254 if (!is_x86_event(event))
2097 * Check whether the Architectural PerfMon supports 1255 event->pmu->disable(event);
2098 * Branch Misses Retired hw_event or not.
2099 */
2100 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2101 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2102 return -ENODEV;
2103 1256
2104 version = eax.split.version_id; 1257 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2105 if (version < 2)
2106 return -ENODEV;
2107 1258
2108 x86_pmu = intel_pmu; 1259 if (!is_software_event(event))
2109 x86_pmu.version = version; 1260 cpuctx->active_oncpu--;
2110 x86_pmu.num_events = eax.split.num_events;
2111 x86_pmu.event_bits = eax.split.bit_width;
2112 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
2113 1261
2114 /* 1262 if (event->attr.exclusive || !cpuctx->active_oncpu)
2115 * Quirk: v2 perfmon does not report fixed-purpose events, so 1263 cpuctx->exclusive = 0;
2116 * assume at least 3 events: 1264}
2117 */
2118 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2119 1265
1266/*
1267 * Called to enable a whole group of events.
1268 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
1269 * Assumes the caller has disabled interrupts and has
1270 * frozen the PMU with hw_perf_save_disable.
1271 *
 1272 * called with PMU disabled. If successful, returns 1; the caller
 1273 * is then guaranteed to call perf_enable() and hw_perf_enable().
1274 */
1275int hw_perf_group_sched_in(struct perf_event *leader,
1276 struct perf_cpu_context *cpuctx,
1277 struct perf_event_context *ctx)
1278{
1279 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1280 struct perf_event *sub;
1281 int assign[X86_PMC_IDX_MAX];
1282 int n0, n1, ret;
1283
1284 /* n0 = total number of events */
1285 n0 = collect_events(cpuc, leader, true);
1286 if (n0 < 0)
1287 return n0;
1288
1289 ret = x86_schedule_events(cpuc, n0, assign);
1290 if (ret)
1291 return ret;
1292
1293 ret = x86_event_sched_in(leader, cpuctx);
1294 if (ret)
1295 return ret;
1296
1297 n1 = 1;
1298 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1299 if (sub->state > PERF_EVENT_STATE_OFF) {
1300 ret = x86_event_sched_in(sub, cpuctx);
1301 if (ret)
1302 goto undo;
1303 ++n1;
1304 }
1305 }
2120 /* 1306 /*
2121 * Install the hw-cache-events table: 1307 * copy new assignment, now we know it is possible
1308 * will be used by hw_perf_enable()
2122 */ 1309 */
2123 switch (boot_cpu_data.x86_model) { 1310 memcpy(cpuc->assign, assign, n0*sizeof(int));
2124 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2125 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2126 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2127 case 29: /* six-core 45 nm xeon "Dunnington" */
2128 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2129 sizeof(hw_cache_event_ids));
2130
2131 pr_cont("Core2 events, ");
2132 event_constraints = intel_core_event_constraints;
2133 break;
2134 default:
2135 case 26:
2136 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2137 sizeof(hw_cache_event_ids));
2138 1311
2139 event_constraints = intel_nehalem_event_constraints; 1312 cpuc->n_events = n0;
2140 pr_cont("Nehalem/Corei7 events, "); 1313 cpuc->n_added = n1;
2141 break; 1314 ctx->nr_active += n1;
2142 case 28:
2143 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2144 sizeof(hw_cache_event_ids));
2145 1315
2146 pr_cont("Atom events, "); 1316 /*
2147 break; 1317 * 1 means successful and events are active
1318 * This is not quite true because we defer
1319 * actual activation until hw_perf_enable() but
 1320 * this way we ensure the caller won't try to enable
1321 * individual events
1322 */
1323 return 1;
1324undo:
1325 x86_event_sched_out(leader, cpuctx);
1326 n0 = 1;
1327 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1328 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
1329 x86_event_sched_out(sub, cpuctx);
1330 if (++n0 == n1)
1331 break;
1332 }
2148 } 1333 }
2149 return 0; 1334 return ret;
2150} 1335}
2151 1336
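hw_perf_group_sched_in() is effectively a transaction: it dry-runs the entire group through x86_schedule_events() first, and only when a complete assignment exists does it mark the leader and each sibling ACTIVE; any failure part-way through unwinds via the undo label. Returning 1 tells the generic layer the group is handled, so it will not try to enable the members one by one (with the caveat spelled out in the comment above that actual hardware activation is deferred to hw_perf_enable()).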
2152static __init int amd_pmu_init(void) 1337#include "perf_event_amd.c"
2153{ 1338#include "perf_event_p6.c"
2154 /* Performance-monitoring supported from K7 and later: */ 1339#include "perf_event_intel.c"
2155 if (boot_cpu_data.x86 < 6)
2156 return -ENODEV;
2157
2158 x86_pmu = amd_pmu;
2159
2160 /* Events are common for all AMDs */
2161 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2162 sizeof(hw_cache_event_ids));
2163
2164 return 0;
2165}
2166 1340
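
The new hw_perf_group_sched_in() above is transactional: collect the leader plus its siblings, dry-run the counter assignment with x86_schedule_events(), activate each event, and on any per-event failure walk back over whatever was already activated, so the group goes onto the PMU all-or-nothing. A minimal standalone sketch of that collect/dry-run/commit/undo shape (all names and types here are illustrative stand-ins, not the kernel's):

    #include <stdio.h>

    #define MAX_COUNTERS 4

    struct event {
            int active;
            int can_run;    /* whether activation will succeed */
    };

    /* dry-run: would all n events fit on the PMU? */
    static int schedule_events(int n, int assign[])
    {
            int i;

            if (n > MAX_COUNTERS)
                    return -1;
            for (i = 0; i < n; i++)
                    assign[i] = i;  /* trivial counter assignment */
            return 0;
    }

    static int event_sched_in(struct event *e)
    {
            if (!e->can_run)
                    return -1;
            e->active = 1;
            return 0;
    }

    static void event_sched_out(struct event *e)
    {
            e->active = 0;
    }

    /*
     * collect -> dry-run -> activate; on partial failure, walk back
     * over whatever was activated so the group is all-or-nothing
     */
    static int group_sched_in(struct event *evs, int n)
    {
            int assign[MAX_COUNTERS];
            int i, j;

            if (schedule_events(n, assign))
                    return -1;      /* nothing was touched */

            for (i = 0; i < n; i++) {
                    if (event_sched_in(&evs[i]))
                            goto undo;
            }
            return 1;               /* whole group is active */
    undo:
            for (j = 0; j < i; j++)
                    event_sched_out(&evs[j]);
            return -1;
    }

    int main(void)
    {
            struct event group[3] = { {0, 1}, {0, 1}, {0, 0} };

            /* third event cannot run: expect -1 and all events inactive */
            printf("sched_in: %d (active: %d %d %d)\n",
                   group_sched_in(group, 3),
                   group[0].active, group[1].active, group[2].active);
            return 0;
    }

The same shape reappears below in validate_group(), which runs only the dry-run half against throwaway state.
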
2167static void __init pmu_check_apic(void) 1341static void __init pmu_check_apic(void)
2168{ 1342{
@@ -2176,6 +1350,7 @@ static void __init pmu_check_apic(void)
2176 1350
2177void __init init_hw_perf_events(void) 1351void __init init_hw_perf_events(void)
2178{ 1352{
1353 struct event_constraint *c;
2179 int err; 1354 int err;
2180 1355
2181 pr_info("Performance Events: "); 1356 pr_info("Performance Events: ");
@@ -2220,6 +1395,20 @@ void __init init_hw_perf_events(void)
2220 perf_events_lapic_init(); 1395 perf_events_lapic_init();
2221 register_die_notifier(&perf_event_nmi_notifier); 1396 register_die_notifier(&perf_event_nmi_notifier);
2222 1397
1398 unconstrained = (struct event_constraint)
1399 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
1400 0, x86_pmu.num_events);
1401
1402 if (x86_pmu.event_constraints) {
1403 for_each_event_constraint(c, x86_pmu.event_constraints) {
1404 if (c->cmask != INTEL_ARCH_FIXED_MASK)
1405 continue;
1406
1407 c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
1408 c->weight += x86_pmu.num_events;
1409 }
1410 }
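
The unconstrained constraint built just above is simply "any generic counter": its index mask is the low x86_pmu.num_events bits. The loop that follows ORs those same bits into every fixed-counter constraint, so an event eligible for a fixed counter may still fall back to a generic one. The mask arithmetic, checked in isolation (illustrative values):

    #include <stdio.h>

    int main(void)
    {
            int num_events = 4;     /* e.g. 4 generic counters */
            unsigned long long idxmsk = (1ULL << num_events) - 1;

            /* counters 0..3 -> mask 0xf */
            printf("unconstrained idxmsk = %#llx\n", idxmsk);
            return 0;
    }
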
1411
2223 pr_info("... version: %d\n", x86_pmu.version); 1412 pr_info("... version: %d\n", x86_pmu.version);
2224 pr_info("... bit width: %d\n", x86_pmu.event_bits); 1413 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2225 pr_info("... generic registers: %d\n", x86_pmu.num_events); 1414 pr_info("... generic registers: %d\n", x86_pmu.num_events);
@@ -2237,50 +1426,79 @@ static inline void x86_pmu_read(struct perf_event *event)
2237static const struct pmu pmu = { 1426static const struct pmu pmu = {
2238 .enable = x86_pmu_enable, 1427 .enable = x86_pmu_enable,
2239 .disable = x86_pmu_disable, 1428 .disable = x86_pmu_disable,
1429 .start = x86_pmu_start,
1430 .stop = x86_pmu_stop,
2240 .read = x86_pmu_read, 1431 .read = x86_pmu_read,
2241 .unthrottle = x86_pmu_unthrottle, 1432 .unthrottle = x86_pmu_unthrottle,
2242}; 1433};
2243 1434
2244static int
2245validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
2246{
2247 struct hw_perf_event fake_event = event->hw;
2248
2249 if (event->pmu && event->pmu != &pmu)
2250 return 0;
2251
2252 return x86_schedule_event(cpuc, &fake_event) >= 0;
2253}
2254
1435/*
1436 * validate a single event group
1437 *
1438 * validation includes:
1439 * - check events are compatible with each other
1440 * - events do not compete for the same counter
1441 * - number of events <= number of counters
1442 *
1443 * validation ensures the group can be loaded onto the
1444 * PMU if it was the only group available.
1445 */
2255static int validate_group(struct perf_event *event) 1446static int validate_group(struct perf_event *event)
2256{ 1447{
2257 struct perf_event *sibling, *leader = event->group_leader; 1448 struct perf_event *leader = event->group_leader;
2258 struct cpu_hw_events fake_pmu; 1449 struct cpu_hw_events *fake_cpuc;
1450 int ret, n;
2259 1451
2260 memset(&fake_pmu, 0, sizeof(fake_pmu)); 1452 ret = -ENOMEM;
1453 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1454 if (!fake_cpuc)
1455 goto out;
1456
1457 /*
1458 * the event is not yet connected with its
1459 * siblings therefore we must first collect
1460 * existing siblings, then add the new event
1461 * before we can simulate the scheduling
1462 */
1463 ret = -ENOSPC;
1464 n = collect_events(fake_cpuc, leader, true);
1465 if (n < 0)
1466 goto out_free;
2261 1467
2262 if (!validate_event(&fake_pmu, leader))
2263 return -ENOSPC;
2264
2265 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
2266 if (!validate_event(&fake_pmu, sibling))
2267 return -ENOSPC;
2268 }
2269
2270 if (!validate_event(&fake_pmu, event))
2271 return -ENOSPC;
2272
2273 return 0;
1468 fake_cpuc->n_events = n;
1469 n = collect_events(fake_cpuc, event, false);
1470 if (n < 0)
1471 goto out_free;
1472
1473 fake_cpuc->n_events = n;
1474
1475 ret = x86_schedule_events(fake_cpuc, n, NULL);
1476
1477out_free:
1478 kfree(fake_cpuc);
1479out:
1480 return ret;
2274} 1481}
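
Note how the rewritten validate_group() never touches live per-cpu state: it zero-allocates a throwaway cpu_hw_events, collects the leader's existing siblings into it, adds the new event, and calls x86_schedule_events() with a NULL assignment array purely as a feasibility test. The dry-run shape, reduced to a sketch (names illustrative):

    #include <stdlib.h>
    #include <stdio.h>

    #define MAX_COUNTERS 4

    struct fake_cpuc {
            int n_events;
    };

    /* feasibility check only: no assignment is recorded */
    static int schedule_events(struct fake_cpuc *c)
    {
            return c->n_events <= MAX_COUNTERS ? 0 : -1;
    }

    static int validate_group(int n_existing, int n_new)
    {
            struct fake_cpuc *c = calloc(1, sizeof(*c));
            int ret;

            if (!c)
                    return -1;
            c->n_events = n_existing + n_new;       /* siblings + new event */
            ret = schedule_events(c);               /* dry run, fake state */
            free(c);
            return ret;
    }

    int main(void)
    {
            /* 3+1 events fit on 4 counters; 4+1 do not */
            printf("%d %d\n", validate_group(3, 1), validate_group(4, 1));
            return 0;
    }
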
2275 1482
2276const struct pmu *hw_perf_event_init(struct perf_event *event) 1483const struct pmu *hw_perf_event_init(struct perf_event *event)
2277{ 1484{
1485 const struct pmu *tmp;
2278 int err; 1486 int err;
2279 1487
2280 err = __hw_perf_event_init(event); 1488 err = __hw_perf_event_init(event);
2281 if (!err) { 1489 if (!err) {
1490 /*
1491 * we temporarily connect event to its pmu
1492 * such that validate_group() can classify
1493 * it as an x86 event using is_x86_event()
1494 */
1495 tmp = event->pmu;
1496 event->pmu = &pmu;
1497
2282 if (event->group_leader != event) 1498 if (event->group_leader != event)
2283 err = validate_group(event); 1499 err = validate_group(event);
1500
1501 event->pmu = tmp;
2284 } 1502 }
2285 if (err) { 1503 if (err) {
2286 if (event->destroy) 1504 if (event->destroy)
@@ -2304,7 +1522,6 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2304 1522
2305static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); 1523static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2306static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); 1524static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2307static DEFINE_PER_CPU(int, in_ignored_frame);
2308 1525
2309 1526
2310static void 1527static void
@@ -2320,10 +1537,6 @@ static void backtrace_warning(void *data, char *msg)
2320 1537
2321static int backtrace_stack(void *data, char *name) 1538static int backtrace_stack(void *data, char *name)
2322{ 1539{
2323 per_cpu(in_ignored_frame, smp_processor_id()) =
2324 x86_is_stack_id(NMI_STACK, name) ||
2325 x86_is_stack_id(DEBUG_STACK, name);
2326
2327 return 0; 1540 return 0;
2328} 1541}
2329 1542
@@ -2331,9 +1544,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
2331{ 1544{
2332 struct perf_callchain_entry *entry = data; 1545 struct perf_callchain_entry *entry = data;
2333 1546
2334 if (per_cpu(in_ignored_frame, smp_processor_id()))
2335 return;
2336
2337 if (reliable) 1547 if (reliable)
2338 callchain_store(entry, addr); 1548 callchain_store(entry, addr);
2339} 1549}
@@ -2440,9 +1650,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2440 1650
2441 is_user = user_mode(regs); 1651 is_user = user_mode(regs);
2442 1652
2443 if (!current || current->pid == 0)
2444 return;
2445
2446 if (is_user && current->state != TASK_RUNNING) 1653 if (is_user && current->state != TASK_RUNNING)
2447 return; 1654 return;
2448 1655
@@ -2472,4 +1679,25 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2472void hw_perf_event_setup_online(int cpu) 1679void hw_perf_event_setup_online(int cpu)
2473{ 1680{
2474 init_debug_store_on_cpu(cpu); 1681 init_debug_store_on_cpu(cpu);
1682
1683 switch (boot_cpu_data.x86_vendor) {
1684 case X86_VENDOR_AMD:
1685 amd_pmu_cpu_online(cpu);
1686 break;
1687 default:
1688 return;
1689 }
1690}
1691
1692void hw_perf_event_setup_offline(int cpu)
1693{
1694 init_debug_store_on_cpu(cpu);
1695
1696 switch (boot_cpu_data.x86_vendor) {
1697 case X86_VENDOR_AMD:
1698 amd_pmu_cpu_offline(cpu);
1699 break;
1700 default:
1701 return;
1702 }
2475} 1703}
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
new file mode 100644
index 000000000000..8f3dbfda3c4f
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -0,0 +1,416 @@
1#ifdef CONFIG_CPU_SUP_AMD
2
3static DEFINE_RAW_SPINLOCK(amd_nb_lock);
4
5static __initconst u64 amd_hw_cache_event_ids
6 [PERF_COUNT_HW_CACHE_MAX]
7 [PERF_COUNT_HW_CACHE_OP_MAX]
8 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
9{
10 [ C(L1D) ] = {
11 [ C(OP_READ) ] = {
12 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
13 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
14 },
15 [ C(OP_WRITE) ] = {
16 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
17 [ C(RESULT_MISS) ] = 0,
18 },
19 [ C(OP_PREFETCH) ] = {
20 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
21 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
22 },
23 },
24 [ C(L1I ) ] = {
25 [ C(OP_READ) ] = {
26 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
27 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
28 },
29 [ C(OP_WRITE) ] = {
30 [ C(RESULT_ACCESS) ] = -1,
31 [ C(RESULT_MISS) ] = -1,
32 },
33 [ C(OP_PREFETCH) ] = {
34 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
35 [ C(RESULT_MISS) ] = 0,
36 },
37 },
38 [ C(LL ) ] = {
39 [ C(OP_READ) ] = {
40 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
41 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
42 },
43 [ C(OP_WRITE) ] = {
44 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
45 [ C(RESULT_MISS) ] = 0,
46 },
47 [ C(OP_PREFETCH) ] = {
48 [ C(RESULT_ACCESS) ] = 0,
49 [ C(RESULT_MISS) ] = 0,
50 },
51 },
52 [ C(DTLB) ] = {
53 [ C(OP_READ) ] = {
54 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
55 [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
56 },
57 [ C(OP_WRITE) ] = {
58 [ C(RESULT_ACCESS) ] = 0,
59 [ C(RESULT_MISS) ] = 0,
60 },
61 [ C(OP_PREFETCH) ] = {
62 [ C(RESULT_ACCESS) ] = 0,
63 [ C(RESULT_MISS) ] = 0,
64 },
65 },
66 [ C(ITLB) ] = {
67 [ C(OP_READ) ] = {
 68 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
69 [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
70 },
71 [ C(OP_WRITE) ] = {
72 [ C(RESULT_ACCESS) ] = -1,
73 [ C(RESULT_MISS) ] = -1,
74 },
75 [ C(OP_PREFETCH) ] = {
76 [ C(RESULT_ACCESS) ] = -1,
77 [ C(RESULT_MISS) ] = -1,
78 },
79 },
80 [ C(BPU ) ] = {
81 [ C(OP_READ) ] = {
82 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
83 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
84 },
85 [ C(OP_WRITE) ] = {
86 [ C(RESULT_ACCESS) ] = -1,
87 [ C(RESULT_MISS) ] = -1,
88 },
89 [ C(OP_PREFETCH) ] = {
90 [ C(RESULT_ACCESS) ] = -1,
91 [ C(RESULT_MISS) ] = -1,
92 },
93 },
94};
95
96/*
97 * AMD Performance Monitor K7 and later.
98 */
99static const u64 amd_perfmon_event_map[] =
100{
101 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
102 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
103 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
104 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
105 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
106 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
107};
108
109static u64 amd_pmu_event_map(int hw_event)
110{
111 return amd_perfmon_event_map[hw_event];
112}
113
114static u64 amd_pmu_raw_event(u64 hw_event)
115{
116#define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL
117#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
118#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
119#define K7_EVNTSEL_INV_MASK 0x000800000ULL
120#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
121
122#define K7_EVNTSEL_MASK \
123 (K7_EVNTSEL_EVENT_MASK | \
124 K7_EVNTSEL_UNIT_MASK | \
125 K7_EVNTSEL_EDGE_MASK | \
126 K7_EVNTSEL_INV_MASK | \
127 K7_EVNTSEL_REG_MASK)
128
129 return hw_event & K7_EVNTSEL_MASK;
130}
131
132/*
133 * AMD64 events are detected based on their event codes.
134 */
135static inline int amd_is_nb_event(struct hw_perf_event *hwc)
136{
137 return (hwc->config & 0xe0) == 0xe0;
138}
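
The test masks the low event-select byte: any event code with bits 7:5 all set, i.e. the 0xE0-0xFF range, is treated as a NorthBridge event, while an ordinary core event such as 0x76 (CPU cycles, per the map above) is not. Isolated for illustration:

    #include <stdio.h>

    static int is_nb_event(unsigned long long config)
    {
            return (config & 0xe0) == 0xe0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   is_nb_event(0xe0),   /* 1: NB range */
                   is_nb_event(0xff),   /* 1: NB range */
                   is_nb_event(0x76));  /* 0: core event (CPU cycles) */
            return 0;
    }
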
139
140static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
141 struct perf_event *event)
142{
143 struct hw_perf_event *hwc = &event->hw;
144 struct amd_nb *nb = cpuc->amd_nb;
145 int i;
146
147 /*
148 * only care about NB events
149 */
150 if (!(nb && amd_is_nb_event(hwc)))
151 return;
152
153 /*
154 * need to scan whole list because event may not have
155 * been assigned during scheduling
156 *
157 * no race condition possible because event can only
158 * be removed on one CPU at a time AND PMU is disabled
159 * when we come here
160 */
161 for (i = 0; i < x86_pmu.num_events; i++) {
162 if (nb->owners[i] == event) {
163 cmpxchg(nb->owners+i, event, NULL);
164 break;
165 }
166 }
167}
168
169 /*
170 * AMD64 NorthBridge events need special treatment because
171 * counter access needs to be synchronized across all cores
172 * of a package. Refer to BKDG section 3.12
173 *
174 * NB events are events measuring L3 cache, Hypertransport
175 * traffic. They are identified by an event code >= 0xe00.
176 * They measure events on the NorthBridge which is shared
177 * by all cores on a package. NB events are counted on a
178 * shared set of counters. When a NB event is programmed
179 * in a counter, the data actually comes from a shared
180 * counter. Thus, access to those counters needs to be
181 * synchronized.
182 *
183 * We implement the synchronization such that no two cores
184 * can be measuring NB events using the same counters. Thus,
185 * we maintain a per-NB allocation table. The available slot
186 * is propagated using the event_constraint structure.
187 *
188 * We provide only one choice for each NB event based on
189 * the fact that only NB events have restrictions. Consequently,
190 * if a counter is available, there is a guarantee the NB event
191 * will be assigned to it. If no slot is available, an empty
192 * constraint is returned and scheduling will eventually fail
193 * for this event.
194 *
195 * Note that all cores attached to the same NB compete for the same
196 * counters to host NB events, this is why we use atomic ops. Some
197 * multi-chip CPUs may have more than one NB.
198 *
199 * Given that resources are allocated (cmpxchg), they must be
200 * eventually freed for others to use. This is accomplished by
201 * calling amd_put_event_constraints().
202 *
203 * Non NB events are not impacted by this restriction.
204 */
205static struct event_constraint *
206amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
207{
208 struct hw_perf_event *hwc = &event->hw;
209 struct amd_nb *nb = cpuc->amd_nb;
210 struct perf_event *old = NULL;
211 int max = x86_pmu.num_events;
212 int i, j, k = -1;
213
214 /*
215 * if not NB event or no NB, then no constraints
216 */
217 if (!(nb && amd_is_nb_event(hwc)))
218 return &unconstrained;
219
220 /*
221 * detect if already present, if so reuse
222 *
223 * cannot merge with actual allocation
224 * because of possible holes
225 *
226 * event can already be present yet not assigned (in hwc->idx)
227 * because of successive calls to x86_schedule_events() from
228 * hw_perf_group_sched_in() without hw_perf_enable()
229 */
230 for (i = 0; i < max; i++) {
231 /*
232 * keep track of first free slot
233 */
234 if (k == -1 && !nb->owners[i])
235 k = i;
236
237 /* already present, reuse */
238 if (nb->owners[i] == event)
239 goto done;
240 }
241 /*
242 * not present, so grab a new slot
243 * starting either at:
244 */
245 if (hwc->idx != -1) {
246 /* previous assignment */
247 i = hwc->idx;
248 } else if (k != -1) {
249 /* start from free slot found */
250 i = k;
251 } else {
252 /*
253 * event not found, no slot found in
254 * first pass, try again from the
255 * beginning
256 */
257 i = 0;
258 }
259 j = i;
260 do {
261 old = cmpxchg(nb->owners+i, NULL, event);
262 if (!old)
263 break;
264 if (++i == max)
265 i = 0;
266 } while (i != j);
267done:
268 if (!old)
269 return &nb->event_constraints[i];
270
271 return &emptyconstraint;
272}
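
The claim loop at the end of amd_get_event_constraints() is the interesting part: starting from the previous assignment or the first free slot seen, it tries to take ownership of a shared NB counter with cmpxchg() and advances round-robin until the scan wraps. A user-space sketch of that lock-free claim using C11 atomics (illustrative; the kernel uses its own cmpxchg()):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NSLOTS 4

    static _Atomic(void *) owners[NSLOTS];      /* NULL == slot is free */

    /* try to claim a slot for 'me', starting at 'start', wrapping once */
    static int claim_slot(void *me, int start)
    {
            int i = start, j = start;

            do {
                    void *expected = NULL;

                    if (atomic_compare_exchange_strong(&owners[i],
                                                       &expected, me))
                            return i;   /* claimed slot i */
                    if (++i == NSLOTS)
                            i = 0;
            } while (i != j);

            return -1;                  /* all slots taken */
    }

    int main(void)
    {
            int a, b;

            a = claim_slot(&a, 0);
            b = claim_slot(&b, 0);
            printf("slots: %d %d\n", a, b);     /* 0 1 */
            return 0;
    }
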
273
274static __initconst struct x86_pmu amd_pmu = {
275 .name = "AMD",
276 .handle_irq = x86_pmu_handle_irq,
277 .disable_all = x86_pmu_disable_all,
278 .enable_all = x86_pmu_enable_all,
279 .enable = x86_pmu_enable_event,
280 .disable = x86_pmu_disable_event,
281 .eventsel = MSR_K7_EVNTSEL0,
282 .perfctr = MSR_K7_PERFCTR0,
283 .event_map = amd_pmu_event_map,
284 .raw_event = amd_pmu_raw_event,
285 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
286 .num_events = 4,
287 .event_bits = 48,
288 .event_mask = (1ULL << 48) - 1,
289 .apic = 1,
290 /* use highest bit to detect overflow */
291 .max_period = (1ULL << 47) - 1,
292 .get_event_constraints = amd_get_event_constraints,
293 .put_event_constraints = amd_put_event_constraints
294};
295
296static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
297{
298 struct amd_nb *nb;
299 int i;
300
301 nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
302 if (!nb)
303 return NULL;
304
305 memset(nb, 0, sizeof(*nb));
306 nb->nb_id = nb_id;
307
308 /*
309 * initialize all possible NB constraints
310 */
311 for (i = 0; i < x86_pmu.num_events; i++) {
312 set_bit(i, nb->event_constraints[i].idxmsk);
313 nb->event_constraints[i].weight = 1;
314 }
315 return nb;
316}
317
318static void amd_pmu_cpu_online(int cpu)
319{
320 struct cpu_hw_events *cpu1, *cpu2;
321 struct amd_nb *nb = NULL;
322 int i, nb_id;
323
324 if (boot_cpu_data.x86_max_cores < 2)
325 return;
326
327 /*
328 * function may be called too early in the
329 * boot process, in which case nb_id is bogus
330 */
331 nb_id = amd_get_nb_id(cpu);
332 if (nb_id == BAD_APICID)
333 return;
334
335 cpu1 = &per_cpu(cpu_hw_events, cpu);
336 cpu1->amd_nb = NULL;
337
338 raw_spin_lock(&amd_nb_lock);
339
340 for_each_online_cpu(i) {
341 cpu2 = &per_cpu(cpu_hw_events, i);
342 nb = cpu2->amd_nb;
343 if (!nb)
344 continue;
345 if (nb->nb_id == nb_id)
346 goto found;
347 }
348
349 nb = amd_alloc_nb(cpu, nb_id);
350 if (!nb) {
351 pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
352 raw_spin_unlock(&amd_nb_lock);
353 return;
354 }
355found:
356 nb->refcnt++;
357 cpu1->amd_nb = nb;
358
359 raw_spin_unlock(&amd_nb_lock);
360}
361
362static void amd_pmu_cpu_offline(int cpu)
363{
364 struct cpu_hw_events *cpuhw;
365
366 if (boot_cpu_data.x86_max_cores < 2)
367 return;
368
369 cpuhw = &per_cpu(cpu_hw_events, cpu);
370
371 raw_spin_lock(&amd_nb_lock);
372
373 if (--cpuhw->amd_nb->refcnt == 0)
374 kfree(cpuhw->amd_nb);
375
376 cpuhw->amd_nb = NULL;
377
378 raw_spin_unlock(&amd_nb_lock);
379}
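
Taken together, amd_pmu_cpu_online()/amd_pmu_cpu_offline() give every core attached to the same NB id one shared amd_nb structure, refcounted under amd_nb_lock: the first core on a node allocates it, later cores reuse it, and the last core offline frees it. The find-or-create-with-refcount shape in miniature (illustrative, single-threaded):

    #include <stdlib.h>
    #include <stdio.h>

    #define NCPUS 4

    struct nb { int id; int refcnt; };

    static struct nb *per_cpu_nb[NCPUS];

    static void cpu_online(int cpu, int nb_id)
    {
            struct nb *found = NULL;
            int i;

            for (i = 0; i < NCPUS; i++) {       /* look for existing NB */
                    if (per_cpu_nb[i] && per_cpu_nb[i]->id == nb_id) {
                            found = per_cpu_nb[i];
                            break;
                    }
            }
            if (!found) {                       /* first core on the node */
                    found = calloc(1, sizeof(*found));
                    if (!found)
                            return;
                    found->id = nb_id;
            }
            found->refcnt++;
            per_cpu_nb[cpu] = found;
    }

    static void cpu_offline(int cpu)
    {
            if (--per_cpu_nb[cpu]->refcnt == 0) /* last core on the node */
                    free(per_cpu_nb[cpu]);
            per_cpu_nb[cpu] = NULL;
    }

    int main(void)
    {
            cpu_online(0, 7);
            cpu_online(1, 7);                   /* reuses CPU0's NB */
            printf("shared: %d\n", per_cpu_nb[0] == per_cpu_nb[1]);
            cpu_offline(0);
            cpu_offline(1);                     /* frees it */
            return 0;
    }
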
380
381static __init int amd_pmu_init(void)
382{
383 /* Performance-monitoring supported from K7 and later: */
384 if (boot_cpu_data.x86 < 6)
385 return -ENODEV;
386
387 x86_pmu = amd_pmu;
388
389 /* Events are common for all AMDs */
390 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
391 sizeof(hw_cache_event_ids));
392
393 /*
394 * explicitly initialize the boot cpu, other cpus will get
395 * the cpu hotplug callbacks from smp_init()
396 */
397 amd_pmu_cpu_online(smp_processor_id());
398 return 0;
399}
400
401#else /* CONFIG_CPU_SUP_AMD */
402
403static int amd_pmu_init(void)
404{
405 return 0;
406}
407
408static void amd_pmu_cpu_online(int cpu)
409{
410}
411
412static void amd_pmu_cpu_offline(int cpu)
413{
414}
415
416#endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
new file mode 100644
index 000000000000..4fbdfe5708d9
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -0,0 +1,982 @@
1#ifdef CONFIG_CPU_SUP_INTEL
2
3/*
4 * Intel PerfMon, used on Core and later.
5 */
6static const u64 intel_perfmon_event_map[] =
7{
8 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
9 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
10 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
11 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
12 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
13 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
14 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
15};
16
17static struct event_constraint intel_core_event_constraints[] =
18{
19 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
20 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
21 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
22 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
23 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
24 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
25 EVENT_CONSTRAINT_END
26};
27
28static struct event_constraint intel_core2_event_constraints[] =
29{
30 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
31 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
32 /*
33 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
34 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
35 * ratio between these counters.
36 */
37 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
38 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
39 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
40 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
41 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
42 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
43 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
44 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
45 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
46 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
47 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
48 EVENT_CONSTRAINT_END
49};
50
51static struct event_constraint intel_nehalem_event_constraints[] =
52{
53 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
54 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
55 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
56 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
57 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
58 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
59 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
60 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
61 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
62 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
63 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
64 EVENT_CONSTRAINT_END
65};
66
67static struct event_constraint intel_westmere_event_constraints[] =
68{
69 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
70 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
71 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
72 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
73 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
74 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
75 EVENT_CONSTRAINT_END
76};
77
78static struct event_constraint intel_gen_event_constraints[] =
79{
80 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
81 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
82 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
83 EVENT_CONSTRAINT_END
84};
85
86static u64 intel_pmu_event_map(int hw_event)
87{
88 return intel_perfmon_event_map[hw_event];
89}
90
91static __initconst u64 westmere_hw_cache_event_ids
92 [PERF_COUNT_HW_CACHE_MAX]
93 [PERF_COUNT_HW_CACHE_OP_MAX]
94 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
95{
96 [ C(L1D) ] = {
97 [ C(OP_READ) ] = {
98 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
99 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
100 },
101 [ C(OP_WRITE) ] = {
102 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
103 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
104 },
105 [ C(OP_PREFETCH) ] = {
106 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
107 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
108 },
109 },
110 [ C(L1I ) ] = {
111 [ C(OP_READ) ] = {
112 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
113 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
114 },
115 [ C(OP_WRITE) ] = {
116 [ C(RESULT_ACCESS) ] = -1,
117 [ C(RESULT_MISS) ] = -1,
118 },
119 [ C(OP_PREFETCH) ] = {
120 [ C(RESULT_ACCESS) ] = 0x0,
121 [ C(RESULT_MISS) ] = 0x0,
122 },
123 },
124 [ C(LL ) ] = {
125 [ C(OP_READ) ] = {
126 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
127 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
128 },
129 [ C(OP_WRITE) ] = {
130 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
131 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
132 },
133 [ C(OP_PREFETCH) ] = {
134 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
135 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
136 },
137 },
138 [ C(DTLB) ] = {
139 [ C(OP_READ) ] = {
140 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
141 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
142 },
143 [ C(OP_WRITE) ] = {
144 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
145 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
146 },
147 [ C(OP_PREFETCH) ] = {
148 [ C(RESULT_ACCESS) ] = 0x0,
149 [ C(RESULT_MISS) ] = 0x0,
150 },
151 },
152 [ C(ITLB) ] = {
153 [ C(OP_READ) ] = {
154 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
155 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
156 },
157 [ C(OP_WRITE) ] = {
158 [ C(RESULT_ACCESS) ] = -1,
159 [ C(RESULT_MISS) ] = -1,
160 },
161 [ C(OP_PREFETCH) ] = {
162 [ C(RESULT_ACCESS) ] = -1,
163 [ C(RESULT_MISS) ] = -1,
164 },
165 },
166 [ C(BPU ) ] = {
167 [ C(OP_READ) ] = {
168 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
169 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
170 },
171 [ C(OP_WRITE) ] = {
172 [ C(RESULT_ACCESS) ] = -1,
173 [ C(RESULT_MISS) ] = -1,
174 },
175 [ C(OP_PREFETCH) ] = {
176 [ C(RESULT_ACCESS) ] = -1,
177 [ C(RESULT_MISS) ] = -1,
178 },
179 },
180};
181
182static __initconst u64 nehalem_hw_cache_event_ids
183 [PERF_COUNT_HW_CACHE_MAX]
184 [PERF_COUNT_HW_CACHE_OP_MAX]
185 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
186{
187 [ C(L1D) ] = {
188 [ C(OP_READ) ] = {
189 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
190 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
191 },
192 [ C(OP_WRITE) ] = {
193 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
194 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
195 },
196 [ C(OP_PREFETCH) ] = {
197 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
198 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
199 },
200 },
201 [ C(L1I ) ] = {
202 [ C(OP_READ) ] = {
203 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
204 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
205 },
206 [ C(OP_WRITE) ] = {
207 [ C(RESULT_ACCESS) ] = -1,
208 [ C(RESULT_MISS) ] = -1,
209 },
210 [ C(OP_PREFETCH) ] = {
211 [ C(RESULT_ACCESS) ] = 0x0,
212 [ C(RESULT_MISS) ] = 0x0,
213 },
214 },
215 [ C(LL ) ] = {
216 [ C(OP_READ) ] = {
217 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
218 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
219 },
220 [ C(OP_WRITE) ] = {
221 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
222 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
223 },
224 [ C(OP_PREFETCH) ] = {
225 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
226 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
227 },
228 },
229 [ C(DTLB) ] = {
230 [ C(OP_READ) ] = {
231 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
232 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
233 },
234 [ C(OP_WRITE) ] = {
235 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
236 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
237 },
238 [ C(OP_PREFETCH) ] = {
239 [ C(RESULT_ACCESS) ] = 0x0,
240 [ C(RESULT_MISS) ] = 0x0,
241 },
242 },
243 [ C(ITLB) ] = {
244 [ C(OP_READ) ] = {
245 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
246 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
247 },
248 [ C(OP_WRITE) ] = {
249 [ C(RESULT_ACCESS) ] = -1,
250 [ C(RESULT_MISS) ] = -1,
251 },
252 [ C(OP_PREFETCH) ] = {
253 [ C(RESULT_ACCESS) ] = -1,
254 [ C(RESULT_MISS) ] = -1,
255 },
256 },
257 [ C(BPU ) ] = {
258 [ C(OP_READ) ] = {
259 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
260 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
261 },
262 [ C(OP_WRITE) ] = {
263 [ C(RESULT_ACCESS) ] = -1,
264 [ C(RESULT_MISS) ] = -1,
265 },
266 [ C(OP_PREFETCH) ] = {
267 [ C(RESULT_ACCESS) ] = -1,
268 [ C(RESULT_MISS) ] = -1,
269 },
270 },
271};
272
273static __initconst u64 core2_hw_cache_event_ids
274 [PERF_COUNT_HW_CACHE_MAX]
275 [PERF_COUNT_HW_CACHE_OP_MAX]
276 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
277{
278 [ C(L1D) ] = {
279 [ C(OP_READ) ] = {
280 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
281 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
282 },
283 [ C(OP_WRITE) ] = {
284 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
285 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
286 },
287 [ C(OP_PREFETCH) ] = {
288 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
289 [ C(RESULT_MISS) ] = 0,
290 },
291 },
292 [ C(L1I ) ] = {
293 [ C(OP_READ) ] = {
294 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
295 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
296 },
297 [ C(OP_WRITE) ] = {
298 [ C(RESULT_ACCESS) ] = -1,
299 [ C(RESULT_MISS) ] = -1,
300 },
301 [ C(OP_PREFETCH) ] = {
302 [ C(RESULT_ACCESS) ] = 0,
303 [ C(RESULT_MISS) ] = 0,
304 },
305 },
306 [ C(LL ) ] = {
307 [ C(OP_READ) ] = {
308 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
309 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
310 },
311 [ C(OP_WRITE) ] = {
312 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
313 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
314 },
315 [ C(OP_PREFETCH) ] = {
316 [ C(RESULT_ACCESS) ] = 0,
317 [ C(RESULT_MISS) ] = 0,
318 },
319 },
320 [ C(DTLB) ] = {
321 [ C(OP_READ) ] = {
322 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
323 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
324 },
325 [ C(OP_WRITE) ] = {
326 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
327 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
328 },
329 [ C(OP_PREFETCH) ] = {
330 [ C(RESULT_ACCESS) ] = 0,
331 [ C(RESULT_MISS) ] = 0,
332 },
333 },
334 [ C(ITLB) ] = {
335 [ C(OP_READ) ] = {
336 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
337 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
338 },
339 [ C(OP_WRITE) ] = {
340 [ C(RESULT_ACCESS) ] = -1,
341 [ C(RESULT_MISS) ] = -1,
342 },
343 [ C(OP_PREFETCH) ] = {
344 [ C(RESULT_ACCESS) ] = -1,
345 [ C(RESULT_MISS) ] = -1,
346 },
347 },
348 [ C(BPU ) ] = {
349 [ C(OP_READ) ] = {
350 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
351 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
352 },
353 [ C(OP_WRITE) ] = {
354 [ C(RESULT_ACCESS) ] = -1,
355 [ C(RESULT_MISS) ] = -1,
356 },
357 [ C(OP_PREFETCH) ] = {
358 [ C(RESULT_ACCESS) ] = -1,
359 [ C(RESULT_MISS) ] = -1,
360 },
361 },
362};
363
364static __initconst u64 atom_hw_cache_event_ids
365 [PERF_COUNT_HW_CACHE_MAX]
366 [PERF_COUNT_HW_CACHE_OP_MAX]
367 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
368{
369 [ C(L1D) ] = {
370 [ C(OP_READ) ] = {
371 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
372 [ C(RESULT_MISS) ] = 0,
373 },
374 [ C(OP_WRITE) ] = {
375 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
376 [ C(RESULT_MISS) ] = 0,
377 },
378 [ C(OP_PREFETCH) ] = {
379 [ C(RESULT_ACCESS) ] = 0x0,
380 [ C(RESULT_MISS) ] = 0,
381 },
382 },
383 [ C(L1I ) ] = {
384 [ C(OP_READ) ] = {
385 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
386 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
387 },
388 [ C(OP_WRITE) ] = {
389 [ C(RESULT_ACCESS) ] = -1,
390 [ C(RESULT_MISS) ] = -1,
391 },
392 [ C(OP_PREFETCH) ] = {
393 [ C(RESULT_ACCESS) ] = 0,
394 [ C(RESULT_MISS) ] = 0,
395 },
396 },
397 [ C(LL ) ] = {
398 [ C(OP_READ) ] = {
399 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
400 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
401 },
402 [ C(OP_WRITE) ] = {
403 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
404 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
405 },
406 [ C(OP_PREFETCH) ] = {
407 [ C(RESULT_ACCESS) ] = 0,
408 [ C(RESULT_MISS) ] = 0,
409 },
410 },
411 [ C(DTLB) ] = {
412 [ C(OP_READ) ] = {
413 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
414 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
415 },
416 [ C(OP_WRITE) ] = {
417 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
418 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
419 },
420 [ C(OP_PREFETCH) ] = {
421 [ C(RESULT_ACCESS) ] = 0,
422 [ C(RESULT_MISS) ] = 0,
423 },
424 },
425 [ C(ITLB) ] = {
426 [ C(OP_READ) ] = {
427 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
428 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
429 },
430 [ C(OP_WRITE) ] = {
431 [ C(RESULT_ACCESS) ] = -1,
432 [ C(RESULT_MISS) ] = -1,
433 },
434 [ C(OP_PREFETCH) ] = {
435 [ C(RESULT_ACCESS) ] = -1,
436 [ C(RESULT_MISS) ] = -1,
437 },
438 },
439 [ C(BPU ) ] = {
440 [ C(OP_READ) ] = {
441 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
442 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
443 },
444 [ C(OP_WRITE) ] = {
445 [ C(RESULT_ACCESS) ] = -1,
446 [ C(RESULT_MISS) ] = -1,
447 },
448 [ C(OP_PREFETCH) ] = {
449 [ C(RESULT_ACCESS) ] = -1,
450 [ C(RESULT_MISS) ] = -1,
451 },
452 },
453};
454
455static u64 intel_pmu_raw_event(u64 hw_event)
456{
457#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
458#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
459#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
460#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
461#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
462
463#define CORE_EVNTSEL_MASK \
464 (INTEL_ARCH_EVTSEL_MASK | \
465 INTEL_ARCH_UNIT_MASK | \
466 INTEL_ARCH_EDGE_MASK | \
467 INTEL_ARCH_INV_MASK | \
468 INTEL_ARCH_CNT_MASK)
469
470 return hw_event & CORE_EVNTSEL_MASK;
471}
472
473static void intel_pmu_enable_bts(u64 config)
474{
475 unsigned long debugctlmsr;
476
477 debugctlmsr = get_debugctlmsr();
478
479 debugctlmsr |= X86_DEBUGCTL_TR;
480 debugctlmsr |= X86_DEBUGCTL_BTS;
481 debugctlmsr |= X86_DEBUGCTL_BTINT;
482
483 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
484 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
485
486 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
487 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
488
489 update_debugctlmsr(debugctlmsr);
490}
491
492static void intel_pmu_disable_bts(void)
493{
494 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
495 unsigned long debugctlmsr;
496
497 if (!cpuc->ds)
498 return;
499
500 debugctlmsr = get_debugctlmsr();
501
502 debugctlmsr &=
503 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
504 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
505
506 update_debugctlmsr(debugctlmsr);
507}
508
509static void intel_pmu_disable_all(void)
510{
511 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
512
513 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
514
515 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
516 intel_pmu_disable_bts();
517}
518
519static void intel_pmu_enable_all(void)
520{
521 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
522
523 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
524
525 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
526 struct perf_event *event =
527 cpuc->events[X86_PMC_IDX_FIXED_BTS];
528
529 if (WARN_ON_ONCE(!event))
530 return;
531
532 intel_pmu_enable_bts(event->hw.config);
533 }
534}
535
536static inline u64 intel_pmu_get_status(void)
537{
538 u64 status;
539
540 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
541
542 return status;
543}
544
545static inline void intel_pmu_ack_status(u64 ack)
546{
547 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
548}
549
550static inline void
551intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
552{
553 int idx = __idx - X86_PMC_IDX_FIXED;
554 u64 ctrl_val, mask;
555
556 mask = 0xfULL << (idx * 4);
557
558 rdmsrl(hwc->config_base, ctrl_val);
559 ctrl_val &= ~mask;
560 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
561}
562
563static void intel_pmu_drain_bts_buffer(void)
564{
565 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
566 struct debug_store *ds = cpuc->ds;
567 struct bts_record {
568 u64 from;
569 u64 to;
570 u64 flags;
571 };
572 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
573 struct bts_record *at, *top;
574 struct perf_output_handle handle;
575 struct perf_event_header header;
576 struct perf_sample_data data;
577 struct pt_regs regs;
578
579 if (!event)
580 return;
581
582 if (!ds)
583 return;
584
585 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
586 top = (struct bts_record *)(unsigned long)ds->bts_index;
587
588 if (top <= at)
589 return;
590
591 ds->bts_index = ds->bts_buffer_base;
592
593
594 data.period = event->hw.last_period;
595 data.addr = 0;
596 data.raw = NULL;
597 regs.ip = 0;
598
599 /*
600 * Prepare a generic sample, i.e. fill in the invariant fields.
601 * We will overwrite the from and to address before we output
602 * the sample.
603 */
604 perf_prepare_sample(&header, &data, event, &regs);
605
606 if (perf_output_begin(&handle, event,
607 header.size * (top - at), 1, 1))
608 return;
609
610 for (; at < top; at++) {
611 data.ip = at->from;
612 data.addr = at->to;
613
614 perf_output_sample(&handle, &header, &data, event);
615 }
616
617 perf_output_end(&handle);
618
619 /* There's new data available. */
620 event->hw.interrupts++;
621 event->pending_kill = POLL_IN;
622}
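
The drain treats the DS area as a flat array of fixed-size records between bts_buffer_base and bts_index, so the pointer difference (top - at) is exactly the pending record count used to size the output, and each record becomes one sample with 'from' as the IP and 'to' as the address. The iteration shape (illustrative):

    #include <stdio.h>
    #include <stdint.h>

    struct bts_record {
            uint64_t from;
            uint64_t to;
            uint64_t flags;
    };

    int main(void)
    {
            struct bts_record buf[3] = {
                    { 0x1000, 0x2000, 0 },
                    { 0x2004, 0x3000, 0 },
            };
            struct bts_record *at = buf;        /* bts_buffer_base */
            struct bts_record *top = buf + 2;   /* bts_index: 2 pending */

            printf("%ld records\n", (long)(top - at)); /* ptr diff = count */
            for (; at < top; at++)
                    printf("branch %#llx -> %#llx\n",
                           (unsigned long long)at->from,
                           (unsigned long long)at->to);
            return 0;
    }
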
623
624static inline void
625intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
626{
627 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
628 intel_pmu_disable_bts();
629 intel_pmu_drain_bts_buffer();
630 return;
631 }
632
633 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
634 intel_pmu_disable_fixed(hwc, idx);
635 return;
636 }
637
638 x86_pmu_disable_event(hwc, idx);
639}
640
641static inline void
642intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
643{
644 int idx = __idx - X86_PMC_IDX_FIXED;
645 u64 ctrl_val, bits, mask;
646 int err;
647
648 /*
649 * Enable IRQ generation (0x8),
650 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
651 * if requested:
652 */
653 bits = 0x8ULL;
654 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
655 bits |= 0x2;
656 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
657 bits |= 0x1;
658
659 /*
660 * ANY bit is supported in v3 and up
661 */
662 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
663 bits |= 0x4;
664
665 bits <<= (idx * 4);
666 mask = 0xfULL << (idx * 4);
667
668 rdmsrl(hwc->config_base, ctrl_val);
669 ctrl_val &= ~mask;
670 ctrl_val |= bits;
671 err = checking_wrmsrl(hwc->config_base, ctrl_val);
672}
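
Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL at bit position idx*4: 0x1 counts ring 0, 0x2 counts ring 3, 0x4 is the v3-and-up ANY bit, and 0x8 raises a PMI on overflow. So fixed counter 1 counting user+kernel with PMI contributes 0xb << 4 = 0xb0 under field mask 0xf0. Checking the arithmetic (illustrative):

    #include <stdio.h>

    int main(void)
    {
            int idx = 1;                            /* fixed counter 1 */
            unsigned long long bits = 0x8 | 0x2 | 0x1; /* PMI+ring3+ring0 */
            unsigned long long mask = 0xfULL << (idx * 4);
            unsigned long long ctrl_val = 0;        /* pretend MSR read */

            ctrl_val &= ~mask;                      /* clear the field */
            ctrl_val |= bits << (idx * 4);          /* install new bits */
            printf("ctrl_val = %#llx, mask = %#llx\n",
                   ctrl_val, mask);                 /* 0xb0, 0xf0 */
            return 0;
    }
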
673
674static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
675{
676 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
677 if (!__get_cpu_var(cpu_hw_events).enabled)
678 return;
679
680 intel_pmu_enable_bts(hwc->config);
681 return;
682 }
683
684 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
685 intel_pmu_enable_fixed(hwc, idx);
686 return;
687 }
688
689 __x86_pmu_enable_event(hwc, idx);
690}
691
692/*
693 * Save and restart an expired event. Called by NMI contexts,
694 * so it has to be careful about preempting normal event ops:
695 */
696static int intel_pmu_save_and_restart(struct perf_event *event)
697{
698 struct hw_perf_event *hwc = &event->hw;
699 int idx = hwc->idx;
700 int ret;
701
702 x86_perf_event_update(event, hwc, idx);
703 ret = x86_perf_event_set_period(event, hwc, idx);
704
705 return ret;
706}
707
708static void intel_pmu_reset(void)
709{
710 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
711 unsigned long flags;
712 int idx;
713
714 if (!x86_pmu.num_events)
715 return;
716
717 local_irq_save(flags);
718
719 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
720
721 for (idx = 0; idx < x86_pmu.num_events; idx++) {
722 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
723 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
724 }
725 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
726 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
727 }
728 if (ds)
729 ds->bts_index = ds->bts_buffer_base;
730
731 local_irq_restore(flags);
732}
733
734/*
735 * This handler is triggered by the local APIC, so the APIC IRQ handling
736 * rules apply:
737 */
738static int intel_pmu_handle_irq(struct pt_regs *regs)
739{
740 struct perf_sample_data data;
741 struct cpu_hw_events *cpuc;
742 int bit, loops;
743 u64 ack, status;
744
745 data.addr = 0;
746 data.raw = NULL;
747
748 cpuc = &__get_cpu_var(cpu_hw_events);
749
750 perf_disable();
751 intel_pmu_drain_bts_buffer();
752 status = intel_pmu_get_status();
753 if (!status) {
754 perf_enable();
755 return 0;
756 }
757
758 loops = 0;
759again:
760 if (++loops > 100) {
761 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
762 perf_event_print_debug();
763 intel_pmu_reset();
764 perf_enable();
765 return 1;
766 }
767
768 inc_irq_stat(apic_perf_irqs);
769 ack = status;
770 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
771 struct perf_event *event = cpuc->events[bit];
772
773 clear_bit(bit, (unsigned long *) &status);
774 if (!test_bit(bit, cpuc->active_mask))
775 continue;
776
777 if (!intel_pmu_save_and_restart(event))
778 continue;
779
780 data.period = event->hw.last_period;
781
782 if (perf_event_overflow(event, 1, &data, regs))
783 intel_pmu_disable_event(&event->hw, bit);
784 }
785
786 intel_pmu_ack_status(ack);
787
788 /*
789 * Repeat if there is more work to be done:
790 */
791 status = intel_pmu_get_status();
792 if (status)
793 goto again;
794
795 perf_enable();
796
797 return 1;
798}
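
The handler drains MSR_CORE_PERF_GLOBAL_STATUS bit by bit: every set bit names an overflowed counter, the whole snapshot is acked via GLOBAL_OVF_CTRL, and status is re-read in case more counters overflowed in the meantime, with the 100-iteration check as a stuck-IRQ safety valve. The control flow reduced to a skeleton (illustrative; uses a GCC builtin for bit scanning):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_status[] = { 0x5, 0x2, 0x0 }; /* pending counters */
    static int read_idx;

    static uint64_t get_status(void) { return fake_status[read_idx++]; }

    int main(void)
    {
            uint64_t status;
            int loops = 0;

            status = get_status();
            while (status) {
                    if (++loops > 100) {            /* stuck-IRQ valve */
                            printf("irq loop stuck!\n");
                            break;
                    }
                    while (status) {
                            int bit = __builtin_ctzll(status);

                            status &= status - 1;   /* clear lowest bit */
                            printf("service counter %d\n", bit);
                    }
                    status = get_status();  /* repeat if more arrived */
            }
            return 0;
    }
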
799
800static struct event_constraint bts_constraint =
801 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
802
803static struct event_constraint *
804intel_special_constraints(struct perf_event *event)
805{
806 unsigned int hw_event;
807
808 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
809
810 if (unlikely((hw_event ==
811 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
812 (event->hw.sample_period == 1))) {
813
814 return &bts_constraint;
815 }
816 return NULL;
817}
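
intel_special_constraints() steers exactly one pattern at the BTS fixed resource: a branch-instructions event (0x00c4 in the map above) sampled with period 1, i.e. "record every branch", which Branch Trace Store can satisfy without occupying a generic counter. The predicate on its own (illustrative):

    #include <stdio.h>

    #define BRANCH_INSTRUCTIONS 0x00c4

    static int wants_bts(unsigned int hw_event, unsigned long long period)
    {
            return hw_event == BRANCH_INSTRUCTIONS && period == 1;
    }

    int main(void)
    {
            printf("%d %d\n",
                   wants_bts(0x00c4, 1),        /* 1: trace every branch */
                   wants_bts(0x00c4, 100000));  /* 0: normal sampling */
            return 0;
    }
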
818
819static struct event_constraint *
820intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
821{
822 struct event_constraint *c;
823
824 c = intel_special_constraints(event);
825 if (c)
826 return c;
827
828 return x86_get_event_constraints(cpuc, event);
829}
830
831static __initconst struct x86_pmu core_pmu = {
832 .name = "core",
833 .handle_irq = x86_pmu_handle_irq,
834 .disable_all = x86_pmu_disable_all,
835 .enable_all = x86_pmu_enable_all,
836 .enable = x86_pmu_enable_event,
837 .disable = x86_pmu_disable_event,
838 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
839 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
840 .event_map = intel_pmu_event_map,
841 .raw_event = intel_pmu_raw_event,
842 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
843 .apic = 1,
844 /*
845 * Intel PMCs cannot be accessed sanely above 32 bit width,
846 * so we install an artificial 1<<31 period regardless of
847 * the generic event period:
848 */
849 .max_period = (1ULL << 31) - 1,
850 .get_event_constraints = intel_get_event_constraints,
851 .event_constraints = intel_core_event_constraints,
852};
853
854static __initconst struct x86_pmu intel_pmu = {
855 .name = "Intel",
856 .handle_irq = intel_pmu_handle_irq,
857 .disable_all = intel_pmu_disable_all,
858 .enable_all = intel_pmu_enable_all,
859 .enable = intel_pmu_enable_event,
860 .disable = intel_pmu_disable_event,
861 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
862 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
863 .event_map = intel_pmu_event_map,
864 .raw_event = intel_pmu_raw_event,
865 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
866 .apic = 1,
867 /*
868 * Intel PMCs cannot be accessed sanely above 32 bit width,
869 * so we install an artificial 1<<31 period regardless of
870 * the generic event period:
871 */
872 .max_period = (1ULL << 31) - 1,
873 .enable_bts = intel_pmu_enable_bts,
874 .disable_bts = intel_pmu_disable_bts,
875 .get_event_constraints = intel_get_event_constraints
876};
877
878static __init int intel_pmu_init(void)
879{
880 union cpuid10_edx edx;
881 union cpuid10_eax eax;
882 unsigned int unused;
883 unsigned int ebx;
884 int version;
885
886 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
887 /* check for P6 processor family */
888 if (boot_cpu_data.x86 == 6) {
889 return p6_pmu_init();
890 } else {
891 return -ENODEV;
892 }
893 }
894
895 /*
896 * Check whether the Architectural PerfMon supports
897 * Branch Misses Retired hw_event or not.
898 */
899 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
900 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
901 return -ENODEV;
902
903 version = eax.split.version_id;
904 if (version < 2)
905 x86_pmu = core_pmu;
906 else
907 x86_pmu = intel_pmu;
908
909 x86_pmu.version = version;
910 x86_pmu.num_events = eax.split.num_events;
911 x86_pmu.event_bits = eax.split.bit_width;
912 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
913
914 /*
915 * Quirk: v2 perfmon does not report fixed-purpose events, so
916 * assume at least 3 events:
917 */
918 if (version > 1)
919 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
920
921 /*
922 * Install the hw-cache-events table:
923 */
924 switch (boot_cpu_data.x86_model) {
925 case 14: /* 65 nm core solo/duo, "Yonah" */
926 pr_cont("Core events, ");
927 break;
928
929 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
930 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
931 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
932 case 29: /* six-core 45 nm xeon "Dunnington" */
933 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
934 sizeof(hw_cache_event_ids));
935
936 x86_pmu.event_constraints = intel_core2_event_constraints;
937 pr_cont("Core2 events, ");
938 break;
939
940 case 26: /* 45 nm nehalem, "Bloomfield" */
941 case 30: /* 45 nm nehalem, "Lynnfield" */
942 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
943 sizeof(hw_cache_event_ids));
944
945 x86_pmu.event_constraints = intel_nehalem_event_constraints;
946 pr_cont("Nehalem/Corei7 events, ");
947 break;
948 case 28: /* Atom */
949 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
950 sizeof(hw_cache_event_ids));
951
952 x86_pmu.event_constraints = intel_gen_event_constraints;
953 pr_cont("Atom events, ");
954 break;
955
956 case 37: /* 32 nm nehalem, "Clarkdale" */
957 case 44: /* 32 nm nehalem, "Gulftown" */
958 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
959 sizeof(hw_cache_event_ids));
960
961 x86_pmu.event_constraints = intel_westmere_event_constraints;
962 pr_cont("Westmere events, ");
963 break;
964
965 default:
966 /*
967 * default constraints for v2 and up
968 */
969 x86_pmu.event_constraints = intel_gen_event_constraints;
970 pr_cont("generic architected perfmon, ");
971 }
972 return 0;
973}
974
975#else /* CONFIG_CPU_SUP_INTEL */
976
977static int intel_pmu_init(void)
978{
979 return 0;
980}
981
982#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
new file mode 100644
index 000000000000..a4e67b99d91c
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -0,0 +1,157 @@
1#ifdef CONFIG_CPU_SUP_INTEL
2
3/*
4 * Not sure about some of these
5 */
6static const u64 p6_perfmon_event_map[] =
7{
8 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
9 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
10 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
11 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
12 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
13 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
14 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
15};
16
17static u64 p6_pmu_event_map(int hw_event)
18{
19 return p6_perfmon_event_map[hw_event];
20}
21
22/*
23 * Event setting that is specified not to count anything.
24 * We use this to effectively disable a counter.
25 *
26 * L2_RQSTS with 0 MESI unit mask.
27 */
28#define P6_NOP_EVENT 0x0000002EULL
29
30static u64 p6_pmu_raw_event(u64 hw_event)
31{
32#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
33#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
34#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
35#define P6_EVNTSEL_INV_MASK 0x00800000ULL
36#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
37
38#define P6_EVNTSEL_MASK \
39 (P6_EVNTSEL_EVENT_MASK | \
40 P6_EVNTSEL_UNIT_MASK | \
41 P6_EVNTSEL_EDGE_MASK | \
42 P6_EVNTSEL_INV_MASK | \
43 P6_EVNTSEL_REG_MASK)
44
45 return hw_event & P6_EVNTSEL_MASK;
46}
47
48static struct event_constraint p6_event_constraints[] =
49{
50 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
51 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
52 INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
53 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
54 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
55 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
56 EVENT_CONSTRAINT_END
57};
58
59static void p6_pmu_disable_all(void)
60{
61 u64 val;
62
63 /* p6 only has one enable register */
64 rdmsrl(MSR_P6_EVNTSEL0, val);
65 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
66 wrmsrl(MSR_P6_EVNTSEL0, val);
67}
68
69static void p6_pmu_enable_all(void)
70{
71 unsigned long val;
72
73 /* p6 only has one enable register */
74 rdmsrl(MSR_P6_EVNTSEL0, val);
75 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
76 wrmsrl(MSR_P6_EVNTSEL0, val);
77}
78
79static inline void
80p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
81{
82 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
83 u64 val = P6_NOP_EVENT;
84
85 if (cpuc->enabled)
86 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
87
88 (void)checking_wrmsrl(hwc->config_base + idx, val);
89}
90
91static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
92{
93 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
94 u64 val;
95
96 val = hwc->config;
97 if (cpuc->enabled)
98 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
99
100 (void)checking_wrmsrl(hwc->config_base + idx, val);
101}
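
Because the P6 PMU has a single global enable bit living in EVNTSEL0, a counter cannot be gated individually through its own enable flag without stopping its sibling; instead, both helpers above "disable" an event by reprogramming its selector to P6_NOP_EVENT (L2_RQSTS with an empty MESI unit mask), which counts nothing. Conceptually (illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define P6_NOP_EVENT 0x0000002EULL  /* L2_RQSTS, unit mask 0 */

    static uint64_t evntsel[2];         /* stand-ins for MSR_P6_EVNTSEL0/1 */

    static void p6_disable(int idx, int globally_enabled)
    {
            uint64_t val = P6_NOP_EVENT;

            /* keep the shared ENABLE bit (bit 22) set if the PMU is on */
            if (globally_enabled)
                    val |= 1ULL << 22;
            evntsel[idx] = val;         /* counter now counts nothing */
    }

    int main(void)
    {
            p6_disable(1, 1);
            printf("EVNTSEL1 = %#llx\n", (unsigned long long)evntsel[1]);
            return 0;
    }
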
102
103static __initconst struct x86_pmu p6_pmu = {
104 .name = "p6",
105 .handle_irq = x86_pmu_handle_irq,
106 .disable_all = p6_pmu_disable_all,
107 .enable_all = p6_pmu_enable_all,
108 .enable = p6_pmu_enable_event,
109 .disable = p6_pmu_disable_event,
110 .eventsel = MSR_P6_EVNTSEL0,
111 .perfctr = MSR_P6_PERFCTR0,
112 .event_map = p6_pmu_event_map,
113 .raw_event = p6_pmu_raw_event,
114 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
115 .apic = 1,
116 .max_period = (1ULL << 31) - 1,
117 .version = 0,
118 .num_events = 2,
119 /*
120 * Events have 40 bits implemented. However they are designed such
121 * that bits [32-39] are sign extensions of bit 31. As such the
122 * effective width of an event for P6-like PMU is 32 bits only.
123 *
124 * See IA-32 Intel Architecture Software developer manual Vol 3B
125 */
126 .event_bits = 32,
127 .event_mask = (1ULL << 32) - 1,
128 .get_event_constraints = x86_get_event_constraints,
129 .event_constraints = p6_event_constraints,
130};
131
132static __init int p6_pmu_init(void)
133{
134 switch (boot_cpu_data.x86_model) {
135 case 1:
136 case 3: /* Pentium Pro */
137 case 5:
138 case 6: /* Pentium II */
139 case 7:
140 case 8:
141 case 11: /* Pentium III */
142 case 9:
143 case 13:
144 /* Pentium M */
145 break;
146 default:
147 pr_cont("unsupported p6 CPU model %d ",
148 boot_cpu_data.x86_model);
149 return -ENODEV;
150 }
151
152 x86_pmu = p6_pmu;
153
154 return 0;
155}
156
157#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 898df9719afb..fb329e9f8494 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -115,17 +115,6 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
115 115
116 return !test_bit(counter, perfctr_nmi_owner); 116 return !test_bit(counter, perfctr_nmi_owner);
117} 117}
118
119/* checks an msr for availability */
120int avail_to_resrv_perfctr_nmi(unsigned int msr)
121{
122 unsigned int counter;
123
124 counter = nmi_perfctr_msr_to_bit(msr);
125 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
126
127 return !test_bit(counter, perfctr_nmi_owner);
128}
129EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); 118EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
130 119
131int reserve_perfctr_nmi(unsigned int msr) 120int reserve_perfctr_nmi(unsigned int msr)
@@ -691,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
691 cpu_nmi_set_wd_enabled(); 680 cpu_nmi_set_wd_enabled();
692 681
693 apic_write(APIC_LVTPC, APIC_DM_NMI); 682 apic_write(APIC_LVTPC, APIC_DM_NMI);
694 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; 683 evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
695 wrmsr(evntsel_msr, evntsel, 0); 684 wrmsr(evntsel_msr, evntsel, 0);
696 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); 685 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
697 return 1; 686 return 1;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index ae775ca47b25..11540a189d93 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -18,11 +18,6 @@
18 18
19#include "dumpstack.h" 19#include "dumpstack.h"
20 20
21/* Just a stub for now */
22int x86_is_stack_id(int id, char *name)
23{
24 return 0;
25}
26 21
27void dump_trace(struct task_struct *task, struct pt_regs *regs, 22void dump_trace(struct task_struct *task, struct pt_regs *regs,
28 unsigned long *stack, unsigned long bp, 23 unsigned long *stack, unsigned long bp,
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 0ad9597073f5..676bc051252e 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -33,11 +33,6 @@ static char x86_stack_ids[][8] = {
33#endif 33#endif
34}; 34};
35 35
36int x86_is_stack_id(int id, char *name)
37{
38 return x86_stack_ids[id - 1] == name;
39}
40
41static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 36static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
42 unsigned *usedp, char **idp) 37 unsigned *usedp, char **idp)
43{ 38{
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 1e8ceadc0d6a..d6cc065f519f 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -344,13 +344,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
344 } 344 }
345 345
346 /* 346 /*
347 * For kernel-addresses, either the address or symbol name can be
348 * specified.
349 */
350 if (info->name)
351 info->address = (unsigned long)
352 kallsyms_lookup_name(info->name);
353 /*
354 * Check that the low-order bits of the address are appropriate 347 * Check that the low-order bits of the address are appropriate
355 * for the alignment implied by len. 348 * for the alignment implied by len.
356 */ 349 */
@@ -486,8 +479,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
486 rcu_read_lock(); 479 rcu_read_lock();
487 480
488 bp = per_cpu(bp_per_reg[i], cpu); 481 bp = per_cpu(bp_per_reg[i], cpu);
489 if (bp)
490 rc = NOTIFY_DONE;
491 /* 482 /*
492 * Reset the 'i'th TRAP bit in dr6 to denote completion of 483 * Reset the 'i'th TRAP bit in dr6 to denote completion of
493 * exception handling 484 * exception handling
@@ -506,7 +497,13 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
506 497
507 rcu_read_unlock(); 498 rcu_read_unlock();
508 } 499 }
509 if (dr6 & (~DR_TRAP_BITS))
500 /*
501 * Further processing in do_debug() is needed for a) user-space
502 * breakpoints (to generate signals) and b) when the system has
503 * taken exception due to multiple causes
504 */
505 if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
506 (dr6 & (~DR_TRAP_BITS)))
510 rc = NOTIFY_DONE; 507 rc = NOTIFY_DONE;
511 508
512 set_debugreg(dr7, 7); 509 set_debugreg(dr7, 7);
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5b8c7505b3bc..5de9f4a9c3fd 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -337,6 +337,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
337 337
338int __kprobes arch_prepare_kprobe(struct kprobe *p) 338int __kprobes arch_prepare_kprobe(struct kprobe *p)
339{ 339{
340 if (alternatives_text_reserved(p->addr, p->addr))
341 return -EINVAL;
342
340 if (!can_probe((unsigned long)p->addr)) 343 if (!can_probe((unsigned long)p->addr))
341 return -EILSEQ; 344 return -EILSEQ;
342 /* insn: must be on special executable page on x86. */ 345 /* insn: must be on special executable page on x86. */
@@ -429,7 +432,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
429static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, 432static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
430 struct kprobe_ctlblk *kcb) 433 struct kprobe_ctlblk *kcb)
431{ 434{
432#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) 435#if !defined(CONFIG_PREEMPT)
433 if (p->ainsn.boostable == 1 && !p->post_handler) { 436 if (p->ainsn.boostable == 1 && !p->post_handler) {
434 /* Boost up -- we can execute copied instructions directly */ 437 /* Boost up -- we can execute copied instructions directly */
435 reset_current_kprobe(); 438 reset_current_kprobe();
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 40b54ceb68b5..a2c1edd2d3ac 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
359 x86_init.mpparse.mpc_record(1); 359 x86_init.mpparse.mpc_record(1);
360 } 360 }
361 361
362#ifdef CONFIG_X86_BIGSMP
363 generic_bigsmp_probe();
364#endif
365
366 if (apic->setup_apic_routing)
367 apic->setup_apic_routing();
368
369 if (!num_processors) 362 if (!num_processors)
370 printk(KERN_ERR "MPTABLE: no processors registered!\n"); 363 printk(KERN_ERR "MPTABLE: no processors registered!\n");
371 return num_processors; 364 return num_processors;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 41a26a82470a..126f0b493d04 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -527,6 +527,7 @@ void set_personality_ia32(void)
527 527
528 /* Make sure to be in 32bit mode */ 528 /* Make sure to be in 32bit mode */
529 set_thread_flag(TIF_IA32); 529 set_thread_flag(TIF_IA32);
530 current->personality |= force_personality32;
530 531
531 /* Prepare the first "return" to user space */ 532 /* Prepare the first "return" to user space */
532 current_thread_info()->status |= TS_COMPAT; 533 current_thread_info()->status |= TS_COMPAT;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 0c1033d61e59..d03146f71b2f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -140,30 +140,6 @@ static const int arg_offs_table[] = {
140#endif 140#endif
141}; 141};
142 142
143/**
144 * regs_get_argument_nth() - get Nth argument at function call
145 * @regs: pt_regs which contains registers at function entry.
146 * @n: argument number.
147 *
148 * regs_get_argument_nth() returns @n th argument of a function call.
149 * Since usually the kernel stack will be changed right after function entry,
150 * you must use this at function entry. If the @n th entry is NOT in the
151 * kernel stack or pt_regs, this returns 0.
152 */
153unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n)
154{
155 if (n < ARRAY_SIZE(arg_offs_table))
156 return *(unsigned long *)((char *)regs + arg_offs_table[n]);
157 else {
158 /*
159 * The typical case: arg n is on the stack.
160 * (Note: stack[0] = return address, so skip it)
161 */
162 n -= ARRAY_SIZE(arg_offs_table);
163 return regs_get_kernel_stack_nth(regs, 1 + n);
164 }
165}
166
167/* 143/*
168 * does not yet catch signals sent when the child dies. 144 * does not yet catch signals sent when the child dies.
169 * in exit.c or in signal.c. 145 * in exit.c or in signal.c.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 678d0b8c26f3..b4e870cbdc60 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1083 set_cpu_sibling_map(0); 1083 set_cpu_sibling_map(0);
1084 1084
1085 enable_IR_x2apic(); 1085 enable_IR_x2apic();
1086#ifdef CONFIG_X86_64
1087 default_setup_apic_routing(); 1086 default_setup_apic_routing();
1088#endif
1089 1087
1090 if (smp_sanity_check(max_cpus) < 0) { 1088 if (smp_sanity_check(max_cpus) < 0) {
1091 printk(KERN_INFO "SMP disabled\n"); 1089 printk(KERN_INFO "SMP disabled\n");
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 33399176512a..1168e4454188 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -534,6 +534,9 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
534 534
535 get_debugreg(dr6, 6); 535 get_debugreg(dr6, 6);
536 536
537 /* Filter out all the reserved bits which are preset to 1 */
538 dr6 &= ~DR6_RESERVED;
539
537 /* Catch kmemcheck conditions first of all! */ 540 /* Catch kmemcheck conditions first of all! */
538 if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) 541 if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
539 return; 542 return;
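
Filtering first matters because several DR6 bits read back as 1 regardless
of any debug event and would otherwise taint every later test. A short
sketch, assuming the 0xFFFF0FF0 reserved-bit mask the kernel uses for
DR6_RESERVED:

#include <stdio.h>

#define DR6_RESERVED	0xFFFF0FF0UL	/* bits that always read back as 1 */
#define DR_STEP		0x4000UL	/* DR6[14], single-step trap */

int main(void)
{
	unsigned long dr6 = 0xFFFF4FF0UL;	/* raw read: noise + DR_STEP */

	dr6 &= ~DR6_RESERVED;			/* the filter added above */
	printf("step=%lu\n", dr6 & DR_STEP ? 1UL : 0UL);
	return 0;
}
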
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba49472a..15578f180e59 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
467 return -EOPNOTSUPP; 467 return -EOPNOTSUPP;
468 468
469 addr &= KVM_PIT_CHANNEL_MASK; 469 addr &= KVM_PIT_CHANNEL_MASK;
470 if (addr == 3)
471 return 0;
472
470 s = &pit_state->channels[addr]; 473 s = &pit_state->channels[addr];
471 474
472 mutex_lock(&pit_state->lock); 475 mutex_lock(&pit_state->lock);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ddcad452add..a1e1bc9d412d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
670{ 670{
671 static int version; 671 static int version;
672 struct pvclock_wall_clock wc; 672 struct pvclock_wall_clock wc;
673 struct timespec now, sys, boot; 673 struct timespec boot;
674 674
675 if (!wall_clock) 675 if (!wall_clock)
676 return; 676 return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
685 * wall clock specified here. guest system time equals host 685 * wall clock specified here. guest system time equals host
686 * system time for us, thus we must fill in host boot time here. 686 * system time for us, thus we must fill in host boot time here.
687 */ 687 */
688 now = current_kernel_time(); 688 getboottime(&boot);
689 ktime_get_ts(&sys);
690 boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
691 689
692 wc.sec = boot.tv_sec; 690 wc.sec = boot.tv_sec;
693 wc.nsec = boot.tv_nsec; 691 wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
762 local_irq_save(flags); 760 local_irq_save(flags);
763 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); 761 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
764 ktime_get_ts(&ts); 762 ktime_get_ts(&ts);
763 monotonic_to_bootbased(&ts);
765 local_irq_restore(flags); 764 local_irq_restore(flags);
766 765
767 /* With all the info we got, fill in the values */ 766 /* With all the info we got, fill in the values */
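
The deleted lines derived the guest-visible boot time as "wall clock minus
monotonic clock", which drifts by however long the host spends suspended;
getboottime() and monotonic_to_bootbased() account for suspend time. A
user-space rendering of the removed arithmetic (illustrative only:
CLOCK_REALTIME and CLOCK_MONOTONIC stand in for current_kernel_time() and
ktime_get_ts()):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec now, sys;
	long long boot_ns;

	clock_gettime(CLOCK_REALTIME, &now);	/* current_kernel_time() */
	clock_gettime(CLOCK_MONOTONIC, &sys);	/* ktime_get_ts()        */

	boot_ns = (now.tv_sec * 1000000000LL + now.tv_nsec) -
		  (sys.tv_sec * 1000000000LL + sys.tv_nsec);

	/* CLOCK_MONOTONIC stops while the host sleeps, so this "boot
	 * time" moves forward after every suspend cycle -- the bug the
	 * patch fixes. */
	printf("apparent boot time: %lld ns since the epoch\n", boot_ns);
	return 0;
}
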
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 71da1bca13cb..738e6593799d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
18#else 18#else
19 /* 19 /*
20 * With get_user_pages_fast, we walk down the pagetables without taking 20 * With get_user_pages_fast, we walk down the pagetables without taking
21 * any locks. For this we would like to load the pointers atoimcally, 21 * any locks. For this we would like to load the pointers atomically,
22 * but that is not possible (without expensive cmpxchg8b) on PAE. What 22 * but that is not possible (without expensive cmpxchg8b) on PAE. What
23 * we do have is the guarantee that a pte will only either go from not 23 * we do have is the guarantee that a pte will only either go from not
24 * present to present, or present to not present or both -- it will not 24 * present to present, or present to not present or both -- it will not
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 3347f696edc7..2c505ee71014 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -159,7 +159,7 @@ static int nmi_setup_mux(void)
159 159
160 for_each_possible_cpu(i) { 160 for_each_possible_cpu(i) {
161 per_cpu(cpu_msrs, i).multiplex = 161 per_cpu(cpu_msrs, i).multiplex =
162 kmalloc(multiplex_size, GFP_KERNEL); 162 kzalloc(multiplex_size, GFP_KERNEL);
163 if (!per_cpu(cpu_msrs, i).multiplex) 163 if (!per_cpu(cpu_msrs, i).multiplex)
164 return 0; 164 return 0;
165 } 165 }
@@ -179,7 +179,6 @@ static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
179 if (counter_config[i].enabled) { 179 if (counter_config[i].enabled) {
180 multiplex[i].saved = -(u64)counter_config[i].count; 180 multiplex[i].saved = -(u64)counter_config[i].count;
181 } else { 181 } else {
182 multiplex[i].addr = 0;
183 multiplex[i].saved = 0; 182 multiplex[i].saved = 0;
184 } 183 }
185 } 184 }
@@ -189,25 +188,27 @@ static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
189 188
190static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) 189static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
191{ 190{
191 struct op_msr *counters = msrs->counters;
192 struct op_msr *multiplex = msrs->multiplex; 192 struct op_msr *multiplex = msrs->multiplex;
193 int i; 193 int i;
194 194
195 for (i = 0; i < model->num_counters; ++i) { 195 for (i = 0; i < model->num_counters; ++i) {
196 int virt = op_x86_phys_to_virt(i); 196 int virt = op_x86_phys_to_virt(i);
197 if (multiplex[virt].addr) 197 if (counters[i].addr)
198 rdmsrl(multiplex[virt].addr, multiplex[virt].saved); 198 rdmsrl(counters[i].addr, multiplex[virt].saved);
199 } 199 }
200} 200}
201 201
202static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) 202static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
203{ 203{
204 struct op_msr *counters = msrs->counters;
204 struct op_msr *multiplex = msrs->multiplex; 205 struct op_msr *multiplex = msrs->multiplex;
205 int i; 206 int i;
206 207
207 for (i = 0; i < model->num_counters; ++i) { 208 for (i = 0; i < model->num_counters; ++i) {
208 int virt = op_x86_phys_to_virt(i); 209 int virt = op_x86_phys_to_virt(i);
209 if (multiplex[virt].addr) 210 if (counters[i].addr)
210 wrmsrl(multiplex[virt].addr, multiplex[virt].saved); 211 wrmsrl(counters[i].addr, multiplex[virt].saved);
211 } 212 }
212} 213}
213 214
@@ -303,11 +304,11 @@ static int allocate_msrs(void)
303 304
304 int i; 305 int i;
305 for_each_possible_cpu(i) { 306 for_each_possible_cpu(i) {
306 per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, 307 per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
307 GFP_KERNEL); 308 GFP_KERNEL);
308 if (!per_cpu(cpu_msrs, i).counters) 309 if (!per_cpu(cpu_msrs, i).counters)
309 return 0; 310 return 0;
310 per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, 311 per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
311 GFP_KERNEL); 312 GFP_KERNEL);
312 if (!per_cpu(cpu_msrs, i).controls) 313 if (!per_cpu(cpu_msrs, i).controls)
313 return 0; 314 return 0;
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 39686c29f03a..090cbbec7dbd 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -22,6 +22,9 @@
22#include <asm/ptrace.h> 22#include <asm/ptrace.h>
23#include <asm/msr.h> 23#include <asm/msr.h>
24#include <asm/nmi.h> 24#include <asm/nmi.h>
25#include <asm/apic.h>
26#include <asm/processor.h>
27#include <asm/cpufeature.h>
25 28
26#include "op_x86_model.h" 29#include "op_x86_model.h"
27#include "op_counter.h" 30#include "op_counter.h"
@@ -43,23 +46,10 @@
43 46
44static unsigned long reset_value[NUM_VIRT_COUNTERS]; 47static unsigned long reset_value[NUM_VIRT_COUNTERS];
45 48
46#ifdef CONFIG_OPROFILE_IBS
47
48/* IbsFetchCtl bits/masks */
49#define IBS_FETCH_RAND_EN (1ULL<<57)
50#define IBS_FETCH_VAL (1ULL<<49)
51#define IBS_FETCH_ENABLE (1ULL<<48)
52#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL
53
54/*IbsOpCtl bits */
55#define IBS_OP_CNT_CTL (1ULL<<19)
56#define IBS_OP_VAL (1ULL<<18)
57#define IBS_OP_ENABLE (1ULL<<17)
58
59#define IBS_FETCH_SIZE 6 49#define IBS_FETCH_SIZE 6
60#define IBS_OP_SIZE 12 50#define IBS_OP_SIZE 12
61 51
62static int has_ibs; /* AMD Family10h and later */ 52static u32 ibs_caps;
63 53
64struct op_ibs_config { 54struct op_ibs_config {
65 unsigned long op_enabled; 55 unsigned long op_enabled;
@@ -71,24 +61,52 @@ struct op_ibs_config {
71}; 61};
72 62
73static struct op_ibs_config ibs_config; 63static struct op_ibs_config ibs_config;
64static u64 ibs_op_ctl;
74 65
75#endif 66/*
67 * IBS cpuid feature detection
68 */
76 69
77#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 70#define IBS_CPUID_FEATURES 0x8000001b
71
72/*
73 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
74 * bit 0 is used to indicate the existence of IBS.
75 */
76#define IBS_CAPS_AVAIL (1LL<<0)
77#define IBS_CAPS_RDWROPCNT (1LL<<3)
78#define IBS_CAPS_OPCNT (1LL<<4)
78 79
79static void op_mux_fill_in_addresses(struct op_msrs * const msrs) 80/*
81 * IBS randomization macros
82 */
83#define IBS_RANDOM_BITS 12
84#define IBS_RANDOM_MASK ((1ULL << IBS_RANDOM_BITS) - 1)
85#define IBS_RANDOM_MAXCNT_OFFSET (1ULL << (IBS_RANDOM_BITS - 5))
86
87static u32 get_ibs_caps(void)
80{ 88{
81 int i; 89 u32 ibs_caps;
90 unsigned int max_level;
82 91
83 for (i = 0; i < NUM_VIRT_COUNTERS; i++) { 92 if (!boot_cpu_has(X86_FEATURE_IBS))
84 int hw_counter = op_x86_virt_to_phys(i); 93 return 0;
85 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) 94
86 msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter; 95 /* check IBS cpuid feature flags */
87 else 96 max_level = cpuid_eax(0x80000000);
88 msrs->multiplex[i].addr = 0; 97 if (max_level < IBS_CPUID_FEATURES)
89 } 98 return IBS_CAPS_AVAIL;
99
100 ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
101 if (!(ibs_caps & IBS_CAPS_AVAIL))
102 /* cpuid flags not valid */
103 return IBS_CAPS_AVAIL;
104
105 return ibs_caps;
90} 106}
91 107
108#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
109
92static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, 110static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
93 struct op_msrs const * const msrs) 111 struct op_msrs const * const msrs)
94{ 112{
@@ -98,7 +116,7 @@ static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
98 /* enable active counters */ 116 /* enable active counters */
99 for (i = 0; i < NUM_COUNTERS; ++i) { 117 for (i = 0; i < NUM_COUNTERS; ++i) {
100 int virt = op_x86_phys_to_virt(i); 118 int virt = op_x86_phys_to_virt(i);
101 if (!counter_config[virt].enabled) 119 if (!reset_value[virt])
102 continue; 120 continue;
103 rdmsrl(msrs->controls[i].addr, val); 121 rdmsrl(msrs->controls[i].addr, val);
104 val &= model->reserved; 122 val &= model->reserved;
@@ -107,10 +125,6 @@ static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
107 } 125 }
108} 126}
109 127
110#else
111
112static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }
113
114#endif 128#endif
115 129
116/* functions for op_amd_spec */ 130/* functions for op_amd_spec */
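
get_ibs_caps() can be reproduced almost verbatim in user space with the
compiler's cpuid helpers. A sketch using GCC/clang <cpuid.h>; the leaf
numbers come from the code above, and the Fn8000_0001 ECX bit-10 test
stands in for the kernel's boot_cpu_has(X86_FEATURE_IBS):

#include <stdio.h>
#include <cpuid.h>

#define IBS_CPUID_FEATURES	0x8000001b
#define IBS_CAPS_AVAIL		(1U << 0)

static unsigned int get_ibs_caps(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) ||
	    !(ecx & (1U << 10)))		/* no IBS at all */
		return 0;

	if (__get_cpuid_max(0x80000000, NULL) < IBS_CPUID_FEATURES)
		return IBS_CAPS_AVAIL;		/* caps leaf missing */

	__get_cpuid(IBS_CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
	return (eax & IBS_CAPS_AVAIL) ? eax : IBS_CAPS_AVAIL;
}

int main(void)
{
	printf("ibs_caps = 0x%08x\n", get_ibs_caps());
	return 0;
}
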
@@ -122,18 +136,12 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
122 for (i = 0; i < NUM_COUNTERS; i++) { 136 for (i = 0; i < NUM_COUNTERS; i++) {
123 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i)) 137 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
124 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i; 138 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
125 else
126 msrs->counters[i].addr = 0;
127 } 139 }
128 140
129 for (i = 0; i < NUM_CONTROLS; i++) { 141 for (i = 0; i < NUM_CONTROLS; i++) {
130 if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) 142 if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
131 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i; 143 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
132 else
133 msrs->controls[i].addr = 0;
134 } 144 }
135
136 op_mux_fill_in_addresses(msrs);
137} 145}
138 146
139static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, 147static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
@@ -144,7 +152,8 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
144 152
145 /* setup reset_value */ 153 /* setup reset_value */
146 for (i = 0; i < NUM_VIRT_COUNTERS; ++i) { 154 for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
147 if (counter_config[i].enabled) 155 if (counter_config[i].enabled
156 && msrs->counters[op_x86_virt_to_phys(i)].addr)
148 reset_value[i] = counter_config[i].count; 157 reset_value[i] = counter_config[i].count;
149 else 158 else
150 reset_value[i] = 0; 159 reset_value[i] = 0;
@@ -152,9 +161,18 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
152 161
153 /* clear all counters */ 162 /* clear all counters */
154 for (i = 0; i < NUM_CONTROLS; ++i) { 163 for (i = 0; i < NUM_CONTROLS; ++i) {
155 if (unlikely(!msrs->controls[i].addr)) 164 if (unlikely(!msrs->controls[i].addr)) {
165 if (counter_config[i].enabled && !smp_processor_id())
166 /*
 167 * counter is reserved; this happens on all
 168 * cpus, so report only for cpu #0
169 */
170 op_x86_warn_reserved(i);
156 continue; 171 continue;
172 }
157 rdmsrl(msrs->controls[i].addr, val); 173 rdmsrl(msrs->controls[i].addr, val);
174 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
175 op_x86_warn_in_use(i);
158 val &= model->reserved; 176 val &= model->reserved;
159 wrmsrl(msrs->controls[i].addr, val); 177 wrmsrl(msrs->controls[i].addr, val);
160 } 178 }
@@ -169,9 +187,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
169 /* enable active counters */ 187 /* enable active counters */
170 for (i = 0; i < NUM_COUNTERS; ++i) { 188 for (i = 0; i < NUM_COUNTERS; ++i) {
171 int virt = op_x86_phys_to_virt(i); 189 int virt = op_x86_phys_to_virt(i);
172 if (!counter_config[virt].enabled) 190 if (!reset_value[virt])
173 continue;
174 if (!msrs->counters[i].addr)
175 continue; 191 continue;
176 192
177 /* setup counter registers */ 193 /* setup counter registers */
@@ -185,7 +201,60 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
185 } 201 }
186} 202}
187 203
188#ifdef CONFIG_OPROFILE_IBS 204/*
205 * 16-bit Linear Feedback Shift Register (LFSR)
206 *
 207 *
 208 * Feedback polynomial = X^16 + X^14 + X^13 + X^11 + 1
209 */
210static unsigned int lfsr_random(void)
211{
212 static unsigned int lfsr_value = 0xF00D;
213 unsigned int bit;
214
215 /* Compute next bit to shift in */
216 bit = ((lfsr_value >> 0) ^
217 (lfsr_value >> 2) ^
218 (lfsr_value >> 3) ^
219 (lfsr_value >> 5)) & 0x0001;
220
221 /* Advance to next register value */
222 lfsr_value = (lfsr_value >> 1) | (bit << 15);
223
224 return lfsr_value;
225}
226
227/*
228 * IBS software randomization
229 *
230 * The IBS periodic op counter is randomized in software. The lower 12
231 * bits of the 20 bit counter are randomized. IbsOpCurCnt is
232 * initialized with a 12 bit random value.
233 */
234static inline u64 op_amd_randomize_ibs_op(u64 val)
235{
236 unsigned int random = lfsr_random();
237
238 if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
239 /*
 240 * Work around hw that cannot write to IbsOpCurCnt
 241 *
 242 * Randomize the lower 8 bits of the 16 bit
 243 * IbsOpMaxCnt [15:0] value in the range of -128 to
 244 * +127 by adding/subtracting an offset to/from the
245 * maximum count (IbsOpMaxCnt).
246 *
247 * To avoid over or underflows and protect upper bits
248 * starting at bit 16, the initial value for
249 * IbsOpMaxCnt must fit in the range from 0x0081 to
250 * 0xff80.
251 */
252 val += (s8)(random >> 4);
253 else
254 val |= (u64)(random & IBS_RANDOM_MASK) << 32;
255
256 return val;
257}
189 258
190static inline void 259static inline void
191op_amd_handle_ibs(struct pt_regs * const regs, 260op_amd_handle_ibs(struct pt_regs * const regs,
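
The LFSR and the two randomization strategies are self-contained enough to
exercise in user space. A sketch; the taps implement the reconstructed
polynomial X^16 + X^14 + X^13 + X^11 + 1 (output bit 0 XOR bits 2, 3 and
5), and the IbsOpMaxCnt value is made up for the demonstration:

#include <stdio.h>

#define IBS_RANDOM_BITS	12
#define IBS_RANDOM_MASK	((1ULL << IBS_RANDOM_BITS) - 1)

static unsigned int lfsr_random(void)
{
	static unsigned int lfsr_value = 0xF00D;
	unsigned int bit;

	bit = ((lfsr_value >> 0) ^ (lfsr_value >> 2) ^
	       (lfsr_value >> 3) ^ (lfsr_value >> 5)) & 0x0001;
	lfsr_value = (lfsr_value >> 1) | (bit << 15);
	return lfsr_value;
}

int main(void)
{
	unsigned long long val = 0x1000;	/* made-up IbsOpMaxCnt */
	unsigned int random = lfsr_random();

	/* No IbsOpCurCnt write support: nudge the max count itself by
	 * a signed 8-bit offset in -128..+127, as (s8)(random >> 4). */
	printf("maxcnt path: 0x%llx\n",
	       val + (signed char)(random >> 4));

	/* Otherwise: seed IbsOpCurCnt[43:32] with 12 random bits. */
	printf("curcnt path: 0x%llx\n",
	       val | ((unsigned long long)(random & IBS_RANDOM_MASK) << 32));
	return 0;
}
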
@@ -194,7 +263,7 @@ op_amd_handle_ibs(struct pt_regs * const regs,
194 u64 val, ctl; 263 u64 val, ctl;
195 struct op_entry entry; 264 struct op_entry entry;
196 265
197 if (!has_ibs) 266 if (!ibs_caps)
198 return; 267 return;
199 268
200 if (ibs_config.fetch_enabled) { 269 if (ibs_config.fetch_enabled) {
@@ -210,7 +279,7 @@ op_amd_handle_ibs(struct pt_regs * const regs,
210 oprofile_write_commit(&entry); 279 oprofile_write_commit(&entry);
211 280
212 /* reenable the IRQ */ 281 /* reenable the IRQ */
213 ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK); 282 ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
214 ctl |= IBS_FETCH_ENABLE; 283 ctl |= IBS_FETCH_ENABLE;
215 wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); 284 wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
216 } 285 }
@@ -236,8 +305,7 @@ op_amd_handle_ibs(struct pt_regs * const regs,
236 oprofile_write_commit(&entry); 305 oprofile_write_commit(&entry);
237 306
238 /* reenable the IRQ */ 307 /* reenable the IRQ */
239 ctl &= ~IBS_OP_VAL & 0xFFFFFFFF; 308 ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
240 ctl |= IBS_OP_ENABLE;
241 wrmsrl(MSR_AMD64_IBSOPCTL, ctl); 309 wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
242 } 310 }
243 } 311 }
@@ -246,41 +314,57 @@ op_amd_handle_ibs(struct pt_regs * const regs,
246static inline void op_amd_start_ibs(void) 314static inline void op_amd_start_ibs(void)
247{ 315{
248 u64 val; 316 u64 val;
249 if (has_ibs && ibs_config.fetch_enabled) { 317
250 val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; 318 if (!ibs_caps)
319 return;
320
321 if (ibs_config.fetch_enabled) {
322 val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
251 val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; 323 val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
252 val |= IBS_FETCH_ENABLE; 324 val |= IBS_FETCH_ENABLE;
253 wrmsrl(MSR_AMD64_IBSFETCHCTL, val); 325 wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
254 } 326 }
255 327
256 if (has_ibs && ibs_config.op_enabled) { 328 if (ibs_config.op_enabled) {
257 val = (ibs_config.max_cnt_op >> 4) & 0xFFFF; 329 ibs_op_ctl = ibs_config.max_cnt_op >> 4;
258 val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; 330 if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
259 val |= IBS_OP_ENABLE; 331 /*
332 * IbsOpCurCnt not supported. See
333 * op_amd_randomize_ibs_op() for details.
334 */
335 ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
336 } else {
337 /*
338 * The start value is randomized with a
 339 * positive offset; we need to compensate for it
 340 * with half of the randomized range. Also
341 * avoid underflows.
342 */
343 ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
344 IBS_OP_MAX_CNT);
345 }
346 if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
347 ibs_op_ctl |= IBS_OP_CNT_CTL;
348 ibs_op_ctl |= IBS_OP_ENABLE;
349 val = op_amd_randomize_ibs_op(ibs_op_ctl);
260 wrmsrl(MSR_AMD64_IBSOPCTL, val); 350 wrmsrl(MSR_AMD64_IBSOPCTL, val);
261 } 351 }
262} 352}
263 353
264static void op_amd_stop_ibs(void) 354static void op_amd_stop_ibs(void)
265{ 355{
266 if (has_ibs && ibs_config.fetch_enabled) 356 if (!ibs_caps)
357 return;
358
359 if (ibs_config.fetch_enabled)
267 /* clear max count and enable */ 360 /* clear max count and enable */
268 wrmsrl(MSR_AMD64_IBSFETCHCTL, 0); 361 wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);
269 362
270 if (has_ibs && ibs_config.op_enabled) 363 if (ibs_config.op_enabled)
271 /* clear max count and enable */ 364 /* clear max count and enable */
272 wrmsrl(MSR_AMD64_IBSOPCTL, 0); 365 wrmsrl(MSR_AMD64_IBSOPCTL, 0);
273} 366}
274 367
275#else
276
277static inline void op_amd_handle_ibs(struct pt_regs * const regs,
278 struct op_msrs const * const msrs) { }
279static inline void op_amd_start_ibs(void) { }
280static inline void op_amd_stop_ibs(void) { }
281
282#endif
283
284static int op_amd_check_ctrs(struct pt_regs * const regs, 368static int op_amd_check_ctrs(struct pt_regs * const regs,
285 struct op_msrs const * const msrs) 369 struct op_msrs const * const msrs)
286{ 370{
@@ -314,7 +398,7 @@ static void op_amd_start(struct op_msrs const * const msrs)
314 if (!reset_value[op_x86_phys_to_virt(i)]) 398 if (!reset_value[op_x86_phys_to_virt(i)])
315 continue; 399 continue;
316 rdmsrl(msrs->controls[i].addr, val); 400 rdmsrl(msrs->controls[i].addr, val);
317 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 401 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
318 wrmsrl(msrs->controls[i].addr, val); 402 wrmsrl(msrs->controls[i].addr, val);
319 } 403 }
320 404
@@ -334,7 +418,7 @@ static void op_amd_stop(struct op_msrs const * const msrs)
334 if (!reset_value[op_x86_phys_to_virt(i)]) 418 if (!reset_value[op_x86_phys_to_virt(i)])
335 continue; 419 continue;
336 rdmsrl(msrs->controls[i].addr, val); 420 rdmsrl(msrs->controls[i].addr, val);
337 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; 421 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
338 wrmsrl(msrs->controls[i].addr, val); 422 wrmsrl(msrs->controls[i].addr, val);
339 } 423 }
340 424
@@ -355,8 +439,6 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
355 } 439 }
356} 440}
357 441
358#ifdef CONFIG_OPROFILE_IBS
359
360static u8 ibs_eilvt_off; 442static u8 ibs_eilvt_off;
361 443
362static inline void apic_init_ibs_nmi_per_cpu(void *arg) 444static inline void apic_init_ibs_nmi_per_cpu(void *arg)
@@ -405,45 +487,36 @@ static int init_ibs_nmi(void)
405 return 1; 487 return 1;
406 } 488 }
407 489
408#ifdef CONFIG_NUMA
409 /* Sanity check */
410 /* Works only for 64bit with proper numa implementation. */
411 if (nodes != num_possible_nodes()) {
412 printk(KERN_DEBUG "Failed to setup CPU node(s) for IBS, "
413 "found: %d, expected %d",
414 nodes, num_possible_nodes());
415 return 1;
416 }
417#endif
418 return 0; 490 return 0;
419} 491}
420 492
421/* uninitialize the APIC for the IBS interrupts if needed */ 493/* uninitialize the APIC for the IBS interrupts if needed */
422static void clear_ibs_nmi(void) 494static void clear_ibs_nmi(void)
423{ 495{
424 if (has_ibs) 496 if (ibs_caps)
425 on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); 497 on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
426} 498}
427 499
428/* initialize the APIC for the IBS interrupts if available */ 500/* initialize the APIC for the IBS interrupts if available */
429static void ibs_init(void) 501static void ibs_init(void)
430{ 502{
431 has_ibs = boot_cpu_has(X86_FEATURE_IBS); 503 ibs_caps = get_ibs_caps();
432 504
433 if (!has_ibs) 505 if (!ibs_caps)
434 return; 506 return;
435 507
436 if (init_ibs_nmi()) { 508 if (init_ibs_nmi()) {
437 has_ibs = 0; 509 ibs_caps = 0;
438 return; 510 return;
439 } 511 }
440 512
441 printk(KERN_INFO "oprofile: AMD IBS detected\n"); 513 printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
514 (unsigned)ibs_caps);
442} 515}
443 516
444static void ibs_exit(void) 517static void ibs_exit(void)
445{ 518{
446 if (!has_ibs) 519 if (!ibs_caps)
447 return; 520 return;
448 521
449 clear_ibs_nmi(); 522 clear_ibs_nmi();
@@ -463,7 +536,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
463 if (ret) 536 if (ret)
464 return ret; 537 return ret;
465 538
466 if (!has_ibs) 539 if (!ibs_caps)
467 return ret; 540 return ret;
468 541
469 /* model specific files */ 542 /* model specific files */
@@ -473,7 +546,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
473 ibs_config.fetch_enabled = 0; 546 ibs_config.fetch_enabled = 0;
474 ibs_config.max_cnt_op = 250000; 547 ibs_config.max_cnt_op = 250000;
475 ibs_config.op_enabled = 0; 548 ibs_config.op_enabled = 0;
476 ibs_config.dispatched_ops = 1; 549 ibs_config.dispatched_ops = 0;
477 550
478 dir = oprofilefs_mkdir(sb, root, "ibs_fetch"); 551 dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
479 oprofilefs_create_ulong(sb, dir, "enable", 552 oprofilefs_create_ulong(sb, dir, "enable",
@@ -488,8 +561,9 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
488 &ibs_config.op_enabled); 561 &ibs_config.op_enabled);
489 oprofilefs_create_ulong(sb, dir, "max_count", 562 oprofilefs_create_ulong(sb, dir, "max_count",
490 &ibs_config.max_cnt_op); 563 &ibs_config.max_cnt_op);
491 oprofilefs_create_ulong(sb, dir, "dispatched_ops", 564 if (ibs_caps & IBS_CAPS_OPCNT)
492 &ibs_config.dispatched_ops); 565 oprofilefs_create_ulong(sb, dir, "dispatched_ops",
566 &ibs_config.dispatched_ops);
493 567
494 return 0; 568 return 0;
495} 569}
@@ -507,19 +581,6 @@ static void op_amd_exit(void)
507 ibs_exit(); 581 ibs_exit();
508} 582}
509 583
510#else
511
512/* no IBS support */
513
514static int op_amd_init(struct oprofile_operations *ops)
515{
516 return 0;
517}
518
519static void op_amd_exit(void) {}
520
521#endif /* CONFIG_OPROFILE_IBS */
522
523struct op_x86_model_spec op_amd_spec = { 584struct op_x86_model_spec op_amd_spec = {
524 .num_counters = NUM_COUNTERS, 585 .num_counters = NUM_COUNTERS,
525 .num_controls = NUM_CONTROLS, 586 .num_controls = NUM_CONTROLS,
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index ac6b354becdf..e6a160a4684a 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -394,12 +394,6 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
394 setup_num_counters(); 394 setup_num_counters();
395 stag = get_stagger(); 395 stag = get_stagger();
396 396
397 /* initialize some registers */
398 for (i = 0; i < num_counters; ++i)
399 msrs->counters[i].addr = 0;
400 for (i = 0; i < num_controls; ++i)
401 msrs->controls[i].addr = 0;
402
403 /* the counter & cccr registers we pay attention to */ 397 /* the counter & cccr registers we pay attention to */
404 for (i = 0; i < num_counters; ++i) { 398 for (i = 0; i < num_counters; ++i) {
405 addr = p4_counters[VIRT_CTR(stag, i)].counter_address; 399 addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 8eb05878554c..2bf90fafa7b5 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -37,15 +37,11 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
37 for (i = 0; i < num_counters; i++) { 37 for (i = 0; i < num_counters; i++) {
38 if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) 38 if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
39 msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; 39 msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
40 else
41 msrs->counters[i].addr = 0;
42 } 40 }
43 41
44 for (i = 0; i < num_counters; i++) { 42 for (i = 0; i < num_counters; i++) {
45 if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) 43 if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
46 msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; 44 msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
47 else
48 msrs->controls[i].addr = 0;
49 } 45 }
50} 46}
51 47
@@ -57,7 +53,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
57 int i; 53 int i;
58 54
59 if (!reset_value) { 55 if (!reset_value) {
60 reset_value = kmalloc(sizeof(reset_value[0]) * num_counters, 56 reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
61 GFP_ATOMIC); 57 GFP_ATOMIC);
62 if (!reset_value) 58 if (!reset_value)
63 return; 59 return;
@@ -82,9 +78,18 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
82 78
83 /* clear all counters */ 79 /* clear all counters */
84 for (i = 0; i < num_counters; ++i) { 80 for (i = 0; i < num_counters; ++i) {
85 if (unlikely(!msrs->controls[i].addr)) 81 if (unlikely(!msrs->controls[i].addr)) {
82 if (counter_config[i].enabled && !smp_processor_id())
83 /*
 84 * counter is reserved; this happens on all
 85 * cpus, so report only for cpu #0
86 */
87 op_x86_warn_reserved(i);
86 continue; 88 continue;
89 }
87 rdmsrl(msrs->controls[i].addr, val); 90 rdmsrl(msrs->controls[i].addr, val);
91 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
92 op_x86_warn_in_use(i);
88 val &= model->reserved; 93 val &= model->reserved;
89 wrmsrl(msrs->controls[i].addr, val); 94 wrmsrl(msrs->controls[i].addr, val);
90 } 95 }
@@ -161,7 +166,7 @@ static void ppro_start(struct op_msrs const * const msrs)
161 for (i = 0; i < num_counters; ++i) { 166 for (i = 0; i < num_counters; ++i) {
162 if (reset_value[i]) { 167 if (reset_value[i]) {
163 rdmsrl(msrs->controls[i].addr, val); 168 rdmsrl(msrs->controls[i].addr, val);
164 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 169 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
165 wrmsrl(msrs->controls[i].addr, val); 170 wrmsrl(msrs->controls[i].addr, val);
166 } 171 }
167 } 172 }
@@ -179,7 +184,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
179 if (!reset_value[i]) 184 if (!reset_value[i])
180 continue; 185 continue;
181 rdmsrl(msrs->controls[i].addr, val); 186 rdmsrl(msrs->controls[i].addr, val);
182 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; 187 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
183 wrmsrl(msrs->controls[i].addr, val); 188 wrmsrl(msrs->controls[i].addr, val);
184 } 189 }
185} 190}
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 7b8e75d16081..ff82a755edd4 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -57,6 +57,26 @@ struct op_x86_model_spec {
57 57
58struct op_counter_config; 58struct op_counter_config;
59 59
60static inline void op_x86_warn_in_use(int counter)
61{
62 /*
63 * The warning indicates an already running counter. If
64 * oprofile doesn't collect data, then try using a different
65 * performance counter on your platform to monitor the desired
66 * event. Delete counter #%d from the desired event by editing
67 * the /usr/share/oprofile/%s/<cpu>/events file. If the event
68 * cannot be monitored by any other counter, contact your
69 * hardware or BIOS vendor.
70 */
71 pr_warning("oprofile: counter #%d on cpu #%d may already be used\n",
72 counter, smp_processor_id());
73}
74
75static inline void op_x86_warn_reserved(int counter)
76{
77 pr_warning("oprofile: counter #%d is already reserved\n", counter);
78}
79
60extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, 80extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
61 struct op_counter_config *counter_config); 81 struct op_counter_config *counter_config);
62extern int op_x86_phys_to_virt(int phys); 82extern int op_x86_phys_to_virt(int phys);
diff --git a/block/blk-core.c b/block/blk-core.c
index 718897e6d37f..d1a9a0a64f95 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1147 */ 1147 */
1148static inline bool queue_should_plug(struct request_queue *q) 1148static inline bool queue_should_plug(struct request_queue *q)
1149{ 1149{
1150 return !(blk_queue_nonrot(q) && blk_queue_queuing(q)); 1150 return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
1151} 1151}
1152 1152
1153static int __make_request(struct request_queue *q, struct bio *bio) 1153static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
1859 * and to it is freed is accounted as io that is in progress at 1859 * and to it is freed is accounted as io that is in progress at
1860 * the driver side. 1860 * the driver side.
1861 */ 1861 */
1862 if (blk_account_rq(rq)) { 1862 if (blk_account_rq(rq))
1863 q->in_flight[rq_is_sync(rq)]++; 1863 q->in_flight[rq_is_sync(rq)]++;
1864 /*
1865 * Mark this device as supporting hardware queuing, if
1866 * we have more IOs in flight than 4.
1867 */
1868 if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
1869 set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
1870 }
1871} 1864}
1872 1865
1873/** 1866/**
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 17b768d0d42f..023f4e69a337 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
42 */ 42 */
43#define CFQ_MIN_TT (2) 43#define CFQ_MIN_TT (2)
44 44
45/*
46 * Allow merged cfqqs to perform this amount of seeky I/O before
47 * deciding to break the queues up again.
48 */
49#define CFQQ_COOP_TOUT (HZ)
50
51#define CFQ_SLICE_SCALE (5) 45#define CFQ_SLICE_SCALE (5)
52#define CFQ_HW_QUEUE_MIN (5) 46#define CFQ_HW_QUEUE_MIN (5)
53#define CFQ_SERVICE_SHIFT 12 47#define CFQ_SERVICE_SHIFT 12
54 48
49#define CFQQ_SEEK_THR 8 * 1024
50#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
51
55#define RQ_CIC(rq) \ 52#define RQ_CIC(rq) \
56 ((struct cfq_io_context *) (rq)->elevator_private) 53 ((struct cfq_io_context *) (rq)->elevator_private)
57#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2) 54#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
137 u64 seek_total; 134 u64 seek_total;
138 sector_t seek_mean; 135 sector_t seek_mean;
139 sector_t last_request_pos; 136 sector_t last_request_pos;
140 unsigned long seeky_start;
141 137
142 pid_t pid; 138 pid_t pid;
143 139
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
314 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 310 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
315 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 311 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
316 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ 312 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
313 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
317 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ 314 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
318 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ 315 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
319}; 316};
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
342CFQ_CFQQ_FNS(slice_new); 339CFQ_CFQQ_FNS(slice_new);
343CFQ_CFQQ_FNS(sync); 340CFQ_CFQQ_FNS(sync);
344CFQ_CFQQ_FNS(coop); 341CFQ_CFQQ_FNS(coop);
342CFQ_CFQQ_FNS(split_coop);
345CFQ_CFQQ_FNS(deep); 343CFQ_CFQQ_FNS(deep);
346CFQ_CFQQ_FNS(wait_busy); 344CFQ_CFQQ_FNS(wait_busy);
347#undef CFQ_CFQQ_FNS 345#undef CFQ_CFQQ_FNS
@@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1566 cfq_clear_cfqq_wait_busy(cfqq); 1564 cfq_clear_cfqq_wait_busy(cfqq);
1567 1565
1568 /* 1566 /*
1567 * If this cfqq is shared between multiple processes, check to
1568 * make sure that those processes are still issuing I/Os within
1569 * the mean seek distance. If not, it may be time to break the
1570 * queues apart again.
1571 */
1572 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1573 cfq_mark_cfqq_split_coop(cfqq);
1574
1575 /*
1569 * store what was left of this slice, if the queue idled/timed out 1576 * store what was left of this slice, if the queue idled/timed out
1570 */ 1577 */
1571 if (timed_out && !cfq_cfqq_slice_new(cfqq)) { 1578 if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1663 return cfqd->last_position - blk_rq_pos(rq); 1670 return cfqd->last_position - blk_rq_pos(rq);
1664} 1671}
1665 1672
1666#define CFQQ_SEEK_THR 8 * 1024
1667#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
1668
1669static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1673static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1670 struct request *rq, bool for_preempt) 1674 struct request *rq, bool for_preempt)
1671{ 1675{
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3000 total = cfqq->seek_total + (cfqq->seek_samples/2); 3004 total = cfqq->seek_total + (cfqq->seek_samples/2);
3001 do_div(total, cfqq->seek_samples); 3005 do_div(total, cfqq->seek_samples);
3002 cfqq->seek_mean = (sector_t)total; 3006 cfqq->seek_mean = (sector_t)total;
3003
3004 /*
3005 * If this cfqq is shared between multiple processes, check to
3006 * make sure that those processes are still issuing I/Os within
3007 * the mean seek distance. If not, it may be time to break the
3008 * queues apart again.
3009 */
3010 if (cfq_cfqq_coop(cfqq)) {
3011 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
3012 cfqq->seeky_start = jiffies;
3013 else if (!CFQQ_SEEKY(cfqq))
3014 cfqq->seeky_start = 0;
3015 }
3016} 3007}
3017 3008
3018/* 3009/*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3453 return cic_to_cfqq(cic, 1); 3444 return cic_to_cfqq(cic, 1);
3454} 3445}
3455 3446
3456static int should_split_cfqq(struct cfq_queue *cfqq)
3457{
3458 if (cfqq->seeky_start &&
3459 time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
3460 return 1;
3461 return 0;
3462}
3463
3464/* 3447/*
3465 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this 3448 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3466 * was the last process referring to said cfqq. 3449 * was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@ static struct cfq_queue *
3469split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) 3452split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3470{ 3453{
3471 if (cfqq_process_refs(cfqq) == 1) { 3454 if (cfqq_process_refs(cfqq) == 1) {
3472 cfqq->seeky_start = 0;
3473 cfqq->pid = current->pid; 3455 cfqq->pid = current->pid;
3474 cfq_clear_cfqq_coop(cfqq); 3456 cfq_clear_cfqq_coop(cfqq);
3457 cfq_clear_cfqq_split_coop(cfqq);
3475 return cfqq; 3458 return cfqq;
3476 } 3459 }
3477 3460
@@ -3510,7 +3493,7 @@ new_queue:
3510 /* 3493 /*
3511 * If the queue was seeky for too long, break it apart. 3494 * If the queue was seeky for too long, break it apart.
3512 */ 3495 */
3513 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) { 3496 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3514 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); 3497 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3515 cfqq = split_cfqq(cic, cfqq); 3498 cfqq = split_cfqq(cic, cfqq);
3516 if (!cfqq) 3499 if (!cfqq)
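
After this change the split decision hangs entirely off the running seek
mean: CFQQ_SEEKY() compares it against the 8K threshold, and a coop queue
that trips the test at slice expiry is flagged for splitting on its next
request. A sketch of the arithmetic; only the rounded division is taken
from the cfq_update_io_seektime() code above, the accumulation of
seek_total is simplified here:

#include <stdio.h>

#define CFQQ_SEEK_THR	(8 * 1024)	/* sectors, as in the patch */

struct cfqq_stats {
	unsigned int seek_samples;
	unsigned long long seek_total;
	unsigned long long seek_mean;
};

/* Rounded running mean: "total + samples/2, then divide". */
static void update_seek_mean(struct cfqq_stats *q, unsigned long long dist)
{
	q->seek_samples++;
	q->seek_total += dist;
	q->seek_mean = (q->seek_total + q->seek_samples / 2)
			/ q->seek_samples;
}

int main(void)
{
	struct cfqq_stats q = { 0, 0, 0 };

	update_seek_mean(&q, 4096);
	update_seek_mean(&q, 65536);	/* one big seek drags the mean up */
	printf("mean=%llu seeky=%d\n", q.seek_mean,
	       q.seek_mean > CFQQ_SEEK_THR);	/* the CFQQ_SEEKY() test */
	return 0;
}
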
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c1315c47..b2586f57e1f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
935 struct platform_device *dd; 935 struct platform_device *dd;
936 936
937 id = dock_station_count; 937 id = dock_station_count;
938 memset(&ds, 0, sizeof(ds));
938 dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); 939 dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
939 if (IS_ERR(dd)) 940 if (IS_ERR(dd))
940 return PTR_ERR(dd); 941 return PTR_ERR(dd);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7c0441f63b39..cc978a8c00b7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
110 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 110 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
111 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 111 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
112 (void *)2}, 112 (void *)2},
113 { set_max_cstate, "Pavilion zv5000", {
114 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
115 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
116 (void *)1},
117 { set_max_cstate, "Asus L8400B", {
118 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
119 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
120 (void *)1},
113 {}, 121 {},
114}; 122};
115 123
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
872 return(acpi_idle_enter_c1(dev, state)); 880 return(acpi_idle_enter_c1(dev, state));
873 881
874 local_irq_disable(); 882 local_irq_disable();
875 current_thread_info()->status &= ~TS_POLLING; 883 if (cx->entry_method != ACPI_CSTATE_FFH) {
876 /* 884 current_thread_info()->status &= ~TS_POLLING;
877 * TS_POLLING-cleared state must be visible before we test 885 /*
878 * NEED_RESCHED: 886 * TS_POLLING-cleared state must be visible before we test
879 */ 887 * NEED_RESCHED:
880 smp_mb(); 888 */
889 smp_mb();
890 }
881 891
882 if (unlikely(need_resched())) { 892 if (unlikely(need_resched())) {
883 current_thread_info()->status |= TS_POLLING; 893 current_thread_info()->status |= TS_POLLING;
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
957 } 967 }
958 968
959 local_irq_disable(); 969 local_irq_disable();
960 current_thread_info()->status &= ~TS_POLLING; 970 if (cx->entry_method != ACPI_CSTATE_FFH) {
961 /* 971 current_thread_info()->status &= ~TS_POLLING;
962 * TS_POLLING-cleared state must be visible before we test 972 /*
963 * NEED_RESCHED: 973 * TS_POLLING-cleared state must be visible before we test
964 */ 974 * NEED_RESCHED:
965 smp_mb(); 975 */
976 smp_mb();
977 }
966 978
967 if (unlikely(need_resched())) { 979 if (unlikely(need_resched())) {
968 current_thread_info()->status |= TS_POLLING; 980 current_thread_info()->status |= TS_POLLING;
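
The guard added in both functions skips the TS_POLLING handshake for FFH
(MWAIT-based) C-state entry, where the waking CPU never needs an IPI
because MWAIT itself monitors the flags word. On the other entry paths the
ordering still matters: clear the polling flag, issue a full barrier, then
re-check need_resched. A sketch of that handshake with C11 atomics (the
names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool ts_polling = true;	/* idle CPU advertises polling */
static atomic_bool need_resched_flag;	/* set by a waking CPU */

/* Once polling is cleared, wakers fall back to sending an IPI, so the
 * clear must be globally visible before the final recheck. */
static bool enter_idle_slowpath(void)
{
	atomic_store(&ts_polling, false);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load(&need_resched_flag)) {
		atomic_store(&ts_polling, true);	/* abort the halt */
		return false;
	}
	return true;	/* safe to halt; an IPI will wake us */
}

int main(void)
{
	atomic_store(&need_resched_flag, true);	/* a waker raced with us */
	return enter_idle_slowpath() ? 1 : 0;	/* -> 0: halt aborted */
}
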
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7247819dbd80..e306ba9aa34e 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
125 return status; 125 return status;
126} 126}
127 127
128static int early_pdc_done;
129
128void acpi_processor_set_pdc(acpi_handle handle) 130void acpi_processor_set_pdc(acpi_handle handle)
129{ 131{
130 struct acpi_object_list *obj_list; 132 struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
132 if (arch_has_acpi_pdc() == false) 134 if (arch_has_acpi_pdc() == false)
133 return; 135 return;
134 136
137 if (early_pdc_done)
138 return;
139
135 obj_list = acpi_processor_alloc_pdc(); 140 obj_list = acpi_processor_alloc_pdc();
136 if (!obj_list) 141 if (!obj_list)
137 return; 142 return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
151 return 0; 156 return 0;
152} 157}
153 158
159static int param_early_pdc_optin(char *s)
160{
161 early_pdc_optin = 1;
162 return 1;
163}
164__setup("acpi_early_pdc_eval", param_early_pdc_optin);
165
154static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { 166static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
155 { 167 {
156 set_early_pdc_optin, "HP Envy", { 168 set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
192 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 204 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
193 ACPI_UINT32_MAX, 205 ACPI_UINT32_MAX,
194 early_init_pdc, NULL, NULL, NULL); 206 early_init_pdc, NULL, NULL, NULL);
207
208 early_pdc_done = 1;
195} 209}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2cabadcc4d8c..a959f6a07508 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
413 if (result) 413 if (result)
414 goto update_bios; 414 goto update_bios;
415 415
416 return 0; 416 /* We need to call _PPC once when cpufreq starts */
417 if (ignore_ppc != 1)
418 result = acpi_processor_get_platform_limit(pr);
419
420 return result;
417 421
418 /* 422 /*
419 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that 423 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f6226085d..3e009674f333 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
1336 1336
1337 if (child) 1337 if (child)
1338 *child = device; 1338 *child = device;
1339 return 0; 1339
1340 if (device)
1341 return 0;
1342 else
1343 return -ENODEV;
1340} 1344}
1341 1345
1346/*
1347 * acpi_bus_add and acpi_bus_start
1348 *
 1349 * scan a given ACPI tree (probably one that was recently
 1350 * hot-plugged) and create and add, or start, the devices found.
 1351 *
 1352 * If no devices were found, -ENODEV is returned; this does not
 1353 * necessarily indicate a real error, as there simply were no suitable
 1354 * ACPI objects in the table trunk from which the kernel could create
 1355 * a device and add/start an appropriate driver.
1356 */
1357
1342int 1358int
1343acpi_bus_add(struct acpi_device **child, 1359acpi_bus_add(struct acpi_device **child,
1344 struct acpi_device *parent, acpi_handle handle, int type) 1360 struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
1348 memset(&ops, 0, sizeof(ops)); 1364 memset(&ops, 0, sizeof(ops));
1349 ops.acpi_op_add = 1; 1365 ops.acpi_op_add = 1;
1350 1366
1351 acpi_bus_scan(handle, &ops, child); 1367 return acpi_bus_scan(handle, &ops, child);
1352 return 0;
1353} 1368}
1354EXPORT_SYMBOL(acpi_bus_add); 1369EXPORT_SYMBOL(acpi_bus_add);
1355 1370
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
1357{ 1372{
1358 struct acpi_bus_ops ops; 1373 struct acpi_bus_ops ops;
1359 1374
1375 if (!device)
1376 return -EINVAL;
1377
1360 memset(&ops, 0, sizeof(ops)); 1378 memset(&ops, 0, sizeof(ops));
1361 ops.acpi_op_start = 1; 1379 ops.acpi_op_start = 1;
1362 1380
1363 acpi_bus_scan(device->handle, &ops, NULL); 1381 return acpi_bus_scan(device->handle, &ops, NULL);
1364 return 0;
1365} 1382}
1366EXPORT_SYMBOL(acpi_bus_start); 1383EXPORT_SYMBOL(acpi_bus_start);
1367 1384
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7c450..8a0ed2800e63 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
213 unsigned long table_end; 213 unsigned long table_end;
214 acpi_size tbl_size; 214 acpi_size tbl_size;
215 215
216 if (acpi_disabled) 216 if (acpi_disabled && !acpi_ht)
217 return -ENODEV; 217 return -ENODEV;
218 218
219 if (!handler) 219 if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
280 struct acpi_table_header *table = NULL; 280 struct acpi_table_header *table = NULL;
281 acpi_size tbl_size; 281 acpi_size tbl_size;
282 282
283 if (acpi_disabled) 283 if (acpi_disabled && !acpi_ht)
284 return -ENODEV; 284 return -ENODEV;
285 285
286 if (!handler) 286 if (!handler)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..b34390347c16 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
2868 }, 2868 },
2869 .driver_data = "F.23", /* cutoff BIOS version */ 2869 .driver_data = "F.23", /* cutoff BIOS version */
2870 }, 2870 },
2871 /*
2872 * Acer eMachines G725 has the same problem. BIOS
2873 * V1.03 is known to be broken. V3.04 is known to
 2874 * work. In between, there are V1.06, V2.06 and V3.03
 2875 * that we don't know much about. For now,
2876 * blacklist anything older than V3.04.
2877 */
2878 {
2879 .ident = "G725",
2880 .matches = {
2881 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
2882 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
2883 },
2884 .driver_data = "V3.04", /* cutoff BIOS version */
2885 },
2871 { } /* terminate list */ 2886 { } /* terminate list */
2872 }; 2887 };
2873 const struct dmi_system_id *dmi = dmi_first_match(sysids); 2888 const struct dmi_system_id *dmi = dmi_first_match(sysids);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2875 * write indication (used for PIO/DMA setup), result TF is 2875 * write indication (used for PIO/DMA setup), result TF is
2876 * copied back and we don't whine too much about its failure. 2876 * copied back and we don't whine too much about its failure.
2877 */ 2877 */
2878 tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2878 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2879 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2879 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2880 tf->flags |= ATA_TFLAG_WRITE; 2880 tf->flags |= ATA_TFLAG_WRITE;
2881 2881
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
893 do_write); 893 do_write);
894 } 894 }
895 895
896 if (!do_write)
897 flush_dcache_page(page);
898
896 qc->curbytes += qc->sect_size; 899 qc->curbytes += qc->sect_size;
897 qc->cursg_ofs += qc->sect_size; 900 qc->cursg_ofs += qc->sect_size;
898 901
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..6e2c3b064f53 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
59 else 59 else
60 pr_debug("class '%s' does not have a release() function, " 60 pr_debug("class '%s' does not have a release() function, "
61 "be careful\n", class->name); 61 "be careful\n", class->name);
62
63 kfree(cp);
62} 64}
63 65
64static struct sysfs_ops class_sysfs_ops = { 66static struct sysfs_ops class_sysfs_ops = {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594860d3..9291614ac6b7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
337 if (*pos > h->highest_lun) 337 if (*pos > h->highest_lun)
338 return 0; 338 return 0;
339 339
340 if (drv == NULL) /* it's possible for h->drv[] to have holes. */
341 return 0;
342
340 if (drv->heads == 0) 343 if (drv->heads == 0)
341 return 0; 344 return 0;
342 345
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
808 808
809exit: 809exit:
810 sdio_release_host(card->func); 810 sdio_release_host(card->func);
811 kfree(tmpbuf);
811 812
812 return ret; 813 return ret;
813} 814}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 34cf04e21795..fd50ead59c79 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
767 767
768static int __init agp_amd64_mod_init(void) 768static int __init agp_amd64_mod_init(void)
769{ 769{
770#ifndef MODULE
770 if (gart_iommu_aperture) 771 if (gart_iommu_aperture)
771 return agp_bridges_found ? 0 : -ENODEV; 772 return agp_bridges_found ? 0 : -ENODEV;
772 773#endif
773 return agp_amd64_init(); 774 return agp_amd64_init();
774} 775}
775 776
776static void __exit agp_amd64_cleanup(void) 777static void __exit agp_amd64_cleanup(void)
777{ 778{
779#ifndef MODULE
778 if (gart_iommu_aperture) 780 if (gart_iommu_aperture)
779 return; 781 return;
782#endif
780 if (aperture_resource) 783 if (aperture_resource)
781 release_resource(aperture_resource); 784 release_resource(aperture_resource);
782 pci_unregister_driver(&agp_amd64_pci_driver); 785 pci_unregister_driver(&agp_amd64_pci_driver);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
39struct tpm_inf_dev { 39struct tpm_inf_dev {
40 int iotype; 40 int iotype;
41 41
42 void __iomem *mem_base; /* MMIO ioremap'd addr */ 42 void __iomem *mem_base; /* MMIO ioremap'd addr */
43 unsigned long map_base; /* phys MMIO base */ 43 unsigned long map_base; /* phys MMIO base */
44 unsigned long map_size; /* MMIO region size */ 44 unsigned long map_size; /* MMIO region size */
45 unsigned int index_off; /* index register offset */ 45 unsigned int index_off; /* index register offset */
46 46
47 unsigned int data_regs; /* Data registers */ 47 unsigned int data_regs; /* Data registers */
48 unsigned int data_size; 48 unsigned int data_size;
49 49
50 unsigned int config_port; /* IO Port config index reg */ 50 unsigned int config_port; /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
406 .miscdev = {.fops = &inf_ops,}, 406 .miscdev = {.fops = &inf_ops,},
407}; 407};
408 408
409static const struct pnp_device_id tpm_pnp_tbl[] = { 409static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
410 /* Infineon TPMs */ 410 /* Infineon TPMs */
411 {"IFX0101", 0}, 411 {"IFX0101", 0},
412 {"IFX0102", 0}, 412 {"IFX0102", 0},
413 {"", 0} 413 {"", 0}
414}; 414};
415 415
416MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); 416MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
417 417
418static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, 418static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
419 const struct pnp_device_id *dev_id) 419 const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
430 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && 430 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
431 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { 431 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
432 432
433 tpm_dev.iotype = TPM_INF_IO_PORT; 433 tpm_dev.iotype = TPM_INF_IO_PORT;
434 434
435 tpm_dev.config_port = pnp_port_start(dev, 0); 435 tpm_dev.config_port = pnp_port_start(dev, 0);
436 tpm_dev.config_size = pnp_port_len(dev, 0); 436 tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
459 goto err_last; 459 goto err_last;
460 } 460 }
461 } else if (pnp_mem_valid(dev, 0) && 461 } else if (pnp_mem_valid(dev, 0) &&
462 !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { 462 !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
463 463
464 tpm_dev.iotype = TPM_INF_IO_MEM; 464 tpm_dev.iotype = TPM_INF_IO_MEM;
465 465
466 tpm_dev.map_base = pnp_mem_start(dev, 0); 466 tpm_dev.map_base = pnp_mem_start(dev, 0);
467 tpm_dev.map_size = pnp_mem_len(dev, 0); 467 tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
 		 "product id 0x%02x%02x"
 		 "%s\n",
 		 tpm_dev.iotype == TPM_INF_IO_PORT ?
 		 tpm_dev.config_port :
 		 tpm_dev.map_base + tpm_dev.index_off,
 		 tpm_dev.iotype == TPM_INF_IO_PORT ?
 		 tpm_dev.data_regs :
 		 tpm_dev.map_base + tpm_dev.data_regs,
 		 version[0], version[1],
 		 vendorid[0], vendorid[1],
 		 productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
 			iounmap(tpm_dev.mem_base);
 			release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
 		}
+		tpm_dev_vendor_release(chip);
 		tpm_remove_hardware(chip->dev);
 	}
 }
 
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+	struct tpm_chip *chip = pnp_get_drvdata(dev);
+	int rc;
+	if (chip) {
+		u8 savestate[] = {
+			0, 193,		/* TPM_TAG_RQU_COMMAND */
+			0, 0, 0, 10,	/* blob length (in bytes) */
+			0, 0, 0, 152	/* TPM_ORD_SaveState */
+		};
+		dev_info(&dev->dev, "saving TPM state\n");
+		rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+		if (rc < 0) {
+			dev_err(&dev->dev, "error while saving TPM state\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+	/* Re-configure TPM after suspending */
+	tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	tpm_config_out(IOLIMH, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+	tpm_config_out(IOLIML, TPM_INF_ADDR);
+	tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+	/* activate register */
+	tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+	tpm_config_out(0x01, TPM_INF_DATA);
+	tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+	/* disable RESET, LP and IRQC */
+	tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+	return tpm_pm_resume(&dev->dev);
+}
+
 static struct pnp_driver tpm_inf_pnp_driver = {
 	.name = "tpm_inf_pnp",
-	.driver = {
-		   .owner = THIS_MODULE,
-		   .suspend = tpm_pm_suspend,
-		   .resume = tpm_pm_resume,
-		   },
-	.id_table = tpm_pnp_tbl,
+	.id_table = tpm_inf_pnp_tbl,
 	.probe = tpm_inf_pnp_probe,
-	.remove = __devexit_p(tpm_inf_pnp_remove),
+	.suspend = tpm_inf_pnp_suspend,
+	.resume = tpm_inf_pnp_resume,
+	.remove = __devexit_p(tpm_inf_pnp_remove)
 };
 
 static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
 
 MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
 MODULE_LICENSE("GPL");
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
 			pid = task_pid(current);
 			type = PIDTYPE_PID;
 		}
-		retval = __f_setown(filp, pid, type, 0);
+		get_pid(pid);
 		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		retval = __f_setown(filp, pid, type, 0);
+		put_pid(pid);
 		if (retval)
 			goto out;
 	} else {
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 27d20fac19d1..b314a999aabe 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -21,7 +21,7 @@
 
 #define DRV_NAME "cs5535-clockevt"
 
-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
 module_param_named(irq, timer_irq, int, 0644);
 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 				(dbs_tuners_ins.up_threshold -
 				 dbs_tuners_ins.down_differential);
 
+		if (freq_next < policy->min)
+			freq_next = policy->min;
+
 		if (!dbs_tuners_ins.powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5f2ee0f8e2c..64a937262a40 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
 	cohd_fin->pending_irqs--;
 	cohc->completed = cohd_fin->desc.cookie;
 
-	BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
 	if (cohc->nbr_active_done == 0)
 		return;
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8bb..e7a3230fb7d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
 		chan->dev->chan = NULL;
 		mutex_unlock(&dma_list_mutex);
 		device_unregister(&chan->dev->device);
+		free_percpu(chan->local);
 	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8b905161fbf4..948d563941c9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:
 
 	if (iterations > 0)
 		while (!kthread_should_stop()) {
-			DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
 			interruptible_sleep_on(&wait_dmatest_exit);
 		}
 
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5f7a500e18d0..5cc37afe2bc1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
 	if (is_ioat_active(status) || is_ioat_idle(status))
 		ioat_suspend(chan);
 	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (end && time_after(jiffies, end)) {
+		if (tmo && time_after(jiffies, end)) {
 			err = -ETIMEDOUT;
 			break;
 		}
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389e..e80bae1673fa 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:	buffer number to update.
  *		0 or 1 are the only valid values.
  * @phyaddr:	buffer physical address.
- * @return:	Returns 0 on success or negative error code on failure. This
- *		function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
-				     int buffer_n, dma_addr_t phyaddr)
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
+				      int buffer_n, dma_addr_t phyaddr)
 {
 	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 	}
 
 	spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-	return 0;
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
 	unsigned int chan_id = ichan->dma_chan.chan_id;
 	struct device *dev = &ichan->dma_chan.dev->device;
-	int ret;
 
 	if (async_tx_test_ack(&desc->txd))
 		return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
 	 * doing it again shouldn't hurt either.
 	 */
-	ret = ipu_update_channel_buffer(ichan, buf_idx,
-					sg_dma_address(sg));
-
-	if (ret < 0) {
-		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-			sg, chan_id, buf_idx);
-		return ret;
-	}
+	ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
 
 	ipu_select_buffer(chan_id, buf_idx);
 	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 
 	if (likely(sgnew) &&
 	    ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-		callback = desc->txd.callback;
-		callback_param = desc->txd.callback_param;
+		callback = descnew->txd.callback;
+		callback_param = descnew->txd.callback_param;
 		spin_unlock(&ichan->lock);
-		callback(callback_param);
+		if (callback)
+			callback(callback_param);
 		spin_lock(&ichan->lock);
 	}
 
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 000dc67b85b7..3391e6739d06 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
  * the memory system completely. A command line option allows to force-enable
  * hardware ECC later in amd64_enable_ecc_error_reporting().
  */
-static const char *ecc_warning =
-	"WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
-	" Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
-	" Also, use of the override can cause unknown side effects.\n";
+static const char *ecc_msg =
+	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+	" Either enable ECC checking or force module loading by setting "
+	"'ecc_enable_override'.\n"
+	" (Note that use of the override may cause unknown side effects.)\n";
 
 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
 	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
 	if (!ecc_enabled)
-		amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
+		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
 			     "is currently disabled, set F3x%x[22] (%s).\n",
 			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
 	else
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 
 	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
 	if (!nb_mce_en)
-		amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
+		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
 			     "0x%08x[4] on node %d to enable.\n",
 			     MSR_IA32_MCG_CTL, pvt->mc_node_id);
 
 	if (!ecc_enabled || !nb_mce_en) {
 		if (!ecc_enable_override) {
-			amd64_printk(KERN_WARNING, "%s", ecc_warning);
+			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
 			return -ENODEV;
 		}
 		ecc_enable_override = 0;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402af97b..ecd5928d7110 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
 		end <<= (24 - PAGE_SHIFT);
 		end |= (1 << (24 - PAGE_SHIFT)) - 1;
 
-		csrow->first_page = start >> PAGE_SHIFT;
-		csrow->last_page = end >> PAGE_SHIFT;
+		csrow->first_page = start;
+		csrow->last_page = end;
 		csrow->nr_pages = end + 1 - start;
 		csrow->grain = 8;
 		csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
 
 	mpc85xx_init_csrows(mci);
 
-#ifdef CONFIG_EDAC_DEBUG
-	edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
 	/* store the original error disable bits */
 	orig_ddr_err_disable =
 	    in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index cbaf420c36c5..2d3dc7ded0a9 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
 static struct kmem_cache *fwnet_packet_task_cache;
 
+static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
+{
+	dev_kfree_skb_any(ptask->skb);
+	kmem_cache_free(fwnet_packet_task_cache, ptask);
+}
+
 static int fwnet_send_packet(struct fwnet_packet_task *ptask);
 
 static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
 {
-	struct fwnet_device *dev;
+	struct fwnet_device *dev = ptask->dev;
 	unsigned long flags;
-
-	dev = ptask->dev;
+	bool free;
 
 	spin_lock_irqsave(&dev->lock, flags);
-	list_del(&ptask->pt_link);
-	spin_unlock_irqrestore(&dev->lock, flags);
 
-	ptask->outstanding_pkts--;	/* FIXME access inside lock */
+	ptask->outstanding_pkts--;
+
+	/* Check whether we or the networking TX soft-IRQ is last user. */
+	free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
+
+	if (ptask->outstanding_pkts == 0)
+		list_del(&ptask->pt_link);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
 
 	if (ptask->outstanding_pkts > 0) {
 		u16 dg_size;
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
 		ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
 		}
 		fwnet_send_packet(ptask);
-	} else {
-		dev_kfree_skb_any(ptask->skb);
-		kmem_cache_free(fwnet_packet_task_cache, ptask);
 	}
+
+	if (free)
+		fwnet_free_ptask(ptask);
 }
 
 static void fwnet_write_complete(struct fw_card *card, int rcode,
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 	unsigned tx_len;
 	struct rfc2734_header *bufhdr;
 	unsigned long flags;
+	bool free;
 
 	dev = ptask->dev;
 	tx_len = ptask->max_payload;
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 			generation, SCODE_100, 0ULL, ptask->skb->data,
 			tx_len + 8, fwnet_write_complete, ptask);
 
-		/* FIXME race? */
 		spin_lock_irqsave(&dev->lock, flags);
-		list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
+		/* If the AT tasklet already ran, we may be last user. */
+		free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+		if (!free)
+			list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
 		spin_unlock_irqrestore(&dev->lock, flags);
 
-		return 0;
+		goto out;
 	}
 
 	fw_send_request(dev->card, &ptask->transaction,
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 			ptask->generation, ptask->speed, ptask->fifo_addr,
 			ptask->skb->data, tx_len, fwnet_write_complete, ptask);
 
-	/* FIXME race? */
 	spin_lock_irqsave(&dev->lock, flags);
-	list_add_tail(&ptask->pt_link, &dev->sent_list);
+
+	/* If the AT tasklet already ran, we may be last user. */
+	free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+	if (!free)
+		list_add_tail(&ptask->pt_link, &dev->sent_list);
+
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	dev->netdev->trans_start = jiffies;
+ out:
+	if (free)
+		fwnet_free_ptask(ptask);
 
 	return 0;
 }
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	ptask->max_payload = max_payload;
+	INIT_LIST_HEAD(&ptask->pt_link);
+
 	fwnet_send_packet(ptask);
 
 	return NETDEV_TX_OK;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2345d4103fe6..43ebf337b131 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	u32 payload_index, payload_end_index, next_page_index;
 	int page, end_page, i, length, offset;
 
-	/*
-	 * FIXME: Cycle lost behavior should be configurable: lose
-	 * packet, retransmit or terminate..
-	 */
-
 	p = packet;
 	payload_index = payload;
 
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
 	if (!p->skip) {
 		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
 		d[0].req_count = cpu_to_le16(8);
+		/*
+		 * Link the skip address to this descriptor itself. This causes
+		 * a context to skip a cycle whenever lost cycles or FIFO
+		 * overruns occur, without dropping the data. The application
+		 * should then decide whether this is an error condition or not.
+		 * FIXME: Make the context's cycle-lost behaviour configurable?
+		 */
+		d[0].branch_address = cpu_to_le32(d_bus | z);
 
 		header = (__le32 *) &d[1];
 		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 
 	if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
 		DRM_ERROR("fail to set dma mask to 0x%Lx\n",
-			  gart_info->table_mask);
+			  (unsigned long long)gart_info->table_mask);
 		ret = 1;
 		goto done;
 	}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05592f3..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 			wasted += alignment - tmp;
 		}
 
-		if (entry->size >= size + wasted) {
+		if (entry->size >= size + wasted &&
+		    (entry->start + wasted + size) <= end) {
 			if (!best_match)
 				return entry;
 			if (entry->size < best_size) {
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-	.has_pipe_cxsr = 1,
+	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
@@ -174,26 +174,20 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
@@ -201,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	intel_opregion_free(dev, 1);
 
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+
+	return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
+	int error = 0;
 
 	i915_restore_state(dev);
 
@@ -231,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -386,57 +403,62 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
-}
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
 
-	return i915_resume(dev);
-}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
 
-static int
-i915_pm_suspend(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+	return 0;
 }
 
-static int
-i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
-}
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-static int
-i915_pm_freeze(struct device *dev)
-{
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+	return i915_resume(drm_dev);
 }
 
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
 {
-	/* thaw during hibernate, do nothing! */
-	return 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
 }
 
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
 {
-	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
 }
 
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
 {
-	return i915_pci_resume(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_freeze(drm_dev);
 }
 
 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +467,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	.freeze = i915_pm_freeze,
 	.thaw = i915_pm_thaw,
 	.poweroff = i915_pm_poweroff,
-	.restore = i915_pm_restore,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aaf934d96f21..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dda787aafcc6..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
 	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		struct drm_i915_gem_object *obj_priv, *next;
 
 		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
+					 &dev_priv->mm.gpu_write_list,
+					 gpu_write_list) {
 			struct drm_gem_object *obj = obj_priv->obj;
 
 			if ((obj->write_domain & flush_domains) ==
@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 				uint32_t old_write_domain = obj->write_domain;
 
 				obj->write_domain = 0;
+				list_del_init(&obj_priv->gpu_write_list);
 				i915_gem_object_move_to_active(obj, seqno);
 
 				trace_i915_gem_object_change_domain(obj,
@@ -2084,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
 	int ret;
+	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
@@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
 	seqno = i915_add_request(dev, NULL, obj->write_domain);
-	obj->write_domain = 0;
+	BUG_ON(obj->write_domain);
 	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3564,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 	uint32_t reloc_count = 0, i;
 	int ret = 0;
 
+	if (relocs == NULL)
+	    return 0;
+
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
 		int unwritten;
@@ -3653,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_relocation_entry *relocs;
+	struct drm_i915_gem_relocation_entry *relocs = NULL;
 	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
@@ -3679,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->num_cliprects != 0) {
 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto pre_mutex_err;
+		}
 
 		ret = copy_from_user(cliprects,
 				     (struct drm_clip_rect __user *)
@@ -3722,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec_list[i].handle, i);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3730,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (obj_priv->in_execbuffer) {
 			DRM_ERROR("Object %p appears more than once in object list\n",
 				   object_list[i]);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3843,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
 			(void)i915_add_request(dev, file_priv,
 					       dev->flush_domains);
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
+		if (obj->write_domain)
+			list_move_tail(&obj_priv->gpu_write_list,
+				       &dev_priv->mm.gpu_write_list);
+		else
+			list_del_init(&obj_priv->gpu_write_list);
+
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3926,6 +3948,7 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
 	/* Copy the updated relocations out regardless of current error
 	 * state. Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3963,6 @@ err:
 			ret = ret2;
 	}
 
-pre_mutex_err:
 	drm_free_large(object_list);
 	kfree(cliprects);
 
@@ -4363,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->gpu_write_list);
 	INIT_LIST_HEAD(&obj_priv->fence_list);
 	obj_priv->madv = I915_MADV_WILLNEED;
 
@@ -4814,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
 	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 0);
+		intel_finish_page_flip(dev, 0);
+	}
+
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 1);
+		intel_finish_page_flip(dev, 1);
+	}
+
+	if (de_iir & DE_PIPEA_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	if (de_iir & DE_PIPEB_VBLANK)
+		drm_handle_vblank(dev, 1);
+
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
 	    (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IRONLAKE(dev))
-		return 0;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (IS_I965G(dev))
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else if (IS_I965G(dev))
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IRONLAKE(dev))
-		return;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe,
-			      PIPE_VBLANK_INTERRUPT_ENABLE |
-			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	if (IS_IRONLAKE(dev))
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else
+		i915_disable_pipestat(dev_priv, pipe,
+				      PIPE_VBLANK_INTERRUPT_ENABLE |
+				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
 	u32 render_mask = GT_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask;
+	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define   FBC_CTL_PERIODIC	(1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_C3_IDLE		(1<<13)
 #define   FBC_CTL_STRIDE_SHIFT	(5)
 #define   FBC_CTL_FENCENO	(1<<0)
 #define FBC_COMMAND		0x0320c
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	adpa = I915_READ(PCH_ADPA);
 
 	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX         350000
 #define IRONLAKE_VCO_MIN         1760000
 #define IRONLAKE_VCO_MAX         3510000
-#define IRONLAKE_N_MIN           1
-#define IRONLAKE_N_MAX           6
-#define IRONLAKE_M_MIN           79
-#define IRONLAKE_M_MAX           127
 #define IRONLAKE_M1_MIN          12
 #define IRONLAKE_M1_MAX          22
 #define IRONLAKE_M2_MIN          5
 #define IRONLAKE_M2_MAX          9
-#define IRONLAKE_P_SDVO_DAC_MIN  5
-#define IRONLAKE_P_SDVO_DAC_MAX  80
-#define IRONLAKE_P_LVDS_MIN      28
-#define IRONLAKE_P_LVDS_MAX      112
-#define IRONLAKE_P1_MIN          1
-#define IRONLAKE_P1_MAX          8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW    14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST    7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT    225000 /* 225Mhz */
 
-#define IRONLAKE_P_DISPLAY_PORT_MIN 10
-#define IRONLAKE_P_DISPLAY_PORT_MAX 20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN	1
+#define IRONLAKE_DAC_N_MAX	5
+#define IRONLAKE_DAC_M_MIN	79
+#define IRONLAKE_DAC_M_MAX	127
+#define IRONLAKE_DAC_P_MIN	5
+#define IRONLAKE_DAC_P_MAX	80
+#define IRONLAKE_DAC_P1_MIN	1
+#define IRONLAKE_DAC_P1_MAX	8
+#define IRONLAKE_DAC_P2_SLOW	10
+#define IRONLAKE_DAC_P2_FAST	5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN	1
+#define IRONLAKE_LVDS_S_N_MAX	3
+#define IRONLAKE_LVDS_S_M_MIN	79
+#define IRONLAKE_LVDS_S_M_MAX	118
+#define IRONLAKE_LVDS_S_P_MIN	28
+#define IRONLAKE_LVDS_S_P_MAX	112
+#define IRONLAKE_LVDS_S_P1_MIN	2
+#define IRONLAKE_LVDS_S_P1_MAX	8
+#define IRONLAKE_LVDS_S_P2_SLOW	14
+#define IRONLAKE_LVDS_S_P2_FAST	14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN	1
+#define IRONLAKE_LVDS_D_N_MAX	3
+#define IRONLAKE_LVDS_D_M_MIN	79
+#define IRONLAKE_LVDS_D_M_MAX	127
+#define IRONLAKE_LVDS_D_P_MIN	14
+#define IRONLAKE_LVDS_D_P_MAX	56
+#define IRONLAKE_LVDS_D_P1_MIN	2
+#define IRONLAKE_LVDS_D_P1_MAX	8
+#define IRONLAKE_LVDS_D_P2_SLOW	7
+#define IRONLAKE_LVDS_D_P2_FAST	7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN	1
+#define IRONLAKE_LVDS_S_SSC_N_MAX	2
+#define IRONLAKE_LVDS_S_SSC_M_MIN	79
+#define IRONLAKE_LVDS_S_SSC_M_MAX	126
+#define IRONLAKE_LVDS_S_SSC_P_MIN	28
+#define IRONLAKE_LVDS_S_SSC_P_MAX	112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN	1
+#define IRONLAKE_LVDS_D_SSC_N_MAX	3
+#define IRONLAKE_LVDS_D_SSC_M_MIN	79
+#define IRONLAKE_LVDS_D_SSC_M_MAX	126
+#define IRONLAKE_LVDS_D_SSC_P_MIN	14
+#define IRONLAKE_LVDS_D_SSC_P_MAX	42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN	1
+#define IRONLAKE_DP_N_MAX	2
+#define IRONLAKE_DP_M_MIN	81
+#define IRONLAKE_DP_M_MAX	90
+#define IRONLAKE_DP_P_MIN	10
+#define IRONLAKE_DP_P_MAX	20
+#define IRONLAKE_DP_P2_FAST	10
+#define IRONLAKE_DP_P2_SLOW	10
+#define IRONLAKE_DP_P2_LIMIT	0
+#define IRONLAKE_DP_P1_MIN	1
+#define IRONLAKE_DP_P1_MAX	2
 
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
474 .find_pll = intel_find_best_PLL, 527 .find_pll = intel_find_best_PLL,
475}; 528};
476 529
477static const intel_limit_t intel_limits_ironlake_sdvo = { 530static const intel_limit_t intel_limits_ironlake_dac = {
478 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 531 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
479 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 532 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
480 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 533 .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
481 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 534 .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
482 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 535 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
483 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 536 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
484 .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, 537 .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
485 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 538 .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
486 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 539 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
487 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, 540 .p2_slow = IRONLAKE_DAC_P2_SLOW,
488 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, 541 .p2_fast = IRONLAKE_DAC_P2_FAST },
489 .find_pll = intel_g4x_find_best_PLL, 542 .find_pll = intel_g4x_find_best_PLL,
490}; 543};
491 544
492static const intel_limit_t intel_limits_ironlake_lvds = { 545static const intel_limit_t intel_limits_ironlake_single_lvds = {
493 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, 546 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
494 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, 547 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
495 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, 548 .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
496 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, 549 .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
497 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, 550 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
498 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, 551 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
499 .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, 552 .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
500 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, 553 .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
501 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 554 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
502 .p2_slow = IRONLAKE_P2_LVDS_SLOW, 555 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
503 .p2_fast = IRONLAKE_P2_LVDS_FAST }, 556 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
557 .find_pll = intel_g4x_find_best_PLL,
558};
559
560static const intel_limit_t intel_limits_ironlake_dual_lvds = {
561 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
562 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
563 .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
564 .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
565 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
566 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
567 .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
568 .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
569 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
570 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
571 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
572 .find_pll = intel_g4x_find_best_PLL,
573};
574
575static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
576 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
577 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
578 .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
579 .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
580 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
581 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
582 .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
583	.p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN, .max = IRONLAKE_LVDS_S_SSC_P1_MAX },
584 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
585 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
586 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
587 .find_pll = intel_g4x_find_best_PLL,
588};
589
590static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
591 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
592 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
593 .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
594 .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
595 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
596 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
597 .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
598	.p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN, .max = IRONLAKE_LVDS_D_SSC_P1_MAX },
599 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
600 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
601 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
504 .find_pll = intel_g4x_find_best_PLL, 602 .find_pll = intel_g4x_find_best_PLL,
505}; 603};
506 604
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
509 .max = IRONLAKE_DOT_MAX }, 607 .max = IRONLAKE_DOT_MAX },
510 .vco = { .min = IRONLAKE_VCO_MIN, 608 .vco = { .min = IRONLAKE_VCO_MIN,
511 .max = IRONLAKE_VCO_MAX}, 609 .max = IRONLAKE_VCO_MAX},
512 .n = { .min = IRONLAKE_N_MIN, 610 .n = { .min = IRONLAKE_DP_N_MIN,
513 .max = IRONLAKE_N_MAX }, 611 .max = IRONLAKE_DP_N_MAX },
514 .m = { .min = IRONLAKE_M_MIN, 612 .m = { .min = IRONLAKE_DP_M_MIN,
515 .max = IRONLAKE_M_MAX }, 613 .max = IRONLAKE_DP_M_MAX },
516 .m1 = { .min = IRONLAKE_M1_MIN, 614 .m1 = { .min = IRONLAKE_M1_MIN,
517 .max = IRONLAKE_M1_MAX }, 615 .max = IRONLAKE_M1_MAX },
518 .m2 = { .min = IRONLAKE_M2_MIN, 616 .m2 = { .min = IRONLAKE_M2_MIN,
519 .max = IRONLAKE_M2_MAX }, 617 .max = IRONLAKE_M2_MAX },
520 .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, 618 .p = { .min = IRONLAKE_DP_P_MIN,
521 .max = IRONLAKE_P_DISPLAY_PORT_MAX }, 619 .max = IRONLAKE_DP_P_MAX },
522 .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, 620 .p1 = { .min = IRONLAKE_DP_P1_MIN,
523 .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, 621 .max = IRONLAKE_DP_P1_MAX},
524 .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, 622 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
525 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, 623 .p2_slow = IRONLAKE_DP_P2_SLOW,
526 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, 624 .p2_fast = IRONLAKE_DP_P2_FAST },
527 .find_pll = intel_find_pll_ironlake_dp, 625 .find_pll = intel_find_pll_ironlake_dp,
528}; 626};
529 627
530static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 628static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
531{ 629{
630 struct drm_device *dev = crtc->dev;
631 struct drm_i915_private *dev_priv = dev->dev_private;
532 const intel_limit_t *limit; 632 const intel_limit_t *limit;
533 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 633 int refclk = 120;
534 limit = &intel_limits_ironlake_lvds; 634
535 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 635 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
636 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
637 refclk = 100;
638
639 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
640 LVDS_CLKB_POWER_UP) {
641 /* LVDS dual channel */
642 if (refclk == 100)
643 limit = &intel_limits_ironlake_dual_lvds_100m;
644 else
645 limit = &intel_limits_ironlake_dual_lvds;
646 } else {
647 if (refclk == 100)
648 limit = &intel_limits_ironlake_single_lvds_100m;
649 else
650 limit = &intel_limits_ironlake_single_lvds;
651 }
652 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
536 HAS_eDP) 653 HAS_eDP)
537 limit = &intel_limits_ironlake_display_port; 654 limit = &intel_limits_ironlake_display_port;
538 else 655 else
539 limit = &intel_limits_ironlake_sdvo; 656 limit = &intel_limits_ironlake_dac;
540 657
541 return limit; 658 return limit;
542} 659}
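The hunk above replaces the single ironlake LVDS limit table with four, selected by reference clock and channel count: refclk defaults to 120 MHz, drops to 100 MHz when SSC is in use at 100 MHz, and dual-channel mode is inferred from the CLKB power bits in PCH_LVDS. A minimal sketch of that mapping, reusing the table names from the hunk (the helper itself is hypothetical):

static const intel_limit_t *
pick_ironlake_lvds_limit(bool dual_channel, int refclk)
{
	/* the 100 MHz SSC refclk gets its own pair of tables */
	if (dual_channel)
		return refclk == 100 ? &intel_limits_ironlake_dual_lvds_100m
				     : &intel_limits_ironlake_dual_lvds;
	return refclk == 100 ? &intel_limits_ironlake_single_lvds_100m
			     : &intel_limits_ironlake_single_lvds;
}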
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
914 1031
915 /* enable it... */ 1032 /* enable it... */
916 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1033 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1034 if (IS_I945GM(dev))
1035 fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
917 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1036 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
918 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1037 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
919 if (obj_priv->tiling_mode != I915_TILING_NONE) 1038 if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1638 case DRM_MODE_DPMS_OFF: 1757 case DRM_MODE_DPMS_OFF:
1639 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 1758 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1640 1759
1760 drm_vblank_off(dev, pipe);
1641 /* Disable display plane */ 1761 /* Disable display plane */
1642 temp = I915_READ(dspcntr_reg); 1762 temp = I915_READ(dspcntr_reg);
1643 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 1763 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2519 sr_entries = roundup(sr_entries / cacheline_size, 1); 2639 sr_entries = roundup(sr_entries / cacheline_size, 1);
2520 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2640 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2521 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2641 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2642 } else {
2643 /* Turn off self refresh if both pipes are enabled */
2644 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2645 & ~FW_BLC_SELF_EN);
2522 } 2646 }
2523 2647
2524 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", 2648 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2562 srwm = 1; 2686 srwm = 1;
2563 srwm &= 0x3f; 2687 srwm &= 0x3f;
2564 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2688 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2689 } else {
2690 /* Turn off self refresh if both pipes are enabled */
2691 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2692 & ~FW_BLC_SELF_EN);
2565 } 2693 }
2566 2694
2567 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2695 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2630 if (srwm < 0) 2758 if (srwm < 0)
2631 srwm = 1; 2759 srwm = 1;
2632 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2761 } else {
2762 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2764 & ~FW_BLC_SELF_EN);
2633 } 2765 }
2634 2766
2635 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
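All three watermark paths above gain the same else branch: self-refresh must not stay enabled once a second pipe becomes active, so the FW_BLC_SELF enable bit is cleared with a read-modify-write. A condensed sketch of the pattern, where single_pipe_active stands in for the surrounding condition (the i9xx variant also ORs the srwm value into the enable write):

	if (single_pipe_active)
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		/* turn off self refresh if both pipes are enabled */
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);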
@@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3949struct intel_unpin_work { 4081struct intel_unpin_work {
3950 struct work_struct work; 4082 struct work_struct work;
3951 struct drm_device *dev; 4083 struct drm_device *dev;
3952 struct drm_gem_object *obj; 4084 struct drm_gem_object *old_fb_obj;
4085 struct drm_gem_object *pending_flip_obj;
3953 struct drm_pending_vblank_event *event; 4086 struct drm_pending_vblank_event *event;
3954 int pending; 4087 int pending;
3955}; 4088};
@@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
3960 container_of(__work, struct intel_unpin_work, work); 4093 container_of(__work, struct intel_unpin_work, work);
3961 4094
3962 mutex_lock(&work->dev->struct_mutex); 4095 mutex_lock(&work->dev->struct_mutex);
3963 i915_gem_object_unpin(work->obj); 4096 i915_gem_object_unpin(work->old_fb_obj);
3964 drm_gem_object_unreference(work->obj); 4097 drm_gem_object_unreference(work->pending_flip_obj);
4098 drm_gem_object_unreference(work->old_fb_obj);
3965 mutex_unlock(&work->dev->struct_mutex); 4099 mutex_unlock(&work->dev->struct_mutex);
3966 kfree(work); 4100 kfree(work);
3967} 4101}
@@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
3984 spin_lock_irqsave(&dev->event_lock, flags); 4118 spin_lock_irqsave(&dev->event_lock, flags);
3985 work = intel_crtc->unpin_work; 4119 work = intel_crtc->unpin_work;
3986 if (work == NULL || !work->pending) { 4120 if (work == NULL || !work->pending) {
4121 if (work && !work->pending) {
4122 obj_priv = work->pending_flip_obj->driver_private;
4123 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4124 obj_priv,
4125 atomic_read(&obj_priv->pending_flip));
4126 }
3987 spin_unlock_irqrestore(&dev->event_lock, flags); 4127 spin_unlock_irqrestore(&dev->event_lock, flags);
3988 return; 4128 return;
3989 } 4129 }
@@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4004 4144
4005 spin_unlock_irqrestore(&dev->event_lock, flags); 4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4006 4146
4007 obj_priv = work->obj->driver_private; 4147 obj_priv = work->pending_flip_obj->driver_private;
4008 if (atomic_dec_and_test(&obj_priv->pending_flip)) 4148
4149 /* Initial scanout buffer will have a 0 pending flip count */
4150 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4151 atomic_dec_and_test(&obj_priv->pending_flip))
4009 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4152 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4010 schedule_work(&work->work); 4153 schedule_work(&work->work);
4011} 4154}
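The completion test above now has two ways to wake waiters: the initial scanout buffer was never flipped in, so its pending_flip count is still zero, while every later buffer is decremented and tested. The predicate, pulled out as a hypothetical helper:

static bool intel_flip_complete(atomic_t *pending_flip)
{
	/* initial scanout buffer will have a 0 pending flip count */
	return atomic_read(pending_flip) == 0 ||
	       atomic_dec_and_test(pending_flip);
}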
@@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
4018 unsigned long flags; 4161 unsigned long flags;
4019 4162
4020 spin_lock_irqsave(&dev->event_lock, flags); 4163 spin_lock_irqsave(&dev->event_lock, flags);
4021 if (intel_crtc->unpin_work) 4164 if (intel_crtc->unpin_work) {
4022 intel_crtc->unpin_work->pending = 1; 4165 intel_crtc->unpin_work->pending = 1;
4166 } else {
4167 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4168 }
4023 spin_unlock_irqrestore(&dev->event_lock, flags); 4169 spin_unlock_irqrestore(&dev->event_lock, flags);
4024} 4170}
4025 4171
@@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4035 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4036 struct intel_unpin_work *work; 4182 struct intel_unpin_work *work;
4037 unsigned long flags; 4183 unsigned long flags;
4038 int ret; 4184 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4185 int ret, pipesrc;
4039 RING_LOCALS; 4186 RING_LOCALS;
4040 4187
4041 work = kzalloc(sizeof *work, GFP_KERNEL); 4188 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4047 work->event = event; 4194 work->event = event;
4048 work->dev = crtc->dev; 4195 work->dev = crtc->dev;
4049 intel_fb = to_intel_framebuffer(crtc->fb); 4196 intel_fb = to_intel_framebuffer(crtc->fb);
4050 work->obj = intel_fb->obj; 4197 work->old_fb_obj = intel_fb->obj;
4051 INIT_WORK(&work->work, intel_unpin_work_fn); 4198 INIT_WORK(&work->work, intel_unpin_work_fn);
4052 4199
4053 /* We borrow the event spin lock for protecting unpin_work */ 4200 /* We borrow the event spin lock for protecting unpin_work */
4054 spin_lock_irqsave(&dev->event_lock, flags); 4201 spin_lock_irqsave(&dev->event_lock, flags);
4055 if (intel_crtc->unpin_work) { 4202 if (intel_crtc->unpin_work) {
4203 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4056 spin_unlock_irqrestore(&dev->event_lock, flags); 4204 spin_unlock_irqrestore(&dev->event_lock, flags);
4057 kfree(work); 4205 kfree(work);
4058 mutex_unlock(&dev->struct_mutex); 4206 mutex_unlock(&dev->struct_mutex);
@@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4066 4214
4067 ret = intel_pin_and_fence_fb_obj(dev, obj); 4215 ret = intel_pin_and_fence_fb_obj(dev, obj);
4068 if (ret != 0) { 4216 if (ret != 0) {
4217 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4218 obj->driver_private);
4069 kfree(work); 4219 kfree(work);
4220 intel_crtc->unpin_work = NULL;
4070 mutex_unlock(&dev->struct_mutex); 4221 mutex_unlock(&dev->struct_mutex);
4071 return ret; 4222 return ret;
4072 } 4223 }
4073 4224
4074 /* Reference the old fb object for the scheduled work. */ 4225 /* Reference the objects for the scheduled work. */
4075 drm_gem_object_reference(work->obj); 4226 drm_gem_object_reference(work->old_fb_obj);
4227 drm_gem_object_reference(obj);
4076 4228
4077 crtc->fb = fb; 4229 crtc->fb = fb;
4078 i915_gem_object_flush_write_domain(obj); 4230 i915_gem_object_flush_write_domain(obj);
4079 drm_vblank_get(dev, intel_crtc->pipe); 4231 drm_vblank_get(dev, intel_crtc->pipe);
4080 obj_priv = obj->driver_private; 4232 obj_priv = obj->driver_private;
4081 atomic_inc(&obj_priv->pending_flip); 4233 atomic_inc(&obj_priv->pending_flip);
4234 work->pending_flip_obj = obj;
4082 4235
4083 BEGIN_LP_RING(4); 4236 BEGIN_LP_RING(4);
4084 OUT_RING(MI_DISPLAY_FLIP | 4237 OUT_RING(MI_DISPLAY_FLIP |
@@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4086 OUT_RING(fb->pitch); 4239 OUT_RING(fb->pitch);
4087 if (IS_I965G(dev)) { 4240 if (IS_I965G(dev)) {
4088 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4241 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4089 OUT_RING((fb->width << 16) | fb->height); 4242 pipesrc = I915_READ(pipesrc_reg);
4243 OUT_RING(pipesrc & 0x0fff0fff);
4090 } else { 4244 } else {
4091 OUT_RING(obj_priv->gtt_offset); 4245 OUT_RING(obj_priv->gtt_offset);
4092 OUT_RING(MI_NOOP); 4246 OUT_RING(MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
611 { 611 {
612 .ident = "Samsung SX20S", 612 .ident = "Samsung SX20S",
613 .matches = { 613 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), 614 DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"), 615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
616 }, 616 },
617 }, 617 },
@@ -623,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = {
623 }, 623 },
624 }, 624 },
625 { 625 {
626 .ident = "Aspire 1810T",
627 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
630 },
631 },
632 {
626 .ident = "PC-81005", 633 .ident = "PC-81005",
627 .matches = { 634 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), 635 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), 636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
630 }, 637 },
631 }, 638 },
639 {
640 .ident = "Clevo M5x0N",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
643 DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
644 },
645 },
632 { } 646 { }
633}; 647};
634 648
@@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
643{ 657{
644 enum drm_connector_status status = connector_status_connected; 658 enum drm_connector_status status = connector_status_connected;
645 659
646 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
647 status = connector_status_disconnected; 661 status = connector_status_disconnected;
648 662
649 return status; 663 return status;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2347 (1 << INTEL_ANALOG_CLONE_BIT); 2347 (1 << INTEL_ANALOG_CLONE_BIT);
2348 } else if (flags & SDVO_OUTPUT_CVBS0) {
2349
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2348 } else if (flags & SDVO_OUTPUT_LVDS0) { 2356 } else if (flags & SDVO_OUTPUT_LVDS0) {
2349 2357
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
90{ 90{
91 int result; 91 int result;
92 92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, 93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
94 &result)) 94 &result))
95 return -ENODEV; 95 return -ENODEV;
96 96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98 98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */ 99 if (result) { /* Ensure that the external GPU is enabled */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
102 NULL);
103 } else { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL); 105 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL); 107 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 } 108 }
109 109
110 return 0; 110 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d7f8d8b4a4b8..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1865 1865
1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1867 1867
1868 if (dev_priv->card_type >= NV_50) 1868 if (dev_priv->card_type >= NV_40)
1869 return 1; 1869 return 1;
1870 1870
1871 /* 1871 /*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3765 */ 3765 */
3766 3766
3767 struct drm_nouveau_private *dev_priv = dev->dev_private; 3767 struct drm_nouveau_private *dev_priv = dev->dev_private;
3768 struct init_exec iexec = {true, false};
3769 struct nvbios *bios = &dev_priv->VBIOS; 3768 struct nvbios *bios = &dev_priv->VBIOS;
3770 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3769 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3771 uint8_t *otable = NULL; 3770 uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3845 } 3844 }
3846 } 3845 }
3847 3846
3848 bios->display.output = dcbent;
3849
3850 if (pxclk == 0) { 3847 if (pxclk == 0) {
3851 script = ROM16(otable[6]); 3848 script = ROM16(otable[6]);
3852 if (!script) { 3849 if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3855 } 3852 }
3856 3853
3857 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); 3854 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
3858 parse_init_table(bios, script, &iexec); 3855 nouveau_bios_run_init_table(dev, script, dcbent);
3859 } else 3856 } else
3860 if (pxclk == -1) { 3857 if (pxclk == -1) {
3861 script = ROM16(otable[8]); 3858 script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3865 } 3862 }
3866 3863
3867 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); 3864 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
3868 parse_init_table(bios, script, &iexec); 3865 nouveau_bios_run_init_table(dev, script, dcbent);
3869 } else 3866 } else
3870 if (pxclk == -2) { 3867 if (pxclk == -2) {
3871 if (table[4] >= 12) 3868 if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3878 } 3875 }
3879 3876
3880 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); 3877 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
3881 parse_init_table(bios, script, &iexec); 3878 nouveau_bios_run_init_table(dev, script, dcbent);
3882 } else 3879 } else
3883 if (pxclk > 0) { 3880 if (pxclk > 0) {
3884 script = ROM16(otable[table[4] + i*6 + 2]); 3881 script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3890 } 3887 }
3891 3888
3892 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); 3889 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
3893 parse_init_table(bios, script, &iexec); 3890 nouveau_bios_run_init_table(dev, script, dcbent);
3894 } else 3891 } else
3895 if (pxclk < 0) { 3892 if (pxclk < 0) {
3896 script = ROM16(otable[table[4] + i*6 + 4]); 3893 script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3902 } 3899 }
3903 3900
3904 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); 3901 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
3905 parse_init_table(bios, script, &iexec); 3902 nouveau_bios_run_init_table(dev, script, dcbent);
3906 } 3903 }
3907 3904
3908 return 0; 3905 return 0;
@@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5865 struct nvbios *bios = &dev_priv->VBIOS; 5862 struct nvbios *bios = &dev_priv->VBIOS;
5866 struct init_exec iexec = { true, false }; 5863 struct init_exec iexec = { true, false };
5867 5864
5865 mutex_lock(&bios->lock);
5868 bios->display.output = dcbent; 5866 bios->display.output = dcbent;
5869 parse_init_table(bios, table, &iexec); 5867 parse_init_table(bios, table, &iexec);
5870 bios->display.output = NULL; 5868 bios->display.output = NULL;
5869 mutex_unlock(&bios->lock);
5871} 5870}
5872 5871
5873static bool NVInitVBIOS(struct drm_device *dev) 5872static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5876 struct nvbios *bios = &dev_priv->VBIOS; 5875 struct nvbios *bios = &dev_priv->VBIOS;
5877 5876
5878 memset(bios, 0, sizeof(struct nvbios)); 5877 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock);
5879 bios->dev = dev; 5879 bios->dev = dev;
5880 5880
5881 if (!NVShadowVBIOS(dev, bios->data)) 5881 if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
205 struct drm_device *dev; 205 struct drm_device *dev;
206 struct nouveau_bios_info pub; 206 struct nouveau_bios_info pub;
207 207
208 struct mutex lock;
209
208 uint8_t data[NV_PROM_SIZE]; 210 uint8_t data[NV_PROM_SIZE];
209 unsigned int length; 211 unsigned int length;
210 bool execute; 212 bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db0ed4c13f98..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
65 65
66 /* 66 /*
67 * Some of the tile_flags have a periodic structure of N*4096 bytes, 67 * Some of the tile_flags have a periodic structure of N*4096 bytes,
 68 * align to that as well as the page size. Overallocate memory to 68 * align to that as well as the page size. Align the size to the
 69 * avoid corruption of other buffer objects. 69 * appropriate boundaries. This does imply that sizes are rounded up
 70 * by 3-7 pages, so be aware of this and do not waste memory by allocating
71 * many small buffers.
70 */ 72 */
71 if (dev_priv->card_type == NV_50) { 73 if (dev_priv->card_type == NV_50) {
72 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; 74 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
77 case 0x2800: 79 case 0x2800:
78 case 0x4800: 80 case 0x4800:
79 case 0x7a00: 81 case 0x7a00:
80 *size = roundup(*size, block_size);
81 if (is_power_of_2(block_size)) { 82 if (is_power_of_2(block_size)) {
82 *size += 3 * block_size;
83 for (i = 1; i < 10; i++) { 83 for (i = 1; i < 10; i++) {
84 *align = 12 * i * block_size; 84 *align = 12 * i * block_size;
85 if (!(*align % 65536)) 85 if (!(*align % 65536))
86 break; 86 break;
87 } 87 }
88 } else { 88 } else {
89 *size += 6 * block_size;
90 for (i = 1; i < 10; i++) { 89 for (i = 1; i < 10; i++) {
91 *align = 8 * i * block_size; 90 *align = 8 * i * block_size;
92 if (!(*align % 65536)) 91 if (!(*align % 65536))
93 break; 92 break;
94 } 93 }
95 } 94 }
95 *size = roundup(*size, *align);
96 break; 96 break;
97 default: 97 default:
98 break; 98 break;
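The rewritten NV_50 path above derives the alignment first and only then rounds the size up to it, instead of padding every buffer by a fixed 3 or 6 blocks. A worked example, assuming a 256 MiB card:

	/* standalone sketch of the align search above, 256 MiB VRAM assumed */
	uint32_t block_size = (256u << 20) >> 15;	/* 8192, a power of two */
	uint32_t align = 0;
	int i;

	for (i = 1; i < 10; i++) {
		align = 12 * i * block_size;	/* 98304, 196608, ... */
		if (!(align % 65536))		/* first 64 KiB multiple: i == 2 */
			break;
	}
	/* align == 196608 (192 KiB); *size is then rounded up to that boundary */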
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
278 /* Ensure the channel is no longer active on the GPU */ 278 /* Ensure the channel is no longer active on the GPU */
279 pfifo->reassign(dev, false); 279 pfifo->reassign(dev, false);
280 280
281 if (pgraph->channel(dev) == chan) { 281 pgraph->fifo_access(dev, false);
282 pgraph->fifo_access(dev, false); 282 if (pgraph->channel(dev) == chan)
283 pgraph->unload_context(dev); 283 pgraph->unload_context(dev);
284 pgraph->fifo_access(dev, true);
285 }
286 pgraph->destroy_context(chan); 284 pgraph->destroy_context(chan);
285 pgraph->fifo_access(dev, true);
287 286
288 if (pfifo->channel_id(dev) == chan->id) { 287 if (pfifo->channel_id(dev) == chan->id) {
289 pfifo->disable(dev); 288 pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e6d673f3a23..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
88{ 88{
89 struct nouveau_connector *nv_connector = 89 struct nouveau_connector *nv_connector =
90 nouveau_connector(drm_connector); 90 nouveau_connector(drm_connector);
91 struct drm_device *dev = nv_connector->base.dev; 91 struct drm_device *dev;
92
93 NV_DEBUG_KMS(dev, "\n");
94 92
95 if (!nv_connector) 93 if (!nv_connector)
96 return; 94 return;
97 95
96 dev = nv_connector->base.dev;
97 NV_DEBUG_KMS(dev, "\n");
98
98 kfree(nv_connector->edid); 99 kfree(nv_connector->edid);
99 drm_sysfs_connector_remove(drm_connector); 100 drm_sysfs_connector_remove(drm_connector);
100 drm_connector_cleanup(drm_connector); 101 drm_connector_cleanup(drm_connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index dd4937224220..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 break; 502 break;
503 } 503 }
504 504
505 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
506 ret = -EREMOTEIO;
507 goto out;
508 }
509
510 if (cmd & 1) { 505 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
511 for (i = 0; i < 4; i++) { 511 for (i = 0; i < 4; i++) {
512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); 513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 343ab7f17ccc..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57 57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); 58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify; 59int nouveau_vram_notify = 1;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400); 60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61 61
62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); 62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0; 75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77 77
 78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81
 82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85
78MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
79 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
80 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 6b9690418bc7..1c15ef37b71c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
583 uint64_t vm_end; 583 uint64_t vm_end;
584 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 584 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
585 int vm_vram_pt_nr; 585 int vm_vram_pt_nr;
586 uint64_t vram_sys_base;
586 587
587 /* the mtrr covering the FB */ 588 /* the mtrr covering the FB */
588 int fb_mtrr; 589 int fb_mtrr;
@@ -678,6 +679,8 @@ extern int nouveau_reg_debug;
678extern char *nouveau_vbios; 679extern char *nouveau_vbios;
679extern int nouveau_ctxfw; 680extern int nouveau_ctxfw;
680extern int nouveau_ignorelid; 681extern int nouveau_ignorelid;
682extern int nouveau_nofbaccel;
683extern int nouveau_noaccel;
681 684
682/* nouveau_state.c */ 685/* nouveau_state.c */
683extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 686extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0b05c869e0e7..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
107 .fb_setcmap = drm_fb_helper_setcmap, 107 .fb_setcmap = drm_fb_helper_setcmap,
108}; 108};
109 109
110static struct fb_ops nv04_fbcon_ops = {
111 .owner = THIS_MODULE,
112 .fb_check_var = drm_fb_helper_check_var,
113 .fb_set_par = drm_fb_helper_set_par,
114 .fb_setcolreg = drm_fb_helper_setcolreg,
115 .fb_fillrect = nv04_fbcon_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122};
123
124static struct fb_ops nv50_fbcon_ops = {
125 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = drm_fb_helper_set_par,
128 .fb_setcolreg = drm_fb_helper_setcolreg,
129 .fb_fillrect = nv50_fbcon_fillrect,
130 .fb_copyarea = nv50_fbcon_copyarea,
131 .fb_imageblit = nv50_fbcon_imageblit,
132 .fb_sync = nouveau_fbcon_sync,
133 .fb_pan_display = drm_fb_helper_pan_display,
134 .fb_blank = drm_fb_helper_blank,
135 .fb_setcmap = drm_fb_helper_setcmap,
136};
137
110static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 138static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno) 139 u16 blue, int regno)
112{ 140{
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
267 dev_priv->fbdev_info = info; 295 dev_priv->fbdev_info = info;
268 296
269 strcpy(info->fix.id, "nouveaufb"); 297 strcpy(info->fix.id, "nouveaufb");
270 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 298 if (nouveau_nofbaccel)
271 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; 299 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
300 else
301 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
302 FBINFO_HWACCEL_FILLRECT |
303 FBINFO_HWACCEL_IMAGEBLIT;
272 info->fbops = &nouveau_fbcon_ops; 304 info->fbops = &nouveau_fbcon_ops;
273 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 305 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
274 dev_priv->vm_vram_base; 306 dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
316 par->nouveau_fb = nouveau_fb; 348 par->nouveau_fb = nouveau_fb;
317 par->dev = dev; 349 par->dev = dev;
318 350
319 if (dev_priv->channel) { 351 if (dev_priv->channel && !nouveau_nofbaccel) {
320 switch (dev_priv->card_type) { 352 switch (dev_priv->card_type) {
321 case NV_50: 353 case NV_50:
322 nv50_fbcon_accel_init(info); 354 nv50_fbcon_accel_init(info);
355 info->fbops = &nv50_fbcon_ops;
323 break; 356 break;
324 default: 357 default:
325 nv04_fbcon_accel_init(info); 358 nv04_fbcon_accel_init(info);
359 info->fbops = &nv04_fbcon_ops;
326 break; 360 break;
327 }; 361 };
328 } 362 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 462e0b87b4bd..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void); 40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev); 41void nouveau_fbcon_zfill(struct drm_device *dev);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
43int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
44int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
45 51
46void nouveau_fbcon_gpu_lockup(struct fb_info *info); 52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6ac804b0c9f9..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
925 } 925 }
926 926
927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { 927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
928 spin_lock(&nvbo->bo.lock);
928 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); 929 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
930 spin_unlock(&nvbo->bo.lock);
929 } else { 931 } else {
930 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); 932 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
931 if (ret == 0) 933 if (ret == 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
97 } 97 }
98 98
99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); 99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
100 if (!pgraph->ctxprog) { 100 if (!pgraph->ctxvals) {
101 NV_ERROR(dev, "OOM copying ctxprog\n"); 101 NV_ERROR(dev, "OOM copying ctxvals\n");
102 release_firmware(fw); 102 release_firmware(fw);
103 nouveau_grctx_fini(dev); 103 nouveau_grctx_fini(dev);
104 return -ENOMEM; 104 return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 3b9bad66162a..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
211 get + 4); 211 get + 4);
212 } 212 }
213 213
214 if (status & NV_PFIFO_INTR_SEMAPHORE) {
215 uint32_t sem;
216
217 status &= ~NV_PFIFO_INTR_SEMAPHORE;
218 nv_wr32(dev, NV03_PFIFO_INTR_0,
219 NV_PFIFO_INTR_SEMAPHORE);
220
221 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
222 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
223
224 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
225 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
226 }
227
214 if (status) { 228 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 229 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid); 230 status, chid);
@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
566static void 580static void
567nv50_pgraph_irq_handler(struct drm_device *dev) 581nv50_pgraph_irq_handler(struct drm_device *dev)
568{ 582{
569 uint32_t status, nsource; 583 uint32_t status;
570 584
571 status = nv_rd32(dev, NV03_PGRAPH_INTR); 585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
572 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
573 587
574 if (status & 0x00000001) { 588 if (status & 0x00000001) {
575 nouveau_pgraph_intr_notify(dev, nsource); 589 nouveau_pgraph_intr_notify(dev, nsource);
576 status &= ~0x00000001; 590 status &= ~0x00000001;
577 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
578 } 592 }
579 593
580 if (status & 0x00000010) { 594 if (status & 0x00000010) {
581 nouveau_pgraph_intr_error(dev, nsource | 595 nouveau_pgraph_intr_error(dev, nsource |
582 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
583 597
584 status &= ~0x00000010; 598 status &= ~0x00000010;
585 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
586 } 600 }
587 601
588 if (status & 0x00001000) { 602 if (status & 0x00001000) {
589 nv_wr32(dev, 0x400500, 0x00000000); 603 nv_wr32(dev, 0x400500, 0x00000000);
590 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 604 nv_wr32(dev, NV03_PGRAPH_INTR,
591 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, 605 NV_PGRAPH_INTR_CONTEXT_SWITCH);
592 NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); 606 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
593 nv_wr32(dev, 0x400500, 0x00010001); 607 NV40_PGRAPH_INTR_EN) &
608 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
609 nv_wr32(dev, 0x400500, 0x00010001);
594 610
595 nv50_graph_context_switch(dev); 611 nv50_graph_context_switch(dev);
596 612
597 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
598 } 614 }
599 615
600 if (status & 0x00100000) { 616 if (status & 0x00100000) {
601 nouveau_pgraph_intr_error(dev, nsource | 617 nouveau_pgraph_intr_error(dev, nsource |
602 NV03_PGRAPH_NSOURCE_DATA_ERROR); 618 NV03_PGRAPH_NSOURCE_DATA_ERROR);
603 619
604 status &= ~0x00100000; 620 status &= ~0x00100000;
605 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
606 } 622 }
607 623
608 if (status & 0x00200000) { 624 if (status & 0x00200000) {
609 int r; 625 int r;
610 626
611 nouveau_pgraph_intr_error(dev, nsource | 627 nouveau_pgraph_intr_error(dev, nsource |
612 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); 628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
613 629
614 NV_ERROR(dev, "magic set 1:\n"); 630 NV_ERROR(dev, "magic set 1:\n");
615 for (r = 0x408900; r <= 0x408910; r += 4) 631 for (r = 0x408900; r <= 0x408910; r += 4)
616 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
617 nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); 633 nv_rd32(dev, r));
618 for (r = 0x408e08; r <= 0x408e24; r += 4) 634 nv_wr32(dev, 0x408900,
619 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 635 nv_rd32(dev, 0x408904) | 0xc0000000);
620 nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); 636 for (r = 0x408e08; r <= 0x408e24; r += 4)
621 637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
622 NV_ERROR(dev, "magic set 2:\n"); 638 nv_rd32(dev, r));
623 for (r = 0x409900; r <= 0x409910; r += 4) 639 nv_wr32(dev, 0x408e08,
624 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 640 nv_rd32(dev, 0x408e08) | 0xc0000000);
625 nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); 641
626 for (r = 0x409e08; r <= 0x409e24; r += 4) 642 NV_ERROR(dev, "magic set 2:\n");
627 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 643 for (r = 0x409900; r <= 0x409910; r += 4)
628 nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); 644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
629 645 nv_rd32(dev, r));
630 status &= ~0x00200000; 646 nv_wr32(dev, 0x409900,
631 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); 647 nv_rd32(dev, 0x409904) | 0xc0000000);
632 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 648 for (r = 0x409e08; r <= 0x409e24; r += 4)
633 } 649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 }
634 658
635 if (status) { 659 if (status) {
636 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); 660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
637 nv_wr32(dev, NV03_PGRAPH_INTR, status); 661 status);
638 } 662 nv_wr32(dev, NV03_PGRAPH_INTR, status);
663 }
639 664
640 { 665 {
641 const int isb = (1 << 16) | (1 << 0); 666 const int isb = (1 << 16) | (1 << 0);
642 667
643 if ((nv_rd32(dev, 0x400500) & isb) != isb) 668 if ((nv_rd32(dev, 0x400500) & isb) != isb)
644 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); 669 nv_wr32(dev, 0x400500,
645 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 670 nv_rd32(dev, 0x400500) | isb);
671 }
646 } 672 }
647 673
648 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
649} 676}
650 677
651static void 678static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 8f3a12f614ed..2dc09dbd817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
285 uint32_t flags, uint64_t phys) 285 uint32_t flags, uint64_t phys)
286{ 286{
287 struct drm_nouveau_private *dev_priv = dev->dev_private; 287 struct drm_nouveau_private *dev_priv = dev->dev_private;
288 struct nouveau_gpuobj **pgt; 288 struct nouveau_gpuobj *pgt;
289 unsigned psz, pfl, pages; 289 unsigned block;
290 290 int i;
291 if (virt >= dev_priv->vm_gart_base &&
292 (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
293 psz = 12;
294 pgt = &dev_priv->gart_info.sg_ctxdma;
295 pfl = 0x21;
296 virt -= dev_priv->vm_gart_base;
297 } else
298 if (virt >= dev_priv->vm_vram_base &&
299 (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
300 psz = 16;
301 pgt = dev_priv->vm_vram_pt;
302 pfl = 0x01;
303 virt -= dev_priv->vm_vram_base;
304 } else {
305 NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
306 virt, virt + size - 1);
307 return -EINVAL;
308 }
309 291
310 pages = size >> psz; 292 virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
293 size = (size >> 16) << 1;
294
295 phys |= ((uint64_t)flags << 32);
296 phys |= 1;
297 if (dev_priv->vram_sys_base) {
298 phys += dev_priv->vram_sys_base;
299 phys |= 0x30;
300 }
311 301
312 dev_priv->engine.instmem.prepare_access(dev, true); 302 dev_priv->engine.instmem.prepare_access(dev, true);
313 if (flags & 0x80000000) { 303 while (size) {
314 while (pages--) { 304 unsigned offset_h = upper_32_bits(phys);
315 struct nouveau_gpuobj *pt = pgt[virt >> 29]; 305 unsigned offset_l = lower_32_bits(phys);
316 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; 306 unsigned pte, end;
307
308 for (i = 7; i >= 0; i--) {
309 block = 1 << (i + 1);
310 if (size >= block && !(virt & (block - 1)))
311 break;
312 }
313 offset_l |= (i << 7);
317 314
318 nv_wo32(dev, pt, pte++, 0x00000000); 315 phys += block << 15;
319 nv_wo32(dev, pt, pte++, 0x00000000); 316 size -= block;
320 317
321 virt += (1 << psz); 318 while (block) {
322 } 319 pgt = dev_priv->vm_vram_pt[virt >> 14];
323 } else { 320 pte = virt & 0x3ffe;
324 while (pages--) {
325 struct nouveau_gpuobj *pt = pgt[virt >> 29];
326 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
327 unsigned offset_h = upper_32_bits(phys) & 0xff;
328 unsigned offset_l = lower_32_bits(phys);
329 321
330 nv_wo32(dev, pt, pte++, offset_l | pfl); 322 end = pte + block;
331 nv_wo32(dev, pt, pte++, offset_h | flags); 323 if (end > 16384)
324 end = 16384;
325 block -= (end - pte);
326 virt += (end - pte);
332 327
333 phys += (1 << psz); 328 while (pte < end) {
334 virt += (1 << psz); 329 nv_wo32(dev, pgt, pte++, offset_l);
330 nv_wo32(dev, pgt, pte++, offset_h);
331 }
335 } 332 }
336 } 333 }
337 dev_priv->engine.instmem.finish_access(dev); 334 dev_priv->engine.instmem.finish_access(dev);
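The rewritten bind loop above works in PTE words, two per 64 KiB page (the low and high halves of the physical address plus flags), and run-length encodes contiguity: it picks the largest naturally aligned power-of-two run and merges the selected index into the low word as i << 7, where the run covers 1 << (i + 1) words. That search, as a hypothetical standalone helper:

static int nv50_pick_run_log2(uint64_t virt, unsigned size)
{
	int i;

	for (i = 7; i >= 0; i--) {
		unsigned block = 1u << (i + 1);	/* run length in PTE words */
		if (size >= block && !(virt & (block - 1)))
			break;
	}
	/* virt and size are even, nonzero word counts here, so i == 0 always matches */
	return i;
}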
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
356void 353void
357nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) 354nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
358{ 355{
359 nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); 356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 struct nouveau_gpuobj *pgt;
358 unsigned pages, pte, end;
359
360 virt -= dev_priv->vm_vram_base;
361 pages = (size >> 16) << 1;
362
363 dev_priv->engine.instmem.prepare_access(dev, true);
364 while (pages) {
365 pgt = dev_priv->vm_vram_pt[virt >> 29];
366 pte = (virt & 0x1ffe0000ULL) >> 15;
367
368 end = pte + pages;
369 if (end > 16384)
370 end = 16384;
371 pages -= (end - pte);
372 virt += (end - pte) << 15;
373
374 while (pte < end)
375 nv_wo32(dev, pgt, pte++, 0);
376 }
377 dev_priv->engine.instmem.finish_access(dev);
378
379 nv_wr32(dev, 0x100c80, 0x00050001);
380 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
 381 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
382 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
383 return;
384 }
385
386 nv_wr32(dev, 0x100c80, 0x00000001);
387 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
388 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
389 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
390 }
360} 391}
361 392
362/* 393/*
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{ 34{
35 struct drm_device *dev = chan->dev; 35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL; 36 struct nouveau_bo *ntfy = NULL;
37 uint32_t flags;
37 int ret; 38 int ret;
38 39
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? 40 if (nouveau_vram_notify)
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, 41 flags = TTM_PL_FLAG_VRAM;
42 else
43 flags = TTM_PL_FLAG_TT;
44
45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
41 0, 0x0000, false, true, &ntfy); 46 0, 0x0000, false, true, &ntfy);
42 if (ret) 47 if (ret)
43 return ret; 48 return ret;
44 49
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); 50 ret = nouveau_bo_pin(ntfy, flags);
46 if (ret) 51 if (ret)
47 goto out_err; 52 goto out_err;
48 53
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
128 target = NV_DMA_TARGET_PCI; 133 target = NV_DMA_TARGET_PCI;
129 } else { 134 } else {
130 target = NV_DMA_TARGET_AGP; 135 target = NV_DMA_TARGET_AGP;
136 if (dev_priv->card_type >= NV_50)
137 offset += dev_priv->vm_gart_base;
131 } 138 }
132 } else { 139 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 140 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 6c2cf81716df..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -885,11 +885,12 @@ int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret) 886 struct nouveau_gpuobj **gpuobj_ret)
887{ 887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj; 889 struct nouveau_gpuobj *gpuobj;
890 890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) 891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL; 892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
893 894
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj) 896 if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 251f1b3b38b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
99 * the card will hang early on in the X init process. 99 * the card will hang early on in the X init process.
100 */ 100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13) 101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
102#define NV40_PMC_BACKLIGHT 0x000015f0 103#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700 105#define NV40_PMC_1700 0x00001700
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
54nouveau_sgdma_clear(struct ttm_backend *be) 54nouveau_sgdma_clear(struct ttm_backend *be)
55{ 55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev; 57 struct drm_device *dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60 58
61 if (nvbe && nvbe->pages) { 59 if (nvbe && nvbe->pages) {
60 dev = nvbe->dev;
61 NV_DEBUG(dev, "\n");
62
62 if (nvbe->bound) 63 if (nvbe->bound)
63 be->func->unbind(be); 64 be->func->unbind(be);
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index f2d0187ba152..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
310static unsigned int 310static unsigned int
311nouveau_vga_set_decode(void *priv, bool state) 311nouveau_vga_set_decode(void *priv, bool state)
312{ 312{
313 struct drm_device *dev = priv;
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315
316 if (dev_priv->chipset >= 0x40)
317 nv_wr32(dev, 0x88054, state);
318 else
319 nv_wr32(dev, 0x1854, state);
320
313 if (state) 321 if (state)
314 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 322 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
315 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 323 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
427 if (ret) 435 if (ret)
428 goto out_timer; 436 goto out_timer;
429 437
430 /* PGRAPH */ 438 if (nouveau_noaccel)
431 ret = engine->graph.init(dev); 439 engine->graph.accel_blocked = true;
432 if (ret) 440 else {
433 goto out_fb; 441 /* PGRAPH */
442 ret = engine->graph.init(dev);
443 if (ret)
444 goto out_fb;
434 445
435 /* PFIFO */ 446 /* PFIFO */
436 ret = engine->fifo.init(dev); 447 ret = engine->fifo.init(dev);
437 if (ret) 448 if (ret)
438 goto out_graph; 449 goto out_graph;
450 }
439 451
 440 /* this calls irq_preinstall, registers the irq handler and 452
 441 * calls irq_postinstall 453
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
479out_irq: 491out_irq:
480 drm_irq_uninstall(dev); 492 drm_irq_uninstall(dev);
481out_fifo: 493out_fifo:
482 engine->fifo.takedown(dev); 494 if (!nouveau_noaccel)
495 engine->fifo.takedown(dev);
483out_graph: 496out_graph:
484 engine->graph.takedown(dev); 497 if (!nouveau_noaccel)
498 engine->graph.takedown(dev);
485out_fb: 499out_fb:
486 engine->fb.takedown(dev); 500 engine->fb.takedown(dev);
487out_timer: 501out_timer:
@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
518 dev_priv->channel = NULL; 532 dev_priv->channel = NULL;
519 } 533 }
520 534
521 engine->fifo.takedown(dev); 535 if (!nouveau_noaccel) {
522 engine->graph.takedown(dev); 536 engine->fifo.takedown(dev);
537 engine->graph.takedown(dev);
538 }
523 engine->fb.takedown(dev); 539 engine->fb.takedown(dev);
524 engine->timer.takedown(dev); 540 engine->timer.takedown(dev);
525 engine->mc.takedown(dev); 541 engine->mc.takedown(dev);
@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
817 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 833 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
818 getparam->value = dev_priv->vm_vram_base; 834 getparam->value = dev_priv->vm_vram_base;
819 break; 835 break;
836 case NOUVEAU_GETPARAM_GRAPH_UNITS:
837 /* NV40 and NV50 versions are quite different, but register
838 * address is the same. User is supposed to know the card
839 * family anyway... */
840 if (dev_priv->chipset >= 0x40) {
841 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
842 break;
843 }
844 /* FALLTHRU */
820 default: 845 default:
821 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 846 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
822 return -EINVAL; 847 return -EINVAL;
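The new NOUVEAU_GETPARAM_GRAPH_UNITS case above reads the same register offset on NV40 and NV50 and deliberately falls through to the -EINVAL default on older chips. A compilable sketch of that dispatch shape (read_reg and PARAM_GRAPH_UNITS are stand-ins, not driver API):

#include <errno.h>
#include <stdint.h>

enum { PARAM_GRAPH_UNITS = 1 };

/* Chipset-gated case that falls through to the error default when the
 * hardware predates the register. */
static int getparam(unsigned chipset, unsigned param,
		    uint32_t (*read_reg)(uint32_t addr), uint64_t *value)
{
	switch (param) {
	case PARAM_GRAPH_UNITS:
		if (chipset >= 0x40) {
			*value = read_reg(0x00001540);
			break;
		}
		/* FALLTHRU: pre-NV40 has no GRAPH_UNITS register */
	default:
		return -EINVAL;
	}
	return 0;
}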
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d0e038d28948..1d73b15d70da 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 				       struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
 	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
 	int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	/* only implemented for head A for now */
 	NVSetOwner(dev, 0);
 
+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
 	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
 
@@ -203,6 +206,7 @@ out:
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
 	if (blue == 0x18) {
 		NV_INFO(dev, "Load detected on head A\n");
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index d910873c1368..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
-static void
+void
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 	FIRE_RING(chan);
 }
 
-static void
+void
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 	FIRE_RING(chan);
 }
 
-static void
+void
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
 
 	FIRE_RING(chan);
 
-	info->fbops->fb_fillrect = nv04_fbcon_fillrect;
-	info->fbops->fb_copyarea = nv04_fbcon_copyarea;
-	info->fbops->fb_imageblit = nv04_fbcon_imageblit;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c3341b..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
 			       nouveau_encoder(encoder)->restore.output);
 
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 40b7360841f8..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
 static void
 nv50_crtc_destroy(struct drm_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-	NV_DEBUG_KMS(dev, "\n");
+	struct drm_device *dev;
+	struct nouveau_crtc *nv_crtc;
 
 	if (!crtc)
 		return;
 
+	dev = crtc->dev;
+	nv_crtc = nouveau_crtc(crtc);
+
+	NV_DEBUG_KMS(dev, "\n");
+
 	drm_crtc_cleanup(&nv_crtc->base);
 
 	nv50_cursor_fini(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e4f279ee61cf..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
 
-static void
+void
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 	FIRE_RING(chan);
 }
 
-static void
+void
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 	FIRE_RING(chan);
 }
 
-static void
+void
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
 			dev_priv->vm_vram_base);
 
-	info->fbops->fb_fillrect = nv50_fbcon_fillrect;
-	info->fbops->fb_copyarea = nv50_fbcon_copyarea;
-	info->fbops->fb_imageblit = nv50_fbcon_imageblit;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 32b244bcb482..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -317,17 +317,20 @@ void
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
-	nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+	/* This will ensure the channel is seen as disabled. */
+	chan->ramfc = NULL;
 	nv50_fifo_channel_disable(dev, chan->id, false);
 
 	/* Dummy channel, also used on ch 127 */
 	if (chan->id == 0)
 		nv50_fifo_channel_disable(dev, 127, false);
+
+	nouveau_gpuobj_ref_del(dev, &ramfc);
+	nouveau_gpuobj_ref_del(dev, &chan->cache);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 20319e59d368..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
 	uint32_t inst;
 	int i;
 
+	/* Be sure we're not in the middle of a context switch or bad things
+	 * will happen, such as unloading the wrong pgraph context.
+	 */
+	if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "Ctxprog is still running\n");
+
 	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
 	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
 		return NULL;
@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
 int
 nv50_graph_unload_context(struct drm_device *dev)
 {
-	uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+	uint32_t inst;
 
 	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
 	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
 	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
 
 	nouveau_wait_for_idle(dev);
-	nv_wr32(dev, 0x400500, fifo & ~1);
 	nv_wr32(dev, 0x400784, inst);
 	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
 	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
 	nouveau_wait_for_idle(dev);
-	nv_wr32(dev, 0x400500, fifo);
 
 	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
 	return 0;
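nv_wait() in the hunk above polls a register until a masked value appears, so the driver never reads NV50_PGRAPH_CTXCTL_CUR in the middle of a context switch. A rough sketch of such a bounded poll (read32 is a stand-in for the MMIO accessor; the real helper also paces its reads and uses a timeout clock):

#include <stdbool.h>
#include <stdint.h>

/* Spin until (reg & mask) == val or the attempt budget runs out. */
static bool poll_reg(uint32_t (*read32)(uint32_t addr),
		     uint32_t addr, uint32_t mask, uint32_t val)
{
	unsigned tries;

	for (tries = 0; tries < 100000; tries++) {
		if ((read32(addr) & mask) == val)
			return true;
	}
	return false;	/* caller reports, e.g. "Ctxprog is still running" */
}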
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7f..f0dc4e36ef05 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
+	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+	else
+		dev_priv->vram_sys_base = 0;
+
 	/* Reserve the last MiB of VRAM, we should probably try to avoid
 	 * setting up the below tables over the top of the VBIOS image at
 	 * some point.
@@ -172,16 +177,28 @@
 	 * We map the entire fake channel into the start of the PRAMIN BAR
 	 */
 	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
 				     0, &priv->pramin_pt);
 	if (ret)
 		return ret;
 
-	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-		if (v < (c_offset + c_size))
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-		else
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+	v = c_offset | 1;
+	if (dev_priv->vram_sys_base) {
+		v += dev_priv->vram_sys_base;
+		v |= 0x30;
+	}
+
+	i = 0;
+	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		v += 0x1000;
+		i += 8;
+	}
+
+	while (i < pt_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
 		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		i += 8;
 	}
 
 	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end, vram;
+	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+	uint32_t pte, pte_end;
+	uint64_t vram;
 
 	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
 		return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
 		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->im_backing_start;
 
 	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
 		 gpuobj->im_pramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 
+	vram |= 1;
+	if (dev_priv->vram_sys_base) {
+		vram += dev_priv->vram_sys_base;
+		vram |= 0x30;
+	}
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
-		pte += 8;
+		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	if (gpuobj->im_bound == 0)
 		return -EINVAL;
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-		pte += 8;
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
 	dev_priv->engine.instmem.finish_access(dev);
 
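The instmem hunks above switch the PRAMIN page table to 64-bit entries written as two 32-bit words: bit 0 marks the entry present, and on the IGP chipsets (0xaa/0xac) the backing page lives in stolen system memory, so the address is rebased by vram_sys_base and tagged with 0x30. A small sketch of that encoding (names mirror the diff, but this is illustrative, not the driver's API):

#include <stdint.h>

static void encode_pte(uint64_t page_addr, uint64_t vram_sys_base,
		       uint32_t *lo, uint32_t *hi)
{
	uint64_t pte = page_addr | 1;		/* bit 0: present */

	if (vram_sys_base) {
		pte += vram_sys_base;		/* rebase into system RAM */
		pte |= 0x30;			/* system-memory target bits */
	}
	*lo = (uint32_t)pte;			/* lower_32_bits(pte) */
	*hi = (uint32_t)(pte >> 32);		/* upper_32_bits(pte) */
}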
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ecf1936b8224..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 		struct nouveau_encoder *nvenc = nouveau_encoder(enc);
 
 		if (nvenc == nv_encoder ||
+		    nvenc->disconnect != nv50_sor_disconnect ||
 		    nvenc->dcb->or != nv_encoder->dcb->or)
 			continue;
 
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
 config DRM_RADEON_KMS
-	bool "Enable modesetting on radeon by default"
+	bool "Enable modesetting on radeon by default - NEW DRIVER"
 	depends on DRM_RADEON
 	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain.
+	  Choose this option if you want kernel modesetting enabled by default.
+
+	  This is a completely new driver. It's only part of the existing drm
+	  for compatibility reasons. It requires an entirely different graphics
+	  stack above it and works very differently from the old drm stack.
+	  i.e. don't enable this unless you know what you are doing it may
+	  cause issues or bugs compared to the previous userspace driver stack.
 
 	  When kernel modesetting is enabled the IOCTL of radeon/drm
 	  driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e3b44562d265..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
 
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <asm/unaligned.h>
 
 #define ATOM_DEBUG
 
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
 	case ATOM_ARG_PS:
 		idx = U8(*ptr);
 		(*ptr)++;
-		val = le32_to_cpu(ctx->ps[idx]);
+		/* get_unaligned_le32 avoids unaligned accesses from atombios
+		 * tables, noticed on a DEC Alpha. */
+		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
 		if (print)
 			DEBUG("PS[0x%02X,0x%04X]", idx, val);
 		break;
@@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t count = U8((*ptr)++);
 	SDEBUG("   count: %d\n", count);
 	if (arg == ATOM_UNIT_MICROSEC)
-		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+		udelay(count);
 	else
 		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
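The atom.c change above matters on strict-alignment machines: AtomBIOS parameter space packs 32-bit values at arbitrary byte offsets, and a plain 32-bit load through such a pointer faults on hardware like the Alpha named in the comment. get_unaligned_le32() does a byte-safe little-endian read; a portable equivalent could look like this (a sketch, not the kernel helper itself):

#include <stdint.h>
#include <string.h>

static uint32_t read_le32_unaligned(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, 4);	/* byte copy: no alignment assumption */
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}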
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 71060114d5de..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
 	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
 	unsigned char *base;
+	int retry_count = 0;
 
 	memset(&args, 0, sizeof(args));
 
 	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
+retry:
 	memcpy(base, req_bytes, num_bytes);
 
 	args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
-	if (args.ucReplyStatus) {
-		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+	if (args.ucReplyStatus && !args.ucDataOutLen) {
+		if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+			goto retry;
+		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
 			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
-			  chan->rec.i2c_id, args.ucReplyStatus);
+			  chan->rec.i2c_id, args.ucReplyStatus, retry_count);
 		return false;
 	}
 
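The AUX-channel hunk above retries only one specific transient status (0x20, channel busy), caps the retries at ten, and logs how many were spent. The general shape, as a standalone sketch (try_once stands in for the atombios transaction):

#include <stdbool.h>

static bool aux_transfer(bool (*try_once)(int *status))
{
	int status = 0, retry_count = 0;

	do {
		if (try_once(&status))
			return true;	/* reply carried data: success */
	} while (status == 0x20 && retry_count++ < 10);

	return false;	/* caller logs the failure and retry_count */
}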
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 11c9a3fe6810..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 	return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/* Who ever call radeon_fence_emit should call ring_lock and ask
+ * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	/* Who ever call radeon_fence_emit should call ring_lock and ask
-	 * for enough space (today caller are ib schedule and buffer move) */
+	/* We have to make sure that caches are flushed before
+	 * CPU might read something from VRAM. */
+	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -3369,7 +3375,6 @@
 
 void r100_fini(struct radeon_device *rdev)
 {
-	r100_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r100_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
 
 	/* DDR for all card after R300 & IGP */
 	rdev->mc.vram_is_ddr = true;
+
 	tmp = RREG32(RADEON_MEM_CNTL);
-	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-		rdev->mc.vram_width = 128;
-	} else {
-		rdev->mc.vram_width = 64;
+	tmp &= R300_MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0: rdev->mc.vram_width = 64; break;
+	case 1: rdev->mc.vram_width = 128; break;
+	case 2: rdev->mc.vram_width = 256; break;
+	default:  rdev->mc.vram_width = 128; break;
 	}
 
 	r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@
 
 void r300_fini(struct radeon_device *rdev)
 {
-	r300_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r300_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCIE)
 			rv370_pcie_gart_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
-		radeon_irq_kms_fini(rdev);
+		radeon_agp_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4526faaacca8..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r420_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCIE)
 			rv370_pcie_gart_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
 		radeon_agp_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b9..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rv515_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv370_pcie_gart_fini(rdev);
 		radeon_agp_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1b6d0001b20e..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.align_mask = 16 - 1;
 }
 
+void r600_cp_fini(struct radeon_device *rdev)
+{
+	r600_cp_stop(rdev);
+	radeon_ring_fini(rdev);
+}
+
 
 /*
  * GPU scratch registers helpers function.
@@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev)
 		return r;
 	}
 	r600_gpu_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
 	/* pin copy shader into vram */
 	if (rdev->r600_blit.shader_obj) {
 		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1938,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
 		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 		return r;
 	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio resume failed\n");
+		return r;
+	}
+
 	return r;
 }
 
@@ -1945,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
 {
 	int r;
 
+	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
@@ -2045,19 +2065,15 @@ int r600_init(struct radeon_device *rdev)
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
-	r = r600_blit_init(rdev);
-	if (r) {
-		r600_blit_fini(rdev);
-		rdev->asic->copy = NULL;
-		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-	}
 
 	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
-		r600_suspend(rdev);
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
 		r600_wb_fini(rdev);
-		radeon_ring_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		r600_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
@@ -2083,20 +2099,17 @@
 
 void r600_fini(struct radeon_device *rdev)
 {
-	/* Suspend operations */
-	r600_suspend(rdev);
-
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_ring_fini(rdev);
-	r600_wb_fini(rdev);
 	r600_pcie_gart_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
@@ -2900,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
 	return 0;
 #endif
 }
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * rdev: radeon device structure
+ * bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
+ * through ring buffer, this leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
+ * directly perform HDP flush by writing register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
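A pattern worth noting across the r100/r300/r420/r520/r600 hunks: the init error paths stop routing through suspend and instead call the matching *_fini teardown for exactly the steps that succeeded, in reverse order. Schematically (all functions below are placeholders, not driver code):

static int  cp_init(void)  { return 0; }
static void cp_fini(void)  { }
static int  wb_init(void)  { return 0; }
static void wb_fini(void)  { }
static int  irq_init(void) { return 0; }

static int device_init(void)
{
	int r;

	r = cp_init();
	if (r)
		return r;		/* nothing to unwind yet */
	r = wb_init();
	if (r)
		goto err_cp;
	r = irq_init();
	if (r)
		goto err_wb;
	return 0;

err_wb:
	wb_fini();			/* unwind in reverse order */
err_cp:
	cp_fini();
	return r;
}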
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-	return rdev->family >= CHIP_R600
+	return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
 		|| rdev->family == CHIP_RS600
 		|| rdev->family == CHIP_RS690
 		|| rdev->family == CHIP_RS740;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
 	if (!r600_audio_chipset_supported(rdev))
 		return;
 
-	WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
-
 	del_timer(&rdev->audio_timer);
+	WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
 }
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca8a4cb..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711c2e91..75bcf35a0931 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
 
 	gb_tiling_config |= R600_BANK_SWAPS(1);
 
-	backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
-							dev_priv->r600_max_backends,
-							(0xff << dev_priv->r600_max_backends) & 0xff);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+								dev_priv->r600_max_backends,
+								(0xff << dev_priv->r600_max_backends) & 0xff);
 	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
 
 	cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2d5f2bfa7201..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
 	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*
@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {
@@ -661,6 +662,13 @@ struct radeon_asic {
 	void (*hpd_fini)(struct radeon_device *rdev);
 	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 	void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	/* ioctl hw specific callback. Some hw might want to perform special
+	 * operation on specific ioctl. For instance on wait idle some hw
+	 * might want to perform and HDP flush through MMIO as it seems that
+	 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
+	 * through ring.
+	 */
+	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
 };
 
 /*
@@ -1143,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
 extern void r600_cp_stop(struct radeon_device *rdev);
 extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_cp_resume(struct radeon_device *rdev);
+extern void r600_cp_fini(struct radeon_device *rdev);
 extern int r600_count_pipe_bits(uint32_t val);
 extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
 extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
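The radeon.h hunks replace the IB pool's bitmap allocator with a per-IB free flag plus a head_id cursor; with RADEON_IB_POOL_SIZE a power of 2 (the new comment), the scan can wrap with a mask instead of a modulo. A sketch of the allocator those fields imply (simplified; the real code also checks fences and holds pool->mutex):

#include <stddef.h>
#include <stdbool.h>

#define IB_POOL_SIZE 16			/* must be a power of 2 */

struct ib { bool free; };

struct ib_pool {
	struct ib ibs[IB_POOL_SIZE];
	unsigned  head_id;
};

static struct ib *ib_get(struct ib_pool *pool)
{
	unsigned i, idx = pool->head_id;

	for (i = 0; i < IB_POOL_SIZE; i++) {
		if (pool->ibs[idx].free) {
			pool->ibs[idx].free = false;
			pool->head_id = (idx + 1) & (IB_POOL_SIZE - 1);
			return &pool->ibs[idx];
		}
		idx = (idx + 1) & (IB_POOL_SIZE - 1);
	}
	return NULL;			/* every IB still in flight */
}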
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 /*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
 	.hpd_set_polarity = &r100_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
 	.hpd_set_polarity = &rs600_hpd_set_polarity,
+	.ioctl_wait_idle = NULL,
 };
 
 /*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 void r600_hpd_set_polarity(struct radeon_device *rdev,
 			   enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
 
 static struct radeon_asic r600_asic = {
 	.init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
 	.hpd_fini = &r600_hpd_fini,
 	.hpd_sense = &r600_hpd_sense,
 	.hpd_set_polarity = &r600_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
 };
 
 /*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
 	.hpd_fini = &r600_hpd_fini,
 	.hpd_sense = &r600_hpd_sense,
 	.hpd_set_polarity = &r600_hpd_set_polarity,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
 };
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa82ca74324e..4d8831548a5f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
+	/* Asrock RS600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x7941) &&
+	    (dev->pdev->subsystem_vendor == 0x1849) &&
+	    (dev->pdev->subsystem_device == 0x7941)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
 	if ((dev->pdev->device == 0x7941) &&
 	    (dev->pdev->subsystem_vendor == 0x147b) &&
@@ -287,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
+	/* XFX Pine Group device rv730 reports no VGA DDC lines
+	 * even though they are wired up to record 0x93
+	 */
+	if ((dev->pdev->device == 0x9498) &&
+	    (dev->pdev->subsystem_vendor == 0x1682) &&
+	    (dev->pdev->subsystem_device == 0x2452)) {
+		struct radeon_device *rdev = dev->dev_private;
+		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+	}
 	return true;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 	if (r) {
 		goto out_cleanup;
 	}
-	start_jiffies = jiffies;
-	for (i = 0; i < n; i++) {
-		r = radeon_fence_create(rdev, &fence);
-		if (r) {
-			goto out_cleanup;
+
+	/* r100 doesn't have dma engine so skip the test */
+	if (rdev->asic->copy_dma) {
+
+		start_jiffies = jiffies;
+		for (i = 0; i < n; i++) {
+			r = radeon_fence_create(rdev, &fence);
+			if (r) {
+				goto out_cleanup;
+			}
+
+			r = radeon_copy_dma(rdev, saddr, daddr,
+					    size / RADEON_GPU_PAGE_SIZE, fence);
+
+			if (r) {
+				goto out_cleanup;
+			}
+			r = radeon_fence_wait(fence, false);
+			if (r) {
+				goto out_cleanup;
+			}
+			radeon_fence_unref(&fence);
 		}
-		r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
-		if (r) {
-			goto out_cleanup;
+		end_jiffies = jiffies;
+		time = end_jiffies - start_jiffies;
+		time = jiffies_to_msecs(time);
+		if (time > 0) {
+			i = ((n * size) >> 10) / time;
+			printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+			       " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+			       n, size >> 10,
+			       sdomain, ddomain, time,
+			       i, i * 1000, (i * 1000) / 1024);
 		}
-		r = radeon_fence_wait(fence, false);
-		if (r) {
-			goto out_cleanup;
-		}
-		radeon_fence_unref(&fence);
-	}
-	end_jiffies = jiffies;
-	time = end_jiffies - start_jiffies;
-	time = jiffies_to_msecs(time);
-	if (time > 0) {
-		i = ((n * size) >> 10) / time;
-		printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
-		       " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
-		       sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
 	}
+
 	start_jiffies = jiffies;
 	for (i = 0; i < n; i++) {
 		r = radeon_fence_create(rdev, &fence);
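For reference, the benchmark's throughput arithmetic: n moves of size bytes in time milliseconds give ((n * size) >> 10) / time KiB/ms. For example, 1024 moves of a 1 MiB buffer in 512 ms is (1024 * 1048576) >> 10 = 1048576 KiB, divided by 512 ms = 2048 KiB/ms; the printk then reports i * 1000 = 2048000 kb/s and (i * 1000) / 1024 = 2000 M/s. A direct transcription of that math (it inherits the original's 32-bit overflow caveat for very large n * size):

static unsigned kib_per_ms(unsigned n, unsigned size_bytes, unsigned long time_ms)
{
	if (time_ms == 0)
		return 0;	/* the benchmark skips reporting this case */
	return ((n * size_bytes) >> 10) / (unsigned)time_ms;
}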
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e081..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 			 lvds->native_mode.vdisplay);
 
 	lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
-	if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
-		lvds->panel_vcc_delay = 2000;
+	lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
 
 	lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
 	lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa47..65f81942f399 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder;
 	struct drm_encoder_helper_funcs *encoder_funcs;
-	bool dret;
+	bool dret = false;
 	enum drm_connector_status ret = connector_status_disconnected;
 
 	encoder = radeon_best_single_encoder(connector);
 	if (!encoder)
 		ret = connector_status_disconnected;
 
-	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
-	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+	if (radeon_connector->ddc_bus) {
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+		dret = radeon_ddc_probe(radeon_connector);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+	}
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 	struct drm_mode_object *obj;
 	int i;
 	enum drm_connector_status ret = connector_status_disconnected;
-	bool dret;
+	bool dret = false;
 
-	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
-	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+	if (radeon_connector->ddc_bus) {
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+		dret = radeon_ddc_probe(radeon_connector);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+	}
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
@@ -776,7 +780,7 @@
 	 * connected and the DVI port disconnected. If the edid doesn't
 	 * say HDMI, vice versa.
 	 */
-	if (radeon_connector->shared_ddc && connector_status_connected) {
+	if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
 		struct drm_device *dev = connector->dev;
 		struct drm_connector *list_connector;
 		struct radeon_connector *list_radeon_connector;
@@ -1056,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		return;
 	}
 	if (radeon_connector->ddc_bus && i2c_bus->valid) {
-		if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
-			   sizeof(struct radeon_i2c_bus_rec)) == 0) {
+		if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
 			radeon_connector->shared_ddc = true;
 			shared_ddc = true;
 		}
@@ -1343,7 +1346,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		radeon_connector->dac_load_detect = false;
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.load_detect_property,
-					      1);
+					      radeon_connector->dac_load_detect);
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.tv_std_property,
 					      radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148cf5e6..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 						  &p->validated);
 		}
 	}
-	return radeon_bo_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (error && parser->ib) {
-		radeon_bo_list_unvalidate(&parser->validated,
-					  parser->ib->fence);
-	} else {
-		radeon_bo_list_unreserve(&parser->validated);
+	if (!error && parser->ib) {
+		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
+	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
 			mutex_lock(&parser->rdev->ddev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a92f994cc26..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
 		DRM_INFO("  %s\n", connector_names[connector->connector_type]);
 		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
 			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
-		if (radeon_connector->ddc_bus)
+		if (radeon_connector->ddc_bus) {
 			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
 				 radeon_connector->ddc_bus->rec.mask_clk_reg,
 				 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
+		} else {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+		}
 		DRM_INFO("  Encoders:\n");
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e13785282a82..c57ad606504d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
106 * 1.29- R500 3D cmd buffer support 106 * 1.29- R500 3D cmd buffer support
107 * 1.30- Add support for occlusion queries 107 * 1.30- Add support for occlusion queries
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup
109 */ 110 */
110#define DRIVER_MAJOR 1 111#define DRIVER_MAJOR 1
111#define DRIVER_MINOR 31 112#define DRIVER_MINOR 32
112#define DRIVER_PATCHLEVEL 0 113#define DRIVER_PATCHLEVEL 0
113 114
114enum radeon_cp_microcode_version { 115enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
248 if (ret) 248 if (ret)
249 goto out_unref; 249 goto out_unref;
250 250
251 memset_io(fbptr, 0xff, aligned_size); 251 memset_io(fbptr, 0x0, aligned_size);
252 252
253 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
254 254
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e18534..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
308 } 308 }
309 robj = gobj->driver_private; 309 robj = gobj->driver_private;
310 r = radeon_bo_wait(robj, NULL, false); 310 r = radeon_bo_wait(robj, NULL, false);
311 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
311 mutex_lock(&dev->struct_mutex); 314 mutex_lock(&dev->struct_mutex);
312 drm_gem_object_unreference(gobj); 315 drm_gem_object_unreference(gobj);
313 mutex_unlock(&dev->struct_mutex); 316 mutex_unlock(&dev->struct_mutex);
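
The radeon_gem hunk adds an optional per-ASIC hook after the wait: chips that need extra work on wait-idle install ioctl_wait_idle in their asic table, everyone else leaves it NULL. The pattern in isolation, with simplified types rather than the radeon structs:

    #include <stdio.h>

    struct asic_ops {
        void (*ioctl_wait_idle)(void *rdev, void *bo);  /* may be NULL */
    };

    static void r600_wait_idle(void *rdev, void *bo) { puts("hw flush"); }

    static void wait_idle(struct asic_ops *ops, void *rdev, void *bo)
    {
        /* the NULL check makes the callback optional per chip family */
        if (ops->ioctl_wait_idle)
            ops->ioctl_wait_idle(rdev, bo);
    }

    int main(void)
    {
        struct asic_ops r600 = { .ioctl_wait_idle = r600_wait_idle };
        struct asic_ops r100 = { 0 };
        wait_idle(&r600, 0, 0);  /* prints "hw flush" */
        wait_idle(&r100, 0, 0);  /* no-op */
        return 0;
    }
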
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71bff218..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
306 } 306 }
307} 307}
308 308
309int radeon_bo_list_validate(struct list_head *head, void *fence) 309int radeon_bo_list_validate(struct list_head *head)
310{ 310{
311 struct radeon_bo_list *lobj; 311 struct radeon_bo_list *lobj;
312 struct radeon_bo *bo; 312 struct radeon_bo *bo;
313 struct radeon_fence *old_fence = NULL;
314 int r; 313 int r;
315 314
316 r = radeon_bo_list_reserve(head); 315 r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
334 } 333 }
335 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 334 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
336 lobj->tiling_flags = bo->tiling_flags; 335 lobj->tiling_flags = bo->tiling_flags;
337 if (fence) {
338 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
339 bo->tbo.sync_obj = radeon_fence_ref(fence);
340 bo->tbo.sync_obj_arg = NULL;
341 }
342 if (old_fence) {
343 radeon_fence_unref(&old_fence);
344 }
345 } 336 }
346 return 0; 337 return 0;
347} 338}
348 339
349void radeon_bo_list_unvalidate(struct list_head *head, void *fence) 340void radeon_bo_list_fence(struct list_head *head, void *fence)
350{ 341{
351 struct radeon_bo_list *lobj; 342 struct radeon_bo_list *lobj;
352 struct radeon_fence *old_fence; 343 struct radeon_bo *bo;
353 344 struct radeon_fence *old_fence = NULL;
354 if (fence) 345
355 list_for_each_entry(lobj, head, list) { 346 list_for_each_entry(lobj, head, list) {
356 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); 347 bo = lobj->bo;
357 if (old_fence == fence) { 348 spin_lock(&bo->tbo.lock);
358 lobj->bo->tbo.sync_obj = NULL; 349 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
359 radeon_fence_unref(&old_fence); 350 bo->tbo.sync_obj = radeon_fence_ref(fence);
360 } 351 bo->tbo.sync_obj_arg = NULL;
352 spin_unlock(&bo->tbo.lock);
353 if (old_fence) {
354 radeon_fence_unref(&old_fence);
361 } 355 }
362 radeon_bo_list_unreserve(head); 356 }
363} 357}
364 358
365int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 359int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
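
The new radeon_bo_list_fence replaces each buffer's sync object under bo->tbo.lock and drops the old reference only after the spinlock is released, since the unref can free the fence. The same publish-then-release shape in a standalone sketch, with a pthread mutex standing in for the spinlock and a non-atomic refcount that the real code does not use:

    #include <pthread.h>
    #include <stdlib.h>

    struct fence { int ref; };
    struct bo { pthread_mutex_t lock; struct fence *sync_obj; };

    static struct fence *fence_ref(struct fence *f) { f->ref++; return f; }
    static void fence_unref(struct fence *f) { if (--f->ref == 0) free(f); }

    static void bo_fence(struct bo *bo, struct fence *newf)
    {
        struct fence *old;

        pthread_mutex_lock(&bo->lock);
        old = bo->sync_obj;
        bo->sync_obj = fence_ref(newf);  /* publish inside the lock */
        pthread_mutex_unlock(&bo->lock);
        if (old)
            fence_unref(old);            /* may free: do it unlocked */
    }

    int main(void)
    {
        struct bo bo = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct fence *f = calloc(1, sizeof(*f));
        f->ref = 1;
        bo_fence(&bo, f);  /* bo now holds its own reference to f */
        return 0;
    }
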
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
156 struct list_head *head); 156 struct list_head *head);
157extern int radeon_bo_list_reserve(struct list_head *head); 157extern int radeon_bo_list_reserve(struct list_head *head);
158extern void radeon_bo_list_unreserve(struct list_head *head); 158extern void radeon_bo_list_unreserve(struct list_head *head);
159extern int radeon_bo_list_validate(struct list_head *head, void *fence); 159extern int radeon_bo_list_validate(struct list_head *head);
160extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); 160extern void radeon_bo_list_fence(struct list_head *head, void *fence);
161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
162 struct vm_area_struct *vma); 162 struct vm_area_struct *vma);
163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..6579eb4c1f28 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
41{ 41{
42 struct radeon_fence *fence; 42 struct radeon_fence *fence;
43 struct radeon_ib *nib; 43 struct radeon_ib *nib;
44 unsigned long i; 44 int r = 0, i, c;
45 int r = 0;
46 45
47 *ib = NULL; 46 *ib = NULL;
48 r = radeon_fence_create(rdev, &fence); 47 r = radeon_fence_create(rdev, &fence);
49 if (r) { 48 if (r) {
50 DRM_ERROR("failed to create fence for new IB\n"); 49 dev_err(rdev->dev, "failed to create fence for new IB\n");
51 return r; 50 return r;
52 } 51 }
53 mutex_lock(&rdev->ib_pool.mutex); 52 mutex_lock(&rdev->ib_pool.mutex);
54 i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 53 for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
55 if (i < RADEON_IB_POOL_SIZE) { 54 i &= (RADEON_IB_POOL_SIZE - 1);
56 set_bit(i, rdev->ib_pool.alloc_bm); 55 if (rdev->ib_pool.ibs[i].free) {
57 rdev->ib_pool.ibs[i].length_dw = 0; 56 nib = &rdev->ib_pool.ibs[i];
58 *ib = &rdev->ib_pool.ibs[i]; 57 break;
59 mutex_unlock(&rdev->ib_pool.mutex); 58 }
60 goto out;
61 } 59 }
62 if (list_empty(&rdev->ib_pool.scheduled_ibs)) { 60 if (nib == NULL) {
63 /* we go do nothings here */ 61 /* This should never happen, it means we allocated all
62 * IB and haven't scheduled one yet, return EBUSY to
63 * userspace hoping that on ioctl recall we get better
64 * luck
65 */
66 dev_err(rdev->dev, "no free indirect buffer !\n");
64 mutex_unlock(&rdev->ib_pool.mutex); 67 mutex_unlock(&rdev->ib_pool.mutex);
65 DRM_ERROR("all IB allocated none scheduled.\n"); 68 radeon_fence_unref(&fence);
66 r = -EINVAL; 69 return -EBUSY;
67 goto out;
68 } 70 }
69 /* get the first ib on the scheduled list */ 71 rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
70 nib = list_entry(rdev->ib_pool.scheduled_ibs.next, 72 nib->free = false;
71 struct radeon_ib, list); 73 if (nib->fence) {
72 if (nib->fence == NULL) {
73 /* we go do nothings here */
74 mutex_unlock(&rdev->ib_pool.mutex); 74 mutex_unlock(&rdev->ib_pool.mutex);
75 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); 75 r = radeon_fence_wait(nib->fence, false);
76 r = -EINVAL; 76 if (r) {
77 goto out; 77 dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
78 } 78 nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
79 mutex_unlock(&rdev->ib_pool.mutex); 79 mutex_lock(&rdev->ib_pool.mutex);
80 80 nib->free = true;
81 r = radeon_fence_wait(nib->fence, false); 81 mutex_unlock(&rdev->ib_pool.mutex);
82 if (r) { 82 radeon_fence_unref(&fence);
83 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, 83 return r;
84 (unsigned long)nib->gpu_addr, nib->length_dw); 84 }
85 DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); 85 mutex_lock(&rdev->ib_pool.mutex);
86 goto out;
87 } 86 }
88 radeon_fence_unref(&nib->fence); 87 radeon_fence_unref(&nib->fence);
89 88 nib->fence = fence;
90 nib->length_dw = 0; 89 nib->length_dw = 0;
91
92 /* scheduled list is accessed here */
93 mutex_lock(&rdev->ib_pool.mutex);
94 list_del(&nib->list);
95 INIT_LIST_HEAD(&nib->list);
96 mutex_unlock(&rdev->ib_pool.mutex); 90 mutex_unlock(&rdev->ib_pool.mutex);
97
98 *ib = nib; 91 *ib = nib;
99out: 92 return 0;
100 if (r) {
101 radeon_fence_unref(&fence);
102 } else {
103 (*ib)->fence = fence;
104 }
105 return r;
106} 93}
107 94
108void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) 95void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
113 if (tmp == NULL) { 100 if (tmp == NULL) {
114 return; 101 return;
115 } 102 }
116 mutex_lock(&rdev->ib_pool.mutex); 103 if (!tmp->fence->emited)
117 if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
118 /* IB is scheduled & not signaled don't do anythings */
119 mutex_unlock(&rdev->ib_pool.mutex);
120 return;
121 }
122 list_del(&tmp->list);
123 INIT_LIST_HEAD(&tmp->list);
124 if (tmp->fence)
125 radeon_fence_unref(&tmp->fence); 104 radeon_fence_unref(&tmp->fence);
126 105 mutex_lock(&rdev->ib_pool.mutex);
127 tmp->length_dw = 0; 106 tmp->free = true;
128 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
129 mutex_unlock(&rdev->ib_pool.mutex); 107 mutex_unlock(&rdev->ib_pool.mutex);
130} 108}
131 109
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
135 113
136 if (!ib->length_dw || !rdev->cp.ready) { 114 if (!ib->length_dw || !rdev->cp.ready) {
137 /* TODO: Nothings in the ib we should report. */ 115 /* TODO: Nothings in the ib we should report. */
138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 116 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
139 return -EINVAL; 117 return -EINVAL;
140 } 118 }
141 119
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
148 radeon_ring_ib_execute(rdev, ib); 126 radeon_ring_ib_execute(rdev, ib);
149 radeon_fence_emit(rdev, ib->fence); 127 radeon_fence_emit(rdev, ib->fence);
150 mutex_lock(&rdev->ib_pool.mutex); 128 mutex_lock(&rdev->ib_pool.mutex);
151 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); 129 /* once scheduled IB is considered free and protected by the fence */
130 ib->free = true;
152 mutex_unlock(&rdev->ib_pool.mutex); 131 mutex_unlock(&rdev->ib_pool.mutex);
153 radeon_ring_unlock_commit(rdev); 132 radeon_ring_unlock_commit(rdev);
154 return 0; 133 return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
164 if (rdev->ib_pool.robj) 143 if (rdev->ib_pool.robj)
165 return 0; 144 return 0;
166 /* Allocate 1M object buffer */ 145 /* Allocate 1M object buffer */
167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
168 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 146 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
169 true, RADEON_GEM_DOMAIN_GTT, 147 true, RADEON_GEM_DOMAIN_GTT,
170 &rdev->ib_pool.robj); 148 &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
195 rdev->ib_pool.ibs[i].ptr = ptr + offset; 173 rdev->ib_pool.ibs[i].ptr = ptr + offset;
196 rdev->ib_pool.ibs[i].idx = i; 174 rdev->ib_pool.ibs[i].idx = i;
197 rdev->ib_pool.ibs[i].length_dw = 0; 175 rdev->ib_pool.ibs[i].length_dw = 0;
198 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); 176 rdev->ib_pool.ibs[i].free = true;
199 } 177 }
200 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 178 rdev->ib_pool.head_id = 0;
201 rdev->ib_pool.ready = true; 179 rdev->ib_pool.ready = true;
202 DRM_INFO("radeon: ib pool ready.\n"); 180 DRM_INFO("radeon: ib pool ready.\n");
203 if (radeon_debugfs_ib_init(rdev)) { 181 if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
214 return; 192 return;
215 } 193 }
216 mutex_lock(&rdev->ib_pool.mutex); 194 mutex_lock(&rdev->ib_pool.mutex);
217 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
218 if (rdev->ib_pool.robj) { 195 if (rdev->ib_pool.robj) {
219 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 196 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
220 if (likely(r == 0)) { 197 if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
363 if (ib == NULL) { 340 if (ib == NULL) {
364 return 0; 341 return 0;
365 } 342 }
366 seq_printf(m, "IB %04lu\n", ib->idx); 343 seq_printf(m, "IB %04u\n", ib->idx);
367 seq_printf(m, "IB fence %p\n", ib->fence); 344 seq_printf(m, "IB fence %p\n", ib->fence);
368 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 345 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
369 for (i = 0; i < ib->length_dw; i++) { 346 for (i = 0; i < ib->length_dw; i++) {
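
The radeon_ring rework above replaces the allocation bitmap and the scheduled-IB list with a per-IB free flag plus a head_id cursor: radeon_ib_get() scans the pool as a ring starting just past the last slot handed out, and radeon_ib_schedule() marks an IB free immediately, since its fence now protects reuse. A userspace model of the allocator alone; POOL_SIZE stands in for RADEON_IB_POOL_SIZE and must stay a power of two for the wrap mask:

    #include <stdbool.h>
    #include <stdio.h>

    #define POOL_SIZE 16  /* stands in for RADEON_IB_POOL_SIZE */

    struct ib { unsigned idx; bool free; };

    static struct ib pool[POOL_SIZE];
    static unsigned head_id;

    static struct ib *ib_get(void)
    {
        unsigned c, i;

        for (c = 0, i = head_id; c < POOL_SIZE; c++, i++) {
            i &= POOL_SIZE - 1;               /* wrap around the ring */
            if (pool[i].free) {
                pool[i].free = false;
                head_id = (i + 1) & (POOL_SIZE - 1);
                return &pool[i];
            }
        }
        return NULL;  /* every IB busy: the driver returns -EBUSY here */
    }

    int main(void)
    {
        unsigned i;
        struct ib *a, *b;

        for (i = 0; i < POOL_SIZE; i++)
            pool[i] = (struct ib){ .idx = i, .free = true };
        a = ib_get();
        b = ib_get();
        printf("got IB %u then IB %u\n", a->idx, b->idx);  /* 0 then 1 */
        return 0;
    }
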
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
223 return 0; 223 return 0;
224} 224}
225 225
226int rs400_mc_wait_for_idle(struct radeon_device *rdev)
227{
228 unsigned i;
229 uint32_t tmp;
230
231 for (i = 0; i < rdev->usec_timeout; i++) {
232 /* read MC_STATUS */
233 tmp = RREG32(0x0150);
234 if (tmp & (1 << 2)) {
235 return 0;
236 }
237 DRM_UDELAY(1);
238 }
239 return -1;
240}
241
226void rs400_gpu_init(struct radeon_device *rdev) 242void rs400_gpu_init(struct radeon_device *rdev)
227{ 243{
228 /* FIXME: HDP same place on rs400 ? */ 244 /* FIXME: HDP same place on rs400 ? */
229 r100_hdp_reset(rdev); 245 r100_hdp_reset(rdev);
230 /* FIXME: is this correct ? */ 246 /* FIXME: is this correct ? */
231 r420_pipes_init(rdev); 247 r420_pipes_init(rdev);
232 if (r300_mc_wait_for_idle(rdev)) { 248 if (rs400_mc_wait_for_idle(rdev)) {
233 printk(KERN_WARNING "Failed to wait MC idle while " 249 printk(KERN_WARNING "rs400: Failed to wait MC idle while "
234 "programming pipes. Bad things might happen.\n"); 250 "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
235 } 251 }
236} 252}
237 253
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
370 r100_mc_stop(rdev, &save); 386 r100_mc_stop(rdev, &save);
371 387
372 /* Wait for mc idle */ 388 /* Wait for mc idle */
373 if (r300_mc_wait_for_idle(rdev)) 389 if (rs400_mc_wait_for_idle(rdev))
374 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 390 dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
375 WREG32(R_000148_MC_FB_LOCATION, 391 WREG32(R_000148_MC_FB_LOCATION,
376 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 392 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
377 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 393 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
448 464
449void rs400_fini(struct radeon_device *rdev) 465void rs400_fini(struct radeon_device *rdev)
450{ 466{
451 rs400_suspend(rdev);
452 r100_cp_fini(rdev); 467 r100_cp_fini(rdev);
453 r100_wb_fini(rdev); 468 r100_wb_fini(rdev);
454 r100_ib_fini(rdev); 469 r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
527 if (r) { 542 if (r) {
528 /* Somethings want wront with the accel init stop accel */ 543 /* Somethings want wront with the accel init stop accel */
529 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 544 dev_err(rdev->dev, "Disabling GPU acceleration\n");
530 rs400_suspend(rdev);
531 r100_cp_fini(rdev); 545 r100_cp_fini(rdev);
532 r100_wb_fini(rdev); 546 r100_wb_fini(rdev);
533 r100_ib_fini(rdev); 547 r100_ib_fini(rdev);
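
rs400 gains its own MC idle wait instead of borrowing r300's, because the status register differs; the body is the standard poll-bit-with-timeout idiom (read MC_STATUS at 0x0150, test bit 2, delay between reads). Modeled standalone, with read_mc_status() standing in for RREG32:

    #include <stdio.h>

    /* stands in for RREG32(0x0150): reports idle on the third read */
    static unsigned read_mc_status(void)
    {
        static unsigned reads;
        return ++reads >= 3 ? (1u << 2) : 0;
    }

    static int mc_wait_for_idle(unsigned timeout_usec)
    {
        unsigned i;

        for (i = 0; i < timeout_usec; i++) {
            if (read_mc_status() & (1u << 2))  /* MC idle bit */
                return 0;
            /* the driver does DRM_UDELAY(1) here between polls */
        }
        return -1;  /* timed out; caller logs the raw status */
    }

    int main(void)
    {
        printf("idle: %d\n", mc_wait_for_idle(10));  /* prints "idle: 0" */
        return 0;
    }
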
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b3..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
610 610
611void rs600_fini(struct radeon_device *rdev) 611void rs600_fini(struct radeon_device *rdev)
612{ 612{
613 rs600_suspend(rdev);
614 r100_cp_fini(rdev); 613 r100_cp_fini(rdev);
615 r100_wb_fini(rdev); 614 r100_wb_fini(rdev);
616 r100_ib_fini(rdev); 615 r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
689 if (r) { 688 if (r) {
690 /* Somethings want wront with the accel init stop accel */ 689 /* Somethings want wront with the accel init stop accel */
691 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 690 dev_err(rdev->dev, "Disabling GPU acceleration\n");
692 rs600_suspend(rdev);
693 r100_cp_fini(rdev); 691 r100_cp_fini(rdev);
694 r100_wb_fini(rdev); 692 r100_wb_fini(rdev);
695 r100_ib_fini(rdev); 693 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
676 676
677void rs690_fini(struct radeon_device *rdev) 677void rs690_fini(struct radeon_device *rdev)
678{ 678{
679 rs690_suspend(rdev);
680 r100_cp_fini(rdev); 679 r100_cp_fini(rdev);
681 r100_wb_fini(rdev); 680 r100_wb_fini(rdev);
682 r100_ib_fini(rdev); 681 r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
756 if (r) { 755 if (r) {
757 /* Somethings want wront with the accel init stop accel */ 756 /* Somethings want wront with the accel init stop accel */
758 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 757 dev_err(rdev->dev, "Disabling GPU acceleration\n");
759 rs690_suspend(rdev);
760 r100_cp_fini(rdev); 758 r100_cp_fini(rdev);
761 r100_wb_fini(rdev); 759 r100_wb_fini(rdev);
762 r100_ib_fini(rdev); 760 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b044..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
537 537
538void rv515_fini(struct radeon_device *rdev) 538void rv515_fini(struct radeon_device *rdev)
539{ 539{
540 rv515_suspend(rdev);
541 r100_cp_fini(rdev); 540 r100_cp_fini(rdev);
542 r100_wb_fini(rdev); 541 r100_wb_fini(rdev);
543 r100_ib_fini(rdev); 542 r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
615 if (r) { 614 if (r) {
616 /* Somethings want wront with the accel init stop accel */ 615 /* Somethings want wront with the accel init stop accel */
617 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 616 dev_err(rdev->dev, "Disabling GPU acceleration\n");
618 rv515_suspend(rdev);
619 r100_cp_fini(rdev); 617 r100_cp_fini(rdev);
620 r100_wb_fini(rdev); 618 r100_wb_fini(rdev);
621 r100_ib_fini(rdev); 619 r100_ib_fini(rdev);
620 radeon_irq_kms_fini(rdev);
622 rv370_pcie_gart_fini(rdev); 621 rv370_pcie_gart_fini(rdev);
623 radeon_agp_fini(rdev); 622 radeon_agp_fini(rdev);
624 radeon_irq_kms_fini(rdev);
625 rdev->accel_working = false; 623 rdev->accel_working = false;
626 } 624 }
627 return 0; 625 return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index afd9e8213c29..03021674d097 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
549 549
550 gb_tiling_config |= BANK_SWAPS(1); 550 gb_tiling_config |= BANK_SWAPS(1);
551 551
552 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, 552 if (rdev->family == CHIP_RV740)
553 rdev->config.rv770.max_backends, 553 backend_map = 0x28;
554 (0xff << rdev->config.rv770.max_backends) & 0xff); 554 else
555 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
556 rdev->config.rv770.max_backends,
557 (0xff << rdev->config.rv770.max_backends) & 0xff);
555 gb_tiling_config |= BACKEND_MAP(backend_map); 558 gb_tiling_config |= BACKEND_MAP(backend_map);
556 559
557 cc_gc_shader_pipe_config = 560 cc_gc_shader_pipe_config =
@@ -887,6 +890,12 @@ static int rv770_startup(struct radeon_device *rdev)
887 return r; 890 return r;
888 } 891 }
889 rv770_gpu_init(rdev); 892 rv770_gpu_init(rdev);
893 r = r600_blit_init(rdev);
894 if (r) {
895 r600_blit_fini(rdev);
896 rdev->asic->copy = NULL;
897 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
898 }
890 /* pin copy shader into vram */ 899 /* pin copy shader into vram */
891 if (rdev->r600_blit.shader_obj) { 900 if (rdev->r600_blit.shader_obj) {
892 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 901 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1055,19 +1064,15 @@ int rv770_init(struct radeon_device *rdev)
1055 r = r600_pcie_gart_init(rdev); 1064 r = r600_pcie_gart_init(rdev);
1056 if (r) 1065 if (r)
1057 return r; 1066 return r;
1058 r = r600_blit_init(rdev);
1059 if (r) {
1060 r600_blit_fini(rdev);
1061 rdev->asic->copy = NULL;
1062 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1063 }
1064 1067
1065 rdev->accel_working = true; 1068 rdev->accel_working = true;
1066 r = rv770_startup(rdev); 1069 r = rv770_startup(rdev);
1067 if (r) { 1070 if (r) {
1068 rv770_suspend(rdev); 1071 dev_err(rdev->dev, "disabling GPU acceleration\n");
1072 r600_cp_fini(rdev);
1069 r600_wb_fini(rdev); 1073 r600_wb_fini(rdev);
1070 radeon_ring_fini(rdev); 1074 r600_irq_fini(rdev);
1075 radeon_irq_kms_fini(rdev);
1071 rv770_pcie_gart_fini(rdev); 1076 rv770_pcie_gart_fini(rdev);
1072 rdev->accel_working = false; 1077 rdev->accel_working = false;
1073 } 1078 }
@@ -1089,13 +1094,11 @@ int rv770_init(struct radeon_device *rdev)
1089 1094
1090void rv770_fini(struct radeon_device *rdev) 1095void rv770_fini(struct radeon_device *rdev)
1091{ 1096{
1092 rv770_suspend(rdev);
1093
1094 r600_blit_fini(rdev); 1097 r600_blit_fini(rdev);
1098 r600_cp_fini(rdev);
1099 r600_wb_fini(rdev);
1095 r600_irq_fini(rdev); 1100 r600_irq_fini(rdev);
1096 radeon_irq_kms_fini(rdev); 1101 radeon_irq_kms_fini(rdev);
1097 radeon_ring_fini(rdev);
1098 r600_wb_fini(rdev);
1099 rv770_pcie_gart_fini(rdev); 1102 rv770_pcie_gart_fini(rdev);
1100 radeon_gem_fini(rdev); 1103 radeon_gem_fini(rdev);
1101 radeon_fence_driver_fini(rdev); 1104 radeon_fence_driver_fini(rdev);
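
A recurring change across rs400, rs600, rs690, rv515 and rv770 above: the *_suspend() call is dropped from the fini and init-error paths, and teardown is reordered so ring consumers go before the ring's supporting infrastructure; rv770 additionally moves r600_blit_init() from one-time init into rv770_startup() so it runs again on resume. A sketch of the resulting fini shape, with empty stubs in place of the radeon calls:

    #include <stdio.h>

    static void blit_fini(void) { puts("blit"); }
    static void cp_fini(void)   { puts("cp"); }
    static void wb_fini(void)   { puts("wb"); }
    static void irq_fini(void)  { puts("irq"); }
    static void gart_fini(void) { puts("gart"); }

    static void chip_fini(void)
    {
        /* reverse of bring-up: ring consumers first, then the command
         * processor, writeback and interrupts, address translation last */
        blit_fini();
        cp_fini();
        wb_fini();
        irq_fini();
        gart_fini();
    }

    int main(void) { chip_fini(); return 0; }
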
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a3e909b7bba..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1020 struct ttm_mem_reg *mem) 1020 struct ttm_mem_reg *mem)
1021{ 1021{
1022 int i; 1022 int i;
1023 struct drm_mm_node *node = mem->mm_node;
1024
1025 if (node && placement->lpfn != 0 &&
1026 (node->start < placement->fpfn ||
1027 node->start + node->size > placement->lpfn))
1028 return -1;
1023 1029
1024 for (i = 0; i < placement->num_placement; i++) { 1030 for (i = 0; i < placement->num_placement; i++) {
1025 if ((placement->placement[i] & mem->placement & 1031 if ((placement->placement[i] & mem->placement &
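
ttm_bo_mem_compat now also rejects a memory node that falls outside the placement's page-frame window [fpfn, lpfn], with lpfn == 0 meaning unbounded. The containment test by itself, as a standalone check:

    #include <stdbool.h>
    #include <stdio.h>

    /* true when [start, start + size) lies inside the placement window;
     * lpfn == 0 means "no upper bound", matching the driver's convention */
    static bool node_in_range(unsigned long start, unsigned long size,
                              unsigned long fpfn, unsigned long lpfn)
    {
        if (lpfn == 0)
            return true;
        return start >= fpfn && start + size <= lpfn;
    }

    int main(void)
    {
        printf("%d\n", node_in_range(10, 5, 0, 16));  /* 1: fits */
        printf("%d\n", node_in_range(10, 8, 0, 16));  /* 0: spills past lpfn */
        return 0;
    }
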
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e2123af7775a..3d47a2c12322 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
196 196
197#ifdef CONFIG_X86 197#ifdef CONFIG_X86
198static inline int ttm_tt_set_page_caching(struct page *p, 198static inline int ttm_tt_set_page_caching(struct page *p,
199 enum ttm_caching_state c_state) 199 enum ttm_caching_state c_old,
200 enum ttm_caching_state c_new)
200{ 201{
201 int ret = 0; 202 int ret = 0;
202 203
203 if (PageHighMem(p)) 204 if (PageHighMem(p))
204 return 0; 205 return 0;
205 206
206 if (get_page_memtype(p) != -1) { 207 if (c_old != tt_cached) {
207 /* p isn't in the default caching state, set it to 208 /* p isn't in the default caching state, set it to
208 * writeback first to free its current memtype. */ 209 * writeback first to free its current memtype. */
209 210
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
212 return ret; 213 return ret;
213 } 214 }
214 215
215 if (c_state == tt_wc) 216 if (c_new == tt_wc)
216 ret = set_memory_wc((unsigned long) page_address(p), 1); 217 ret = set_memory_wc((unsigned long) page_address(p), 1);
217 else if (c_state == tt_uncached) 218 else if (c_new == tt_uncached)
218 ret = set_pages_uc(p, 1); 219 ret = set_pages_uc(p, 1);
219 220
220 return ret; 221 return ret;
221} 222}
222#else /* CONFIG_X86 */ 223#else /* CONFIG_X86 */
223static inline int ttm_tt_set_page_caching(struct page *p, 224static inline int ttm_tt_set_page_caching(struct page *p,
224 enum ttm_caching_state c_state) 225 enum ttm_caching_state c_old,
226 enum ttm_caching_state c_new)
225{ 227{
226 return 0; 228 return 0;
227} 229}
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
254 for (i = 0; i < ttm->num_pages; ++i) { 256 for (i = 0; i < ttm->num_pages; ++i) {
255 cur_page = ttm->pages[i]; 257 cur_page = ttm->pages[i];
256 if (likely(cur_page != NULL)) { 258 if (likely(cur_page != NULL)) {
257 ret = ttm_tt_set_page_caching(cur_page, c_state); 259 ret = ttm_tt_set_page_caching(cur_page,
260 ttm->caching_state,
261 c_state);
258 if (unlikely(ret != 0)) 262 if (unlikely(ret != 0))
259 goto out_err; 263 goto out_err;
260 } 264 }
@@ -268,7 +272,7 @@ out_err:
268 for (j = 0; j < i; ++j) { 272 for (j = 0; j < i; ++j) {
269 cur_page = ttm->pages[j]; 273 cur_page = ttm->pages[j];
270 if (likely(cur_page != NULL)) { 274 if (likely(cur_page != NULL)) {
271 (void)ttm_tt_set_page_caching(cur_page, 275 (void)ttm_tt_set_page_caching(cur_page, c_state,
272 ttm->caching_state); 276 ttm->caching_state);
273 } 277 }
274 } 278 }
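
ttm_tt_set_page_caching now takes the page's current caching state explicitly instead of querying get_page_memtype(), and the error-unwind call swaps the (old, new) arguments to transition already-converted pages back. The two-step transition, returning to writeback first when leaving a non-default state and then applying the new attribute, in a compile-and-run sketch with stub setters (tt_cached is the default state, mirroring enum ttm_caching_state):

    #include <stdio.h>

    enum caching { tt_uncached, tt_wc, tt_cached };

    static int set_wb(void) { puts("-> writeback"); return 0; }
    static int set_wc(void) { puts("-> write-combined"); return 0; }
    static int set_uc(void) { puts("-> uncached"); return 0; }

    static int set_page_caching(enum caching old, enum caching new)
    {
        int ret = 0;

        if (old != tt_cached)  /* free the current memtype first */
            ret = set_wb();
        if (ret)
            return ret;
        if (new == tt_wc)
            ret = set_wc();
        else if (new == tt_uncached)
            ret = set_uc();
        return ret;            /* new == tt_cached: writeback was enough */
    }

    int main(void)
    {
        set_page_caching(tt_cached, tt_wc);    /* one step */
        set_page_caching(tt_wc, tt_uncached);  /* writeback, then uncached */
        return 0;
    }
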
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f687fa64..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
348 */ 348 */
349 349
350 DRM_INFO("It appears like vesafb is loaded. " 350 DRM_INFO("It appears like vesafb is loaded. "
351 "Ignore above error if any. Entering stealth mode.\n"); 351 "Ignore above error if any.\n");
352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
353 if (unlikely(ret != 0)) { 353 if (unlikely(ret != 0)) {
354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
355 goto out_no_device; 355 goto out_no_device;
356 } 356 }
357 vmw_kms_init(dev_priv);
358 vmw_overlay_init(dev_priv);
359 } else {
360 ret = vmw_request_device(dev_priv);
361 if (unlikely(ret != 0))
362 goto out_no_device;
363 vmw_kms_init(dev_priv);
364 vmw_overlay_init(dev_priv);
365 vmw_fb_init(dev_priv);
366 } 357 }
358 ret = vmw_request_device(dev_priv);
359 if (unlikely(ret != 0))
360 goto out_no_device;
361 vmw_kms_init(dev_priv);
362 vmw_overlay_init(dev_priv);
363 vmw_fb_init(dev_priv);
367 364
368 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 365 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
369 register_pm_notifier(&dev_priv->pm_nb); 366 register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
406 403
407 unregister_pm_notifier(&dev_priv->pm_nb); 404 unregister_pm_notifier(&dev_priv->pm_nb);
408 405
409 if (!dev_priv->stealth) { 406 vmw_fb_close(dev_priv);
410 vmw_fb_close(dev_priv); 407 vmw_kms_close(dev_priv);
411 vmw_kms_close(dev_priv); 408 vmw_overlay_close(dev_priv);
412 vmw_overlay_close(dev_priv); 409 vmw_release_device(dev_priv);
413 vmw_release_device(dev_priv); 410 if (dev_priv->stealth)
414 pci_release_regions(dev->pdev);
415 } else {
416 vmw_kms_close(dev_priv);
417 vmw_overlay_close(dev_priv);
418 pci_release_region(dev->pdev, 2); 411 pci_release_region(dev->pdev, 2);
419 } 412 else
413 pci_release_regions(dev->pdev);
414
420 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 415 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
421 drm_irq_uninstall(dev_priv->dev); 416 drm_irq_uninstall(dev_priv->dev);
422 if (dev->devname == vmw_devname) 417 if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
585 int ret = 0; 580 int ret = 0;
586 581
587 DRM_INFO("Master set.\n"); 582 DRM_INFO("Master set.\n");
588 if (dev_priv->stealth) {
589 ret = vmw_request_device(dev_priv);
590 if (unlikely(ret != 0))
591 return ret;
592 }
593 583
594 if (active) { 584 if (active) {
595 BUG_ON(active != &dev_priv->fbdev_master); 585 BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
649 639
650 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 640 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
651 641
652 if (dev_priv->stealth) {
653 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
654 if (unlikely(ret != 0))
655 DRM_ERROR("Unable to clean VRAM on master drop.\n");
656 vmw_release_device(dev_priv);
657 }
658 dev_priv->active_master = &dev_priv->fbdev_master; 642 dev_priv->active_master = &dev_priv->fbdev_master;
659 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 643 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
660 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 644 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
661 645
662 if (!dev_priv->stealth) 646 vmw_fb_on(dev_priv);
663 vmw_fb_on(dev_priv);
664} 647}
665 648
666 649
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 135be9688c90..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,10 +39,10 @@
39#include "ttm/ttm_execbuf_util.h" 39#include "ttm/ttm_execbuf_util.h"
40#include "ttm/ttm_module.h" 40#include "ttm/ttm_module.h"
41 41
42#define VMWGFX_DRIVER_DATE "20090724" 42#define VMWGFX_DRIVER_DATE "20100209"
43#define VMWGFX_DRIVER_MAJOR 0 43#define VMWGFX_DRIVER_MAJOR 1
44#define VMWGFX_DRIVER_MINOR 1 44#define VMWGFX_DRIVER_MINOR 0
45#define VMWGFX_DRIVER_PATCHLEVEL 2 45#define VMWGFX_DRIVER_PATCHLEVEL 0
46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
48#define VMWGFX_MAX_RELOCATIONS 2048 48#define VMWGFX_MAX_RELOCATIONS 2048
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
113 unsigned long static_buffer_size; 113 unsigned long static_buffer_size;
114 bool using_bounce_buffer; 114 bool using_bounce_buffer;
115 uint32_t capabilities; 115 uint32_t capabilities;
116 struct mutex fifo_mutex;
116 struct rw_semaphore rwsem; 117 struct rw_semaphore rwsem;
117}; 118};
118 119
@@ -213,7 +214,7 @@ struct vmw_private {
213 * Fencing and IRQs. 214 * Fencing and IRQs.
214 */ 215 */
215 216
216 uint32_t fence_seq; 217 atomic_t fence_seq;
217 wait_queue_head_t fence_queue; 218 wait_queue_head_t fence_queue;
218 wait_queue_head_t fifo_queue; 219 wait_queue_head_t fifo_queue;
219 atomic_t fence_queue_waiters; 220 atomic_t fence_queue_waiters;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe7..0897359b3e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); 182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
183} 183}
184 184
185static int vmw_cmd_dma(struct vmw_private *dev_priv, 185static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
186 struct vmw_sw_context *sw_context, 186 struct vmw_sw_context *sw_context,
187 SVGA3dCmdHeader *header) 187 SVGAGuestPtr *ptr,
188 struct vmw_dma_buffer **vmw_bo_p)
188{ 189{
189 uint32_t handle;
190 struct vmw_dma_buffer *vmw_bo = NULL; 190 struct vmw_dma_buffer *vmw_bo = NULL;
191 struct ttm_buffer_object *bo; 191 struct ttm_buffer_object *bo;
192 struct vmw_surface *srf = NULL; 192 uint32_t handle = ptr->gmrId;
193 struct vmw_dma_cmd {
194 SVGA3dCmdHeader header;
195 SVGA3dCmdSurfaceDMA dma;
196 } *cmd;
197 struct vmw_relocation *reloc; 193 struct vmw_relocation *reloc;
198 int ret;
199 uint32_t cur_validate_node; 194 uint32_t cur_validate_node;
200 struct ttm_validate_buffer *val_buf; 195 struct ttm_validate_buffer *val_buf;
196 int ret;
201 197
202 cmd = container_of(header, struct vmw_dma_cmd, header);
203 handle = cmd->dma.guest.ptr.gmrId;
204 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 198 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
205 if (unlikely(ret != 0)) { 199 if (unlikely(ret != 0)) {
206 DRM_ERROR("Could not find or use GMR region.\n"); 200 DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
209 bo = &vmw_bo->base; 203 bo = &vmw_bo->base;
210 204
211 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 205 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
212 DRM_ERROR("Max number of DMA commands per submission" 206 DRM_ERROR("Max number relocations per submission"
213 " exceeded\n"); 207 " exceeded\n");
214 ret = -EINVAL; 208 ret = -EINVAL;
215 goto out_no_reloc; 209 goto out_no_reloc;
216 } 210 }
217 211
218 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 212 reloc = &sw_context->relocs[sw_context->cur_reloc++];
219 reloc->location = &cmd->dma.guest.ptr; 213 reloc->location = ptr;
220 214
221 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); 215 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
222 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { 216 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
234 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 228 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
235 ++sw_context->cur_val_buf; 229 ++sw_context->cur_val_buf;
236 } 230 }
231 *vmw_bo_p = vmw_bo;
232 return 0;
233
234out_no_reloc:
235 vmw_dmabuf_unreference(&vmw_bo);
236 vmw_bo_p = NULL;
237 return ret;
238}
239
240static int vmw_cmd_end_query(struct vmw_private *dev_priv,
241 struct vmw_sw_context *sw_context,
242 SVGA3dCmdHeader *header)
243{
244 struct vmw_dma_buffer *vmw_bo;
245 struct vmw_query_cmd {
246 SVGA3dCmdHeader header;
247 SVGA3dCmdEndQuery q;
248 } *cmd;
249 int ret;
250
251 cmd = container_of(header, struct vmw_query_cmd, header);
252 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
253 if (unlikely(ret != 0))
254 return ret;
255
256 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
257 &cmd->q.guestResult,
258 &vmw_bo);
259 if (unlikely(ret != 0))
260 return ret;
261
262 vmw_dmabuf_unreference(&vmw_bo);
263 return 0;
264}
237 265
266static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
267 struct vmw_sw_context *sw_context,
268 SVGA3dCmdHeader *header)
269{
270 struct vmw_dma_buffer *vmw_bo;
271 struct vmw_query_cmd {
272 SVGA3dCmdHeader header;
273 SVGA3dCmdWaitForQuery q;
274 } *cmd;
275 int ret;
276
277 cmd = container_of(header, struct vmw_query_cmd, header);
278 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
279 if (unlikely(ret != 0))
280 return ret;
281
282 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
283 &cmd->q.guestResult,
284 &vmw_bo);
285 if (unlikely(ret != 0))
286 return ret;
287
288 vmw_dmabuf_unreference(&vmw_bo);
289 return 0;
290}
291
292
293static int vmw_cmd_dma(struct vmw_private *dev_priv,
294 struct vmw_sw_context *sw_context,
295 SVGA3dCmdHeader *header)
296{
297 struct vmw_dma_buffer *vmw_bo = NULL;
298 struct ttm_buffer_object *bo;
299 struct vmw_surface *srf = NULL;
300 struct vmw_dma_cmd {
301 SVGA3dCmdHeader header;
302 SVGA3dCmdSurfaceDMA dma;
303 } *cmd;
304 int ret;
305
306 cmd = container_of(header, struct vmw_dma_cmd, header);
307 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
308 &cmd->dma.guest.ptr,
309 &vmw_bo);
310 if (unlikely(ret != 0))
311 return ret;
312
313 bo = &vmw_bo->base;
238 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, 314 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
239 cmd->dma.host.sid, &srf); 315 cmd->dma.host.sid, &srf);
240 if (ret) { 316 if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
379 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), 455 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
380 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 456 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
381 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), 457 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
382 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), 458 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
383 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), 459 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
384 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), 460 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
385 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 461 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
386 &vmw_cmd_blt_surf_screen_check) 462 &vmw_cmd_blt_surf_screen_check)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8b..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
559 info->pixmap.scan_align = 1; 559 info->pixmap.scan_align = 1;
560#endif 560#endif
561 561
562 info->aperture_base = vmw_priv->vram_start;
563 info->aperture_size = vmw_priv->vram_size;
564
562 /* 565 /*
563 * Dirty & Deferred IO 566 * Dirty & Deferred IO
564 */ 567 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 4157547cc6e4..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
74 fifo->reserved_size = 0; 74 fifo->reserved_size = 0;
75 fifo->using_bounce_buffer = false; 75 fifo->using_bounce_buffer = false;
76 76
77 mutex_init(&fifo->fifo_mutex);
77 init_rwsem(&fifo->rwsem); 78 init_rwsem(&fifo->rwsem);
78 79
79 /* 80 /*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
117 (unsigned int) min, 118 (unsigned int) min,
118 (unsigned int) fifo->capabilities); 119 (unsigned int) fifo->capabilities);
119 120
120 dev_priv->fence_seq = dev_priv->last_read_sequence; 121 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
121 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 122 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
122 123
123 return vmw_fifo_send_fence(dev_priv, &dummy); 124 return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
283 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 284 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
284 int ret; 285 int ret;
285 286
286 down_write(&fifo_state->rwsem); 287 mutex_lock(&fifo_state->fifo_mutex);
287 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 288 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
288 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 289 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
289 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 290 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
351 } 352 }
352out_err: 353out_err:
353 fifo_state->reserved_size = 0; 354 fifo_state->reserved_size = 0;
354 up_write(&fifo_state->rwsem); 355 mutex_unlock(&fifo_state->fifo_mutex);
355 return NULL; 356 return NULL;
356} 357}
357 358
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
426 427
427 } 428 }
428 429
430 down_write(&fifo_state->rwsem);
429 if (fifo_state->using_bounce_buffer || reserveable) { 431 if (fifo_state->using_bounce_buffer || reserveable) {
430 next_cmd += bytes; 432 next_cmd += bytes;
431 if (next_cmd >= max) 433 if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
437 if (reserveable) 439 if (reserveable)
438 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); 440 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
439 mb(); 441 mb();
440 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
441 up_write(&fifo_state->rwsem); 442 up_write(&fifo_state->rwsem);
443 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
444 mutex_unlock(&fifo_state->fifo_mutex);
442} 445}
443 446
444int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) 447int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
451 454
452 fm = vmw_fifo_reserve(dev_priv, bytes); 455 fm = vmw_fifo_reserve(dev_priv, bytes);
453 if (unlikely(fm == NULL)) { 456 if (unlikely(fm == NULL)) {
454 down_write(&fifo_state->rwsem); 457 *sequence = atomic_read(&dev_priv->fence_seq);
455 *sequence = dev_priv->fence_seq;
456 up_write(&fifo_state->rwsem);
457 ret = -ENOMEM; 458 ret = -ENOMEM;
458 (void)vmw_fallback_wait(dev_priv, false, true, *sequence, 459 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
459 false, 3*HZ); 460 false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
461 } 462 }
462 463
463 do { 464 do {
464 *sequence = dev_priv->fence_seq++; 465 *sequence = atomic_add_return(1, &dev_priv->fence_seq);
465 } while (*sequence == 0); 466 } while (*sequence == 0);
466 467
467 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { 468 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
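
With fence_seq now an atomic_t, the fallback and sequence-allocation paths no longer need the FIFO rwsem; vmw_fifo_send_fence draws sequences with atomic_add_return() and keeps looping past 0, which is reserved. Modeled with C11 atomics; atomic_fetch_add returns the old value, so the +1 reproduces atomic_add_return:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint fence_seq;

    static uint32_t next_sequence(void)
    {
        uint32_t seq;

        do {
            seq = atomic_fetch_add(&fence_seq, 1) + 1;
        } while (seq == 0);  /* 0 means "no fence": never hand it out */
        return seq;
    }

    int main(void)
    {
        atomic_store(&fence_seq, UINT32_MAX);  /* force a wraparound */
        printf("%u %u\n", next_sequence(), next_sequence());  /* "1 2" */
        return 0;
    }
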
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 778851f9f1d6..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
48 case DRM_VMW_PARAM_FIFO_OFFSET: 48 case DRM_VMW_PARAM_FIFO_OFFSET:
49 param->value = dev_priv->mmio_start; 49 param->value = dev_priv->mmio_start;
50 break; 50 break;
51 case DRM_VMW_PARAM_HW_CAPS:
52 param->value = dev_priv->capabilities;
53 break;
54 case DRM_VMW_PARAM_FIFO_CAPS:
55 param->value = dev_priv->fifo.capabilities;
56 break;
51 default: 57 default:
52 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 58 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
53 param->param); 59 param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
85 return true; 85 return true;
86 86
87 /** 87 /**
88 * Below is to signal stale fences that have wrapped.
89 * First, block fence submission.
90 */
91
92 down_read(&fifo_state->rwsem);
93
94 /**
95 * Then check if the sequence is higher than what we've actually 88 * Then check if the sequence is higher than what we've actually
96 * emitted. Then the fence is stale and signaled. 89 * emitted. Then the fence is stale and signaled.
97 */ 90 */
98 91
99 ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); 92 ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
100 up_read(&fifo_state->rwsem); 93 > VMW_FENCE_WRAP);
101 94
102 return ret; 95 return ret;
103} 96}
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
127 120
128 if (fifo_idle) 121 if (fifo_idle)
129 down_read(&fifo_state->rwsem); 122 down_read(&fifo_state->rwsem);
130 signal_seq = dev_priv->fence_seq; 123 signal_seq = atomic_read(&dev_priv->fence_seq);
131 ret = 0; 124 ret = 0;
132 125
133 for (;;) { 126 for (;;) {
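
vmw_fence_signaled's stale test no longer needs the rwsem either: a single atomic read and an unsigned subtraction give the distance from the queried sequence to the last one emitted, and that distance stays meaningful across 32-bit wraparound. Standalone demonstration; FENCE_WRAP here is a placeholder threshold, not the driver's VMW_FENCE_WRAP value:

    #include <stdint.h>
    #include <stdio.h>

    #define FENCE_WRAP (1u << 24)  /* placeholder, not the real constant */

    static int fence_stale(uint32_t emitted, uint32_t sequence)
    {
        /* unsigned subtraction wraps, so "emitted - sequence" is the
         * forward distance even after the counter rolls over */
        return (uint32_t)(emitted - sequence) > FENCE_WRAP;
    }

    int main(void)
    {
        printf("%d\n", fence_stale(100, 50));            /* 0: recent fence */
        printf("%d\n", fence_stale(5, UINT32_MAX - 5));  /* 0: spans the wrap */
        printf("%d\n", fence_stale(1u << 25, 0));        /* 1: stale */
        return 0;
    }
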
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index eeba6d1d06e4..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
769 769
770 drm_mode_config_init(dev); 770 drm_mode_config_init(dev);
771 dev->mode_config.funcs = &vmw_kms_funcs; 771 dev->mode_config.funcs = &vmw_kms_funcs;
772 dev->mode_config.min_width = 640; 772 dev->mode_config.min_width = 1;
773 dev->mode_config.min_height = 480; 773 dev->mode_config.min_height = 1;
774 dev->mode_config.max_width = 2048; 774 dev->mode_config.max_width = dev_priv->fb_max_width;
775 dev->mode_config.max_height = 2048; 775 dev->mode_config.max_height = dev_priv->fb_max_height;
776 776
777 ret = vmw_kms_init_legacy_display_system(dev_priv); 777 ret = vmw_kms_init_legacy_display_system(dev_priv);
778 778
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c7efbd47ab84..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,11 +35,6 @@
35#define VMW_RES_SURFACE ttm_driver_type1 35#define VMW_RES_SURFACE ttm_driver_type1
36#define VMW_RES_STREAM ttm_driver_type2 36#define VMW_RES_STREAM ttm_driver_type2
37 37
38/* XXX: This isn't a real hardware flag, but just a hack for kernel to
39 * know about primary surfaces. Find a better way to accomplish this.
40 */
41#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
42
43struct vmw_user_context { 38struct vmw_user_context {
44 struct ttm_base_object base; 39 struct ttm_base_object base;
45 struct vmw_resource res; 40 struct vmw_resource res;
@@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
579 574
580 srf->flags = req->flags; 575 srf->flags = req->flags;
581 srf->format = req->format; 576 srf->format = req->format;
577 srf->scanout = req->scanout;
582 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); 578 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
583 srf->num_sizes = 0; 579 srf->num_sizes = 0;
584 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 580 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
604 if (unlikely(ret != 0)) 600 if (unlikely(ret != 0))
605 goto out_err1; 601 goto out_err1;
606 602
607 if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
608 /* we should not send this flag down to hardware since
609 * its not a official one
610 */
611 srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
612 srf->scanout = true;
613 } else {
614 srf->scanout = false;
615 }
616
617 if (srf->scanout && 603 if (srf->scanout &&
618 srf->num_sizes == 1 && 604 srf->num_sizes == 1 &&
619 srf->sizes[0].width == 64 && 605 srf->sizes[0].width == 64 &&
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93603c9..2f6cf69ecb39 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
961 remaining -= 7; 961 remaining -= 7;
962 pr_devel("client 0x%p called 'target'\n", priv); 962 pr_devel("client 0x%p called 'target'\n", priv);
963 /* if target is default */ 963 /* if target is default */
964 if (!strncmp(buf, "default", 7)) 964 if (!strncmp(curr_pos, "default", 7))
965 pdev = pci_dev_get(vga_default_device()); 965 pdev = pci_dev_get(vga_default_device());
966 else { 966 else {
967 if (!vga_pci_str_to_vars(curr_pos, remaining, 967 if (!vga_pci_str_to_vars(curr_pos, remaining,
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index a31e77c776ae..b8156b4893bb 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
179 * 179 *
180 * Some, but not all, of these voltages have low/high limits. 180 * Some, but not all, of these voltages have low/high limits.
181 */ 181 */
182#define ADT7462_VOLT_COUNT 12 182#define ADT7462_VOLT_COUNT 13
183 183
184#define ADT7462_VENDOR 0x41 184#define ADT7462_VENDOR 0x41
185#define ADT7462_DEVICE 0x62 185#define ADT7462_DEVICE 0x62
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index cadcbd90ff3b..72ff2c4e757d 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
851static int __init lm78_isa_found(unsigned short address) 851static int __init lm78_isa_found(unsigned short address)
852{ 852{
853 int val, save, found = 0; 853 int val, save, found = 0;
854 854 int port;
855 /* We have to request the region in two parts because some 855
856 boards declare base+4 to base+7 as a PNP device */ 856 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
857 if (!request_region(address, 4, "lm78")) { 857 * to base+7 and some base+5 to base+6. So we better request each port
858 pr_debug("lm78: Failed to request low part of region\n"); 858 * individually for the probing phase. */
859 return 0; 859 for (port = address; port < address + LM78_EXTENT; port++) {
860 } 860 if (!request_region(port, 1, "lm78")) {
861 if (!request_region(address + 4, 4, "lm78")) { 861 pr_debug("lm78: Failed to request port 0x%x\n", port);
862 pr_debug("lm78: Failed to request high part of region\n"); 862 goto release;
863 release_region(address, 4); 863 }
864 return 0;
865 } 864 }
866 865
867#define REALLY_SLOW_IO 866#define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
925 val & 0x80 ? "LM79" : "LM78", (int)address); 924 val & 0x80 ? "LM79" : "LM78", (int)address);
926 925
927 release: 926 release:
928 release_region(address + 4, 4); 927 for (port--; port >= address; port--)
929 release_region(address, 4); 928 release_region(port, 1);
930 return found; 929 return found;
931} 930}
932 931
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 05f9225b6f94..32d4adee73db 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1793,17 +1793,17 @@ static int __init
1793w83781d_isa_found(unsigned short address) 1793w83781d_isa_found(unsigned short address)
1794{ 1794{
1795 int val, save, found = 0; 1795 int val, save, found = 0;
1796 1796 int port;
1797 /* We have to request the region in two parts because some 1797
1798 boards declare base+4 to base+7 as a PNP device */ 1798 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
1799 if (!request_region(address, 4, "w83781d")) { 1799 * to base+7 and some base+5 to base+6. So we better request each port
1800 pr_debug("w83781d: Failed to request low part of region\n"); 1800 * individually for the probing phase. */
1801 return 0; 1801 for (port = address; port < address + W83781D_EXTENT; port++) {
1802 } 1802 if (!request_region(port, 1, "w83781d")) {
1803 if (!request_region(address + 4, 4, "w83781d")) { 1803 pr_debug("w83781d: Failed to request port 0x%x\n",
1804 pr_debug("w83781d: Failed to request high part of region\n"); 1804 port);
1805 release_region(address, 4); 1805 goto release;
1806 return 0; 1806 }
1807 } 1807 }
1808 1808
1809#define REALLY_SLOW_IO 1809#define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
1877 val == 0x30 ? "W83782D" : "W83781D", (int)address); 1877 val == 0x30 ? "W83782D" : "W83781D", (int)address);
1878 1878
1879 release: 1879 release:
1880 release_region(address + 4, 4); 1880 for (port--; port >= address; port--)
1881 release_region(address, 4); 1881 release_region(port, 1);
1882 return found; 1882 return found;
1883} 1883}
1884 1884
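
Both the lm78 and w83781d ISA probes switch from two fixed request_region() chunks to claiming each of the eight ports individually, because different boards reserve different sub-ranges as PNP devices; on any failure (and after a successful probe) the loop unwinds in reverse, releasing exactly the ports it claimed. The claim/unwind skeleton, standalone, with claim()/release() in place of request_region()/release_region():

    #include <stdbool.h>
    #include <stdio.h>

    #define EXTENT 8  /* LM78_EXTENT / W83781D_EXTENT */

    static bool claim(int port)   { return port != 0x294; /* base+4 busy */ }
    static void release(int port) { (void)port; }

    static int probe(int base)
    {
        int port, found = 0;

        for (port = base; port < base + EXTENT; port++) {
            if (!claim(port)) {
                printf("failed to claim port 0x%x\n", port);
                goto release;
            }
        }
        found = 1;  /* the real drivers read chip registers here */
    release:
        for (port--; port >= base; port--)
            release(port);  /* unwind only the ports actually claimed */
        return found;
    }

    int main(void) { printf("found: %d\n", probe(0x290)); return 0; }
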
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index b1c050ff311d..e29b6d5ba8ef 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/types.h>
16 17
17/* include interfaces to usb layer */ 18/* include interfaces to usb layer */
18#include <linux/usb.h> 19#include <linux/usb.h>
@@ -31,8 +32,8 @@
31#define CMD_I2C_IO_END (1<<1) 32#define CMD_I2C_IO_END (1<<1)
32 33
33/* i2c bit delay, default is 10us -> 100kHz */ 34/* i2c bit delay, default is 10us -> 100kHz */
34static int delay = 10; 35static unsigned short delay = 10;
35module_param(delay, int, 0); 36module_param(delay, ushort, 0);
36MODULE_PARM_DESC(delay, "bit delay in microseconds, " 37MODULE_PARM_DESC(delay, "bit delay in microseconds, "
37 "e.g. 10 for 100kHz (default is 100kHz)"); 38 "e.g. 10 for 100kHz (default is 100kHz)");
38 39
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
109 110
110static u32 usb_func(struct i2c_adapter *adapter) 111static u32 usb_func(struct i2c_adapter *adapter)
111{ 112{
112 u32 func; 113 __le32 func;
113 114
114 /* get functionality from adapter */ 115 /* get functionality from adapter */
115 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != 116 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
118 return 0; 119 return 0;
119 } 120 }
120 121
121 return func; 122 return le32_to_cpu(func);
122} 123}
123 124
124/* This is the actual algorithm we define */ 125/* This is the actual algorithm we define */
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
216 "i2c-tiny-usb at bus %03d device %03d", 217 "i2c-tiny-usb at bus %03d device %03d",
217 dev->usb_dev->bus->busnum, dev->usb_dev->devnum); 218 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
218 219
219 if (usb_write(&dev->adapter, CMD_SET_DELAY, 220 if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
220 cpu_to_le16(delay), 0, NULL, 0) != 0) {
221 dev_err(&dev->adapter.dev, 221 dev_err(&dev->adapter.dev,
222 "failure setting delay to %dus\n", delay); 222 "failure setting delay to %dus\n", delay);
223 retval = -EIO; 223 retval = -EIO;
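Both halves of this patch are endianness fixes: the functionality mask arrives from the device little-endian, so it is held in a __le32 and converted once with le32_to_cpu(); the delay travels in the wValue field of the control request, which usb_control_msg() already byte-swaps internally, so the explicit cpu_to_le16() double-converted on big-endian hosts. A hedged sketch of the receive side; read_le32() stands in for the driver's usb_read() helper and is an assumption:

#include <linux/types.h>
#include <asm/byteorder.h>

static u32 get_functionality(void)
{
	__le32 wire;	/* exactly as transmitted by the device */

	/* read_le32() is a stand-in for the driver's usb_read() */
	if (read_le32(&wire, sizeof(wire)) != sizeof(wire))
		return 0;	/* report no functionality on I/O error */

	return le32_to_cpu(wire);	/* CPU byte order for the I2C core */
}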
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cc9b5940fa97..875e34e0b235 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2115 if (ret) 2115 if (ret)
2116 goto err1; 2116 goto err1;
2117 2117
2118 if (cma_loopback_addr(addr)) { 2118 if (!cma_any_addr(addr)) {
2119 ret = cma_bind_loopback(id_priv);
2120 } else if (!cma_zero_addr(addr)) {
2121 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2119 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2122 if (ret) 2120 if (ret)
2123 goto err1; 2121 goto err1;
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index aa6713b4a988..291d9393d359 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input)
100 struct input_polled_dev *dev = input_get_drvdata(input); 100 struct input_polled_dev *dev = input_get_drvdata(input);
101 101
102 cancel_delayed_work_sync(&dev->work); 102 cancel_delayed_work_sync(&dev->work);
103 /*
104 * Clean up work struct to remove references to the workqueue.
105 * It may be destroyed by the next call. This causes problems
106 * at next device open-close in case of poll_interval == 0.
107 */
108 INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
103 input_polldev_stop_workqueue(); 109 input_polldev_stop_workqueue();
104 110
105 if (dev->close) 111 if (dev->close)
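The re-initialization is the whole fix: input_polldev_stop_workqueue() drops a reference that may destroy the shared workqueue, so the delayed work must not keep a pointer into it, or the next open/close cycle (notably with poll_interval == 0) touches freed memory. The close path, paraphrased from the hunk; not independently buildable, since the stop helper is file-local:

cancel_delayed_work_sync(&dev->work);	/* no poll runs past this point */
INIT_DELAYED_WORK(&dev->work, dev->work.work.func);	/* same handler, no stale workqueue reference */
input_polldev_stop_workqueue();	/* may free the shared workqueue */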
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 9774bdfaa482..d8c0c8d6992c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio)
1141 psmouse_deactivate(parent); 1141 psmouse_deactivate(parent);
1142 } 1142 }
1143 1143
1144 psmouse_deactivate(psmouse); 1144 psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
1145
1146 /*
1147 * Disable stream mode so cleanup routine can proceed undisturbed.
1148 */
1149 if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
1150 printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
1151 psmouse->ps2dev.serio->phys);
1145 1152
1146 if (psmouse->cleanup) 1153 if (psmouse->cleanup)
1147 psmouse->cleanup(psmouse); 1154 psmouse->cleanup(psmouse);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d84a36e545f6..b54aee7cd9e3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
1161 return 0; 1161 return 0;
1162} 1162}
1163 1163
1164static int i8042_pm_thaw(struct device *dev)
1165{
1166 i8042_interrupt(0, NULL);
1167
1168 return 0;
1169}
1170
1164static const struct dev_pm_ops i8042_pm_ops = { 1171static const struct dev_pm_ops i8042_pm_ops = {
1165 .suspend = i8042_pm_reset, 1172 .suspend = i8042_pm_reset,
1166 .resume = i8042_pm_restore, 1173 .resume = i8042_pm_restore,
1174 .thaw = i8042_pm_thaw,
1167 .poweroff = i8042_pm_reset, 1175 .poweroff = i8042_pm_reset,
1168 .restore = i8042_pm_restore, 1176 .restore = i8042_pm_restore,
1169}; 1177};
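The new .thaw hook runs after the hibernation image has been written, when the controller may be holding unfetched bytes; invoking the interrupt handler once by hand drains them so input does not appear dead afterwards. A hedged sketch of wiring such a handler into the 2.6.33-era dev_pm_ops; my_thaw() and my_pm_ops are illustrative names:

#include <linux/pm.h>

static int my_thaw(struct device *dev)
{
	/* poll once to drain bytes buffered while the image was written */
	i8042_interrupt(0, NULL);
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	.suspend	= i8042_pm_reset,
	.resume		= i8042_pm_restore,
	.thaw		= my_thaw,
	.poweroff	= i8042_pm_reset,
	.restore	= i8042_pm_restore,
};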
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 09a5e7341bd5..5256123a5228 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
618#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH 618#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
619static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) 619static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
620{ 620{
621 dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ; 621 dev->x = (pkt[2] << 8) | pkt[1];
622 dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ; 622 dev->y = (pkt[4] << 8) | pkt[3];
623 dev->press = pkt[5] & 0xff; 623 dev->press = pkt[5] & 0xff;
624 dev->touch = pkt[0] & 0x01; 624 dev->touch = pkt[0] & 0x01;
625 625
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
809#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH 809#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
810 [DEVTYPE_GENERAL_TOUCH] = { 810 [DEVTYPE_GENERAL_TOUCH] = {
811 .min_xc = 0x0, 811 .min_xc = 0x0,
812 .max_xc = 0x0500, 812 .max_xc = 0x7fff,
813 .min_yc = 0x0, 813 .min_yc = 0x0,
814 .max_yc = 0x0500, 814 .max_yc = 0x7fff,
815 .rept_size = 7, 815 .rept_size = 7,
816 .read_data = general_touch_read_data, 816 .read_data = general_touch_read_data,
817 }, 817 },
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 54abf9e303b7..f1c8cae70b4b 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
172{ 172{
173 int r = 0; 173 int r = 0;
174 size_t dummy = 0; 174 size_t dummy = 0;
175 int overhead_size = 175 int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
176 sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
177 struct dm_ulog_request *tfr = prealloced_ulog_tfr; 176 struct dm_ulog_request *tfr = prealloced_ulog_tfr;
178 struct receiving_pkg pkg; 177 struct receiving_pkg pkg;
179 178
179 /*
180 * Given the space needed to hold the 'struct cn_msg' and
181 * 'struct dm_ulog_request' - do we have enough payload
182 * space remaining?
183 */
180 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { 184 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
181 DMINFO("Size of tfr exceeds preallocated size"); 185 DMINFO("Size of tfr exceeds preallocated size");
182 return -EINVAL; 186 return -EINVAL;
@@ -191,7 +195,7 @@ resend:
191 */ 195 */
192 mutex_lock(&dm_ulog_lock); 196 mutex_lock(&dm_ulog_lock);
193 197
194 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); 198 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
195 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 199 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
196 tfr->luid = luid; 200 tfr->luid = luid;
197 tfr->seq = dm_ulog_seq++; 201 tfr->seq = dm_ulog_seq++;
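The first hunk fixes a classic sizeof mistake: sizeof(struct dm_ulog_request *) measures a pointer, 4 or 8 bytes, not the structure itself, so the reserved overhead was far too small; the second hunk then widens the memset to cover everything past the cn_msg header. A tiny user-space illustration of the pointer/struct distinction; struct big is made up:

#include <stdio.h>

struct big { char payload[296]; };

int main(void)
{
	printf("%zu\n", sizeof(struct big *));	/* 8 on LP64: size of a pointer */
	printf("%zu\n", sizeof(struct big));	/* 296: size of the object */
	return 0;
}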
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ad779bd13aec..6c1046df81f6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
724 /* 724 /*
725 * Dispatch io. 725 * Dispatch io.
726 */ 726 */
727 if (unlikely(ms->log_failure)) { 727 if (unlikely(ms->log_failure) && errors_handled(ms)) {
728 spin_lock_irq(&ms->lock); 728 spin_lock_irq(&ms->lock);
729 bio_list_merge(&ms->failures, &sync); 729 bio_list_merge(&ms->failures, &sync);
730 spin_unlock_irq(&ms->lock); 730 spin_unlock_irq(&ms->lock);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 5f19ceb6fe91..168bd38f5006 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
660 spin_lock_irq(&rh->region_lock); 660 spin_lock_irq(&rh->region_lock);
661 if (success) 661 if (success)
662 list_add(&reg->list, &reg->rh->recovered_regions); 662 list_add(&reg->list, &reg->rh->recovered_regions);
663 else { 663 else
664 reg->state = DM_RH_NOSYNC;
665 list_add(&reg->list, &reg->rh->failed_recovered_regions); 664 list_add(&reg->list, &reg->rh->failed_recovered_regions);
666 } 665
667 spin_unlock_irq(&rh->region_lock); 666 spin_unlock_irq(&rh->region_lock);
668 667
669 rh->wakeup_workers(rh->context); 668 rh->wakeup_workers(rh->context);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 7d08879689ac..c097d8a4823d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
254 * Issue the synchronous I/O from a different thread 254 * Issue the synchronous I/O from a different thread
255 * to avoid generic_make_request recursion. 255 * to avoid generic_make_request recursion.
256 */ 256 */
257 INIT_WORK(&req.work, do_metadata); 257 INIT_WORK_ON_STACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_workqueue(ps->metadata_wq); 259 flush_workqueue(ps->metadata_wq);
260 260
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e0efc1adcaff..bd58703ee8f6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
110 } 110 }
111 111
112 stripes = simple_strtoul(argv[0], &end, 10); 112 stripes = simple_strtoul(argv[0], &end, 10);
113 if (*end) { 113 if (!stripes || *end) {
114 ti->error = "Invalid stripe count"; 114 ti->error = "Invalid stripe count";
115 return -EINVAL; 115 return -EINVAL;
116 } 116 }
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index f53392df7b97..f91b40942e07 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = {
80}; 80};
81 81
82/* 82/*
83 * The sysfs structure is embedded in md struct, nothing to do here
84 */
85static void dm_sysfs_release(struct kobject *kobj)
86{
87}
88
89/*
90 * dm kobject is embedded in mapped_device structure 83 * dm kobject is embedded in mapped_device structure
91 * no need to define release function here 84 * no need to define release function here
92 */ 85 */
93static struct kobj_type dm_ktype = { 86static struct kobj_type dm_ktype = {
94 .sysfs_ops = &dm_sysfs_ops, 87 .sysfs_ops = &dm_sysfs_ops,
95 .default_attrs = dm_attrs, 88 .default_attrs = dm_attrs,
96 .release = dm_sysfs_release
97}; 89};
98 90
99/* 91/*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3167480b532c..aa4e2aa86d49 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
1595 return BLKPREP_OK; 1595 return BLKPREP_OK;
1596} 1596}
1597 1597
1598static void map_request(struct dm_target *ti, struct request *clone, 1598/*
1599 struct mapped_device *md) 1599 * Returns:
1600 * 0 : the request has been processed (not requeued)
1601 * !0 : the request has been requeued
1602 */
1603static int map_request(struct dm_target *ti, struct request *clone,
1604 struct mapped_device *md)
1600{ 1605{
1601 int r; 1606 int r, requeued = 0;
1602 struct dm_rq_target_io *tio = clone->end_io_data; 1607 struct dm_rq_target_io *tio = clone->end_io_data;
1603 1608
1604 /* 1609 /*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
1625 case DM_MAPIO_REQUEUE: 1630 case DM_MAPIO_REQUEUE:
1626 /* The target wants to requeue the I/O */ 1631 /* The target wants to requeue the I/O */
1627 dm_requeue_unmapped_request(clone); 1632 dm_requeue_unmapped_request(clone);
1633 requeued = 1;
1628 break; 1634 break;
1629 default: 1635 default:
1630 if (r > 0) { 1636 if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
1636 dm_kill_unmapped_request(clone, r); 1642 dm_kill_unmapped_request(clone, r);
1637 break; 1643 break;
1638 } 1644 }
1645
1646 return requeued;
1639} 1647}
1640 1648
1641/* 1649/*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
1677 atomic_inc(&md->pending[rq_data_dir(clone)]); 1685 atomic_inc(&md->pending[rq_data_dir(clone)]);
1678 1686
1679 spin_unlock(q->queue_lock); 1687 spin_unlock(q->queue_lock);
1680 map_request(ti, clone, md); 1688 if (map_request(ti, clone, md))
1689 goto requeued;
1690
1681 spin_lock_irq(q->queue_lock); 1691 spin_lock_irq(q->queue_lock);
1682 } 1692 }
1683 1693
1684 goto out; 1694 goto out;
1685 1695
1696requeued:
1697 spin_lock_irq(q->queue_lock);
1698
1686plug_and_out: 1699plug_and_out:
1687 if (!elv_queue_empty(q)) 1700 if (!elv_queue_empty(q))
1688 /* Some requests still remain, retry later */ 1701 /* Some requests still remain, retry later */
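map_request() now reports whether the clone was requeued (the 0 / !0 contract in the new comment), so dm_request_fn() can stop dispatching and re-take the queue lock on that path rather than touch a request it no longer owns. A hedged sketch of the caller's shape, assuming the hunk's names; it mirrors the original's unlock/relock asymmetry:

static void dispatch_one(struct request_queue *q, struct dm_target *ti,
			 struct request *clone, struct mapped_device *md)
{
	spin_unlock(q->queue_lock);

	if (map_request(ti, clone, md)) {
		/* requeued: the request belongs to the queue again */
		spin_lock_irq(q->queue_lock);
		return;
	}

	spin_lock_irq(q->queue_lock);	/* processed: the loop continues */
}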
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dd3dfe42d5a9..a20a71e5efd3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
4075{ 4075{
4076 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4076 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4077 4077
4078 if (mddev->private == &md_redundancy_group) { 4078 if (mddev->private) {
4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4080 if (mddev->private != (void*)1)
4081 sysfs_remove_group(&mddev->kobj, mddev->private);
4080 if (mddev->sysfs_action) 4082 if (mddev->sysfs_action)
4081 sysfs_put(mddev->sysfs_action); 4083 sysfs_put(mddev->sysfs_action);
4082 mddev->sysfs_action = NULL; 4084 mddev->sysfs_action = NULL;
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
4287 sysfs_notify_dirent(rdev->sysfs_state); 4289 sysfs_notify_dirent(rdev->sysfs_state);
4288 } 4290 }
4289 4291
4290 md_probe(mddev->unit, NULL, NULL);
4291 disk = mddev->gendisk; 4292 disk = mddev->gendisk;
4292 if (!disk)
4293 return -ENOMEM;
4294 4293
4295 spin_lock(&pers_lock); 4294 spin_lock(&pers_lock);
4296 pers = find_pers(mddev->level, mddev->clevel); 4295 pers = find_pers(mddev->level, mddev->clevel);
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4530 mddev->queue->unplug_fn = NULL; 4529 mddev->queue->unplug_fn = NULL;
4531 mddev->queue->backing_dev_info.congested_fn = NULL; 4530 mddev->queue->backing_dev_info.congested_fn = NULL;
4532 module_put(mddev->pers->owner); 4531 module_put(mddev->pers->owner);
4533 if (mddev->pers->sync_request) 4532 if (mddev->pers->sync_request && mddev->private == NULL)
4534 mddev->private = &md_redundancy_group; 4533 mddev->private = (void*)1;
4535 mddev->pers = NULL; 4534 mddev->pers = NULL;
4536 /* tell userspace to handle 'inactive' */ 4535 /* tell userspace to handle 'inactive' */
4537 sysfs_notify_dirent(mddev->sysfs_state); 4536 sysfs_notify_dirent(mddev->sysfs_state);
@@ -4578,9 +4577,6 @@ out:
4578 } 4577 }
4579 mddev->bitmap_info.offset = 0; 4578 mddev->bitmap_info.offset = 0;
4580 4579
4581 /* make sure all md_delayed_delete calls have finished */
4582 flush_scheduled_work();
4583
4584 export_array(mddev); 4580 export_array(mddev);
4585 4581
4586 mddev->array_sectors = 0; 4582 mddev->array_sectors = 0;
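mddev->private becomes a three-way flag across these hunks: NULL means nothing to undo, the sentinel (void *)1 means only md_redundancy_group was registered, and any other value is a personality's extra attribute group (see the raid5 stop() below) that must be removed as well. The delayed-delete worker decodes it as in the first hunk, condensed here with the same names:

if (mddev->private) {
	sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
	if (mddev->private != (void *)1)	/* a real attribute group */
		sysfs_remove_group(&mddev->kobj, mddev->private);
	if (mddev->sysfs_action)
		sysfs_put(mddev->sysfs_action);
	mddev->sysfs_action = NULL;
}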
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204eb12df..ceb24afdc147 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
5136 mddev->thread = NULL; 5136 mddev->thread = NULL;
5137 mddev->queue->backing_dev_info.congested_fn = NULL; 5137 mddev->queue->backing_dev_info.congested_fn = NULL;
5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5139 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
5140 free_conf(conf); 5139 free_conf(conf);
5141 mddev->private = NULL; 5140 mddev->private = &raid5_attrs_group;
5142 return 0; 5141 return 0;
5143} 5142}
5144 5143
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
5464 !test_bit(Faulty, &rdev->flags)) { 5463 !test_bit(Faulty, &rdev->flags)) {
5465 if (raid5_add_disk(mddev, rdev) == 0) { 5464 if (raid5_add_disk(mddev, rdev) == 0) {
5466 char nm[20]; 5465 char nm[20];
5467 if (rdev->raid_disk >= conf->previous_raid_disks) 5466 if (rdev->raid_disk >= conf->previous_raid_disks) {
5468 set_bit(In_sync, &rdev->flags); 5467 set_bit(In_sync, &rdev->flags);
5469 else 5468 added_devices++;
5469 } else
5470 rdev->recovery_offset = 0; 5470 rdev->recovery_offset = 0;
5471 added_devices++;
5472 sprintf(nm, "rd%d", rdev->raid_disk); 5471 sprintf(nm, "rd%d", rdev->raid_disk);
5473 if (sysfs_create_link(&mddev->kobj, 5472 if (sysfs_create_link(&mddev->kobj,
5474 &rdev->kobj, nm)) 5473 &rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
5480 break; 5479 break;
5481 } 5480 }
5482 5481
5482 /* When a reshape changes the number of devices, ->degraded
 5483 * is measured against the larger of the pre and post number of
 5484 * devices. */
5483 if (mddev->delta_disks > 0) { 5485 if (mddev->delta_disks > 0) {
5484 spin_lock_irqsave(&conf->device_lock, flags); 5486 spin_lock_irqsave(&conf->device_lock, flags);
5485 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 5487 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5486 - added_devices; 5488 - added_devices;
5487 spin_unlock_irqrestore(&conf->device_lock, flags); 5489 spin_unlock_irqrestore(&conf->device_lock, flags);
5488 } 5490 }
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index becbaadb3b77..5ed75263340a 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1333,9 +1333,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
1333 1333
1334 DEB_CAP(("vbuf:%p\n",vb)); 1334 DEB_CAP(("vbuf:%p\n",vb));
1335 1335
1336 release_all_pagetables(dev, buf);
1337
1338 saa7146_dma_free(dev,q,buf); 1336 saa7146_dma_free(dev,q,buf);
1337
1338 release_all_pagetables(dev, buf);
1339} 1339}
1340 1340
1341static struct videobuf_queue_ops video_qops = { 1341static struct videobuf_queue_ops video_qops = {
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c37790ad92d0..9ddc57909d49 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); 761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
762 dmxdevfilter->type = DMXDEV_TYPE_NONE; 762 dmxdevfilter->type = DMXDEV_TYPE_NONE;
763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); 763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
764 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
765 init_timer(&dmxdevfilter->timer); 764 init_timer(&dmxdevfilter->timer);
766 765
767 dvbdev->users++; 766 dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
887 dmxdevfilter->type = DMXDEV_TYPE_PES; 886 dmxdevfilter->type = DMXDEV_TYPE_PES;
888 memcpy(&dmxdevfilter->params, params, 887 memcpy(&dmxdevfilter->params, params,
889 sizeof(struct dmx_pes_filter_params)); 888 sizeof(struct dmx_pes_filter_params));
889 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
890 890
891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
892 892
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b78cfb7d1897..67f189b7aa1f 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
426 }; 426 };
427 }; 427 };
428 428
429 if (dvb_demux_tscheck) { 429 if (demux->cnt_storage) {
430 if (!demux->cnt_storage)
431 demux->cnt_storage = vmalloc(MAX_PID + 1);
432
433 if (!demux->cnt_storage) {
434 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
435 dvb_demux_tscheck = 0;
436 goto no_dvb_demux_tscheck;
437 }
438
439 /* check pkt counter */ 430 /* check pkt counter */
440 if (pid < MAX_PID) { 431 if (pid < MAX_PID) {
441 if (buf[1] & 0x80) 432 if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
454 }; 445 };
455 /* end check */ 446 /* end check */
456 }; 447 };
457no_dvb_demux_tscheck:
458 448
459 list_for_each_entry(feed, &demux->feed_list, list_head) { 449 list_for_each_entry(feed, &demux->feed_list, list_head) {
460 if ((feed->pid != pid) && (feed->pid != 0x2000)) 450 if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1246 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); 1236 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
1247 if (!dvbdemux->feed) { 1237 if (!dvbdemux->feed) {
1248 vfree(dvbdemux->filter); 1238 vfree(dvbdemux->filter);
1239 dvbdemux->filter = NULL;
1249 return -ENOMEM; 1240 return -ENOMEM;
1250 } 1241 }
1251 for (i = 0; i < dvbdemux->filternum; i++) { 1242 for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1257 dvbdemux->feed[i].index = i; 1248 dvbdemux->feed[i].index = i;
1258 } 1249 }
1259 1250
1251 if (dvb_demux_tscheck) {
1252 dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
1253
1254 if (!dvbdemux->cnt_storage)
1255 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
1256 }
1257
1260 INIT_LIST_HEAD(&dvbdemux->frontend_list); 1258 INIT_LIST_HEAD(&dvbdemux->frontend_list);
1261 1259
1262 for (i = 0; i < DMX_TS_PES_OTHER; i++) { 1260 for (i = 0; i < DMX_TS_PES_OTHER; i++) {
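The continuity-check buffer moves out of the per-packet software filter into dvb_dmx_init(): the hot path should not call vmalloc(), and after the move a plain cnt_storage NULL test doubles as the "check disabled" flag, eliminating both the goto label and the module-parameter rewrite. The resulting split, condensed with the hunks' names:

/* init time: allocate once, tolerate failure */
if (dvb_demux_tscheck) {
	dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
	if (!dvbdemux->cnt_storage)
		printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
}

/* per packet: nothing heavier than a pointer test */
if (demux->cnt_storage) {
	/* ... counter and TEI bookkeeping ... */
}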
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 1b249897c9fb..465295b1d14b 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -112,11 +112,13 @@ config DVB_USB_CXUSB
112 select DVB_MT352 if !DVB_FE_CUSTOMISE 112 select DVB_MT352 if !DVB_FE_CUSTOMISE
113 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 113 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
114 select DVB_DIB7000P if !DVB_FE_CUSTOMISE 114 select DVB_DIB7000P if !DVB_FE_CUSTOMISE
115 select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE
116 select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE 115 select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
116 select DVB_ATBM8830 if !DVB_FE_CUSTOMISE
117 select DVB_LGS8GXX if !DVB_FE_CUSTOMISE
117 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE 118 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
118 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 119 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
119 select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE 120 select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
121 select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE
120 help 122 help
121 Say Y here to support the Conexant USB2.0 hybrid reference design. 123 Say Y here to support the Conexant USB2.0 hybrid reference design.
122 Currently, only DVB and ATSC modes are supported, analog mode 124 Currently, only DVB and ATSC modes are supported, analog mode
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index 3051b64aa17c..445fa1068064 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
192 spi_bias *= qam_tab[p->constellation]; 192 spi_bias *= qam_tab[p->constellation];
193 spi_bias /= p->code_rate_HP + 1; 193 spi_bias /= p->code_rate_HP + 1;
194 spi_bias /= (guard_tab[p->guard_interval] + 32); 194 spi_bias /= (guard_tab[p->guard_interval] + 32);
195 spi_bias *= 1000ULL; 195 spi_bias *= 1000;
196 spi_bias /= 1000ULL + ppm/1000; 196 spi_bias /= 1000 + ppm/1000;
197 spi_bias *= p->code_rate_HP; 197 spi_bias *= p->code_rate_HP;
198 198
199 val0x04 = (p->transmission_mode << 2) | p->guard_interval; 199 val0x04 = (p->transmission_mode << 2) | p->guard_interval;
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 3182a406bdd1..ae08b077fd04 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4461,6 +4461,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
4461 request_modules(btv); 4461 request_modules(btv);
4462 } 4462 }
4463 4463
4464 init_bttv_i2c_ir(btv);
4464 bttv_input_init(btv); 4465 bttv_input_init(btv);
4465 4466
4466 /* everything is fine */ 4467 /* everything is fine */
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 63aa31a041e8..407fa61e4cda 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv *btv)
388 if (0 == btv->i2c_rc && i2c_scan) 388 if (0 == btv->i2c_rc && i2c_scan)
389 do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); 389 do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
390 390
391 /* Instantiate the IR receiver device, if present */ 391 return btv->i2c_rc;
392}
393
394/* Instantiate the I2C IR receiver device, if present */
395void __devinit init_bttv_i2c_ir(struct bttv *btv)
396{
392 if (0 == btv->i2c_rc) { 397 if (0 == btv->i2c_rc) {
393 struct i2c_board_info info; 398 struct i2c_board_info info;
394 /* The external IR receiver is at i2c address 0x34 (0x35 for 399 /* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv *btv)
408 strlcpy(info.type, "ir_video", I2C_NAME_SIZE); 413 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
409 i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list); 414 i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
410 } 415 }
411 return btv->i2c_rc;
412} 416}
413 417
414int __devexit fini_bttv_i2c(struct bttv *btv) 418int __devexit fini_bttv_i2c(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index a1d0e9c9f286..6cccc2a17eee 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug;
279extern unsigned int bttv_gpio; 279extern unsigned int bttv_gpio;
280extern void bttv_gpio_tracking(struct bttv *btv, char *comment); 280extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
281extern int init_bttv_i2c(struct bttv *btv); 281extern int init_bttv_i2c(struct bttv *btv);
282extern void init_bttv_i2c_ir(struct bttv *btv);
282extern int fini_bttv_i2c(struct bttv *btv); 283extern int fini_bttv_i2c(struct bttv *btv);
283 284
284#define bttv_printk if (bttv_verbose) printk 285#define bttv_printk if (bttv_verbose) printk
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index fc4dd6045720..7438f8d775ba 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -514,7 +514,7 @@ static int mt9t112_init_pll(const struct i2c_client *client)
514 /* poll to verify out of standby. Must Poll this bit */ 514 /* poll to verify out of standby. Must Poll this bit */
515 for (i = 0; i < 100; i++) { 515 for (i = 0; i < 100; i++) {
516 mt9t112_reg_read(data, client, 0x0018); 516 mt9t112_reg_read(data, client, 0x0018);
517 if (0x4000 & data) 517 if (!(0x4000 & data))
518 break; 518 break;
519 519
520 mdelay(10); 520 mdelay(10);
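The mt9t112 change is a polarity fix: the loop waits for the sensor to leave standby, so it must stop when bit 0x4000 clears, not while it is still set; with the 10 ms delay the PLL gets up to a second to settle. The corrected loop as in the hunk; the driver's mt9t112_reg_read() macro writes its result into data:

for (i = 0; i < 100; i++) {
	mt9t112_reg_read(data, client, 0x0018);
	if (!(0x4000 & data))	/* standby bit cleared: PLL is up */
		break;
	mdelay(10);	/* 100 tries x 10 ms = 1 s budget */
}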
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 50b415e07eda..f7f7e04cf485 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
753 buf[0] = 0xff; /* fixed */ 753 buf[0] = 0xff; /* fixed */
754 754
755 ret = send_control_msg(pdev, 755 ret = send_control_msg(pdev,
756 SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf)); 756 SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
757 757
758 if (!mode && ret >= 0) { 758 if (!mode && ret >= 0) {
759 if (value < 0) 759 if (value < 0)
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 7dfecfc6017c..ee5bff02a92c 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -93,9 +93,9 @@ static int ts_open(struct file *file)
93 dprintk("open dev=%s\n", video_device_node_name(vdev)); 93 dprintk("open dev=%s\n", video_device_node_name(vdev));
94 err = -EBUSY; 94 err = -EBUSY;
95 if (!mutex_trylock(&dev->empress_tsq.vb_lock)) 95 if (!mutex_trylock(&dev->empress_tsq.vb_lock))
96 goto done; 96 return err;
97 if (atomic_read(&dev->empress_users)) 97 if (atomic_read(&dev->empress_users))
98 goto done_up; 98 goto done;
99 99
100 /* Unmute audio */ 100 /* Unmute audio */
101 saa_writeb(SAA7134_AUDIO_MUTE_CTRL, 101 saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
@@ -105,10 +105,8 @@ static int ts_open(struct file *file)
105 file->private_data = dev; 105 file->private_data = dev;
106 err = 0; 106 err = 0;
107 107
108done_up:
109 mutex_unlock(&dev->empress_tsq.vb_lock);
110done: 108done:
111 unlock_kernel(); 109 mutex_unlock(&dev->empress_tsq.vb_lock);
112 return err; 110 return err;
113} 111}
114 112
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 57752751712b..81279b3d694c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " 1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
1797 "Command not in the active list! (sc=%p)\n", ioc->name, 1797 "Command not in the active list! (sc=%p)\n", ioc->name,
1798 SCpnt)); 1798 SCpnt));
1799 retval = 0; 1799 retval = SUCCESS;
1800 goto out; 1800 goto out;
1801 } 1801 }
1802 1802
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b9f1e84897cc..e7f8027165e6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
74 } 74 }
75 75
76 mrq->cmd->arg = dev_addr; 76 mrq->cmd->arg = dev_addr;
77 if (!mmc_card_blockaddr(test->card))
78 mrq->cmd->arg <<= 9;
79
77 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 80 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
78 81
79 if (blocks == 1) 82 if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
190 } 193 }
191 194
192 for (i = 0;i < BUFFER_SIZE / 512;i++) { 195 for (i = 0;i < BUFFER_SIZE / 512;i++) {
193 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 196 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
194 if (ret) 197 if (ret)
195 return ret; 198 return ret;
196 } 199 }
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
219 memset(test->buffer, 0, 512); 222 memset(test->buffer, 0, 512);
220 223
221 for (i = 0;i < BUFFER_SIZE / 512;i++) { 224 for (i = 0;i < BUFFER_SIZE / 512;i++) {
222 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 225 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
223 if (ret) 226 if (ret)
224 return ret; 227 return ret;
225 } 228 }
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
426 for (i = 0;i < sectors;i++) { 429 for (i = 0;i < sectors;i++) {
427 ret = mmc_test_buffer_transfer(test, 430 ret = mmc_test_buffer_transfer(test,
428 test->buffer + i * 512, 431 test->buffer + i * 512,
429 dev_addr + i * 512, 512, 0); 432 dev_addr + i, 512, 0);
430 if (ret) 433 if (ret)
431 return ret; 434 return ret;
432 } 435 }
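All three mmc_test hunks are about the two MMC addressing schemes: high-capacity cards take sector numbers in the command argument, standard-capacity cards take byte offsets. The helpers now pass sector indices throughout and shift left by 9 (multiply by 512) only for byte-addressed cards, so e.g. sector 2048 becomes byte address 0x100000. Condensed from the first hunk:

mrq->cmd->arg = dev_addr;	/* dev_addr is a sector number */
if (!mmc_card_blockaddr(test->card))	/* standard-capacity card? */
	mrq->cmd->arg <<= 9;	/* convert sectors to bytes */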
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..1dd4403247ca 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
921 size = (res->end - res->start) + 1; 921 size = (res->end - res->start) + 1;
922 922
923 ax->mem2 = request_mem_region(res->start, size, pdev->name); 923 ax->mem2 = request_mem_region(res->start, size, pdev->name);
924 if (ax->mem == NULL) { 924 if (ax->mem2 == NULL) {
925 dev_err(&pdev->dev, "cannot reserve registers\n"); 925 dev_err(&pdev->dev, "cannot reserve registers\n");
926 ret = -ENXIO; 926 ret = -ENXIO;
927 goto exit_mem1; 927 goto exit_mem1;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index fee6eee7ae5b..006cb2efcd22 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
296 req_hdr->opcode = opcode; 296 req_hdr->opcode = opcode;
297 req_hdr->subsystem = subsystem; 297 req_hdr->subsystem = subsystem;
298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
299 req_hdr->version = 0;
299} 300}
300 301
301static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 302static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..318a018ca7c5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2079 struct sge_fl *fl, int len, int complete) 2079 struct sge_fl *fl, int len, int complete)
2080{ 2080{
2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2082 struct port_info *pi = netdev_priv(qs->netdev);
2082 struct sk_buff *skb = NULL; 2083 struct sk_buff *skb = NULL;
2083 struct cpl_rx_pkt *cpl; 2084 struct cpl_rx_pkt *cpl;
2084 struct skb_frag_struct *rx_frag; 2085 struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2116 2117
2117 if (!nr_frags) { 2118 if (!nr_frags) {
2118 offset = 2 + sizeof(struct cpl_rx_pkt); 2119 offset = 2 + sizeof(struct cpl_rx_pkt);
2119 qs->lro_va = sd->pg_chunk.va + 2; 2120 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2120 }
2121 len -= offset;
2122 2121
2123 prefetch(qs->lro_va); 2122 if ((pi->rx_offload & T3_RX_CSUM) &&
2123 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2124 skb->ip_summed = CHECKSUM_UNNECESSARY;
2125 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2126 } else
2127 skb->ip_summed = CHECKSUM_NONE;
2128 } else
2129 cpl = qs->lro_va;
2130
2131 len -= offset;
2124 2132
2125 rx_frag += nr_frags; 2133 rx_frag += nr_frags;
2126 rx_frag->page = sd->pg_chunk.page; 2134 rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2136 return; 2144 return;
2137 2145
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2146 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2140 cpl = qs->lro_va;
2141 2147
2142 if (unlikely(cpl->vlan_valid)) { 2148 if (unlikely(cpl->vlan_valid)) {
2143 struct net_device *dev = qs->netdev;
2144 struct port_info *pi = netdev_priv(dev);
2145 struct vlan_group *grp = pi->vlan_grp; 2149 struct vlan_group *grp = pi->vlan_grp;
2146 2150
2147 if (likely(grp != NULL)) { 2151 if (likely(grp != NULL)) {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d29bb532eccf..765543663a4f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4006,11 +4006,21 @@ check_page:
4006 } 4006 }
4007 } 4007 }
4008 4008
4009 if (!buffer_info->dma) 4009 if (!buffer_info->dma) {
4010 buffer_info->dma = pci_map_page(pdev, 4010 buffer_info->dma = pci_map_page(pdev,
4011 buffer_info->page, 0, 4011 buffer_info->page, 0,
4012 buffer_info->length, 4012 buffer_info->length,
4013 PCI_DMA_FROMDEVICE); 4013 PCI_DMA_FROMDEVICE);
4014 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
4015 put_page(buffer_info->page);
4016 dev_kfree_skb(skb);
4017 buffer_info->page = NULL;
4018 buffer_info->skb = NULL;
4019 buffer_info->dma = 0;
4020 adapter->alloc_rx_buff_failed++;
4021 break; /* while !buffer_info->skb */
4022 }
4023 }
4014 4024
4015 rx_desc = E1000_RX_DESC(*rx_ring, i); 4025 rx_desc = E1000_RX_DESC(*rx_ring, i);
4016 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4026 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4101,6 +4111,13 @@ map_skb:
4101 skb->data, 4111 skb->data,
4102 buffer_info->length, 4112 buffer_info->length,
4103 PCI_DMA_FROMDEVICE); 4113 PCI_DMA_FROMDEVICE);
4114 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
4115 dev_kfree_skb(skb);
4116 buffer_info->skb = NULL;
4117 buffer_info->dma = 0;
4118 adapter->alloc_rx_buff_failed++;
4119 break; /* while !buffer_info->skb */
4120 }
4104 4121
4105 /* 4122 /*
4106 * XXX if it was allocated cleanly it will never map to a 4123 * XXX if it was allocated cleanly it will never map to a
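pci_map_page() and pci_map_single() can fail, for instance when IOMMU space runs out, and an unchecked handle written into an RX descriptor would let the NIC DMA to a bogus address. Both hunks add the same defensive pattern: test pci_dma_mapping_error(), release whatever the slot held, count the event, and stop refilling until the next pass. Its core, with the hunk's names:

buffer_info->dma = pci_map_page(pdev, buffer_info->page, 0,
				buffer_info->length, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
	put_page(buffer_info->page);	/* drop our page reference */
	dev_kfree_skb(skb);
	buffer_info->page = NULL;
	buffer_info->skb = NULL;
	buffer_info->dma = 0;
	adapter->alloc_rx_buff_failed++;	/* visible in driver stats */
	break;	/* leave the ring short; a later refill retries */
}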
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 997124d2992a..c881347cb26d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
422 if (tx_queue > IGB_N0_QUEUE) 422 if (tx_queue > IGB_N0_QUEUE)
423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
424 if (!adapter->msix_entries && msix_vector == 0)
425 msixbm |= E1000_EIMS_OTHER;
424 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 426 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
425 q_vector->eims_value = msixbm; 427 q_vector->eims_value = msixbm;
426 break; 428 break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
877{ 879{
878 struct net_device *netdev = adapter->netdev; 880 struct net_device *netdev = adapter->netdev;
879 struct pci_dev *pdev = adapter->pdev; 881 struct pci_dev *pdev = adapter->pdev;
880 struct e1000_hw *hw = &adapter->hw;
881 int err = 0; 882 int err = 0;
882 883
883 if (adapter->msix_entries) { 884 if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
909 igb_setup_all_tx_resources(adapter); 910 igb_setup_all_tx_resources(adapter);
910 igb_setup_all_rx_resources(adapter); 911 igb_setup_all_rx_resources(adapter);
911 } else { 912 } else {
912 switch (hw->mac.type) { 913 igb_assign_vector(adapter->q_vector[0], 0);
913 case e1000_82575:
914 wr32(E1000_MSIXBM(0),
915 (E1000_EICR_RX_QUEUE0 |
916 E1000_EICR_TX_QUEUE0 |
917 E1000_EIMS_OTHER));
918 break;
919 case e1000_82580:
920 case e1000_82576:
921 wr32(E1000_IVAR0, E1000_IVAR_VALID);
922 break;
923 default:
924 break;
925 }
926 } 914 }
927 915
928 if (adapter->flags & IGB_FLAG_HAS_MSI) { 916 if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
1140 } 1128 }
1141 if (adapter->msix_entries) 1129 if (adapter->msix_entries)
1142 igb_configure_msix(adapter); 1130 igb_configure_msix(adapter);
1131 else
1132 igb_assign_vector(adapter->q_vector[0], 0);
1143 1133
1144 /* Clear any pending interrupts. */ 1134 /* Clear any pending interrupts. */
1145 rd32(E1000_ICR); 1135 rd32(E1000_ICR);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 3103f4165311..35a06b47587b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
357 u32 fctrl_reg; 357 u32 fctrl_reg;
358 u32 rmcs_reg; 358 u32 rmcs_reg;
359 u32 reg; 359 u32 reg;
360 u32 link_speed = 0;
361 bool link_up;
360 362
361#ifdef CONFIG_DCB 363#ifdef CONFIG_DCB
362 if (hw->fc.requested_mode == ixgbe_fc_pfc) 364 if (hw->fc.requested_mode == ixgbe_fc_pfc)
363 goto out; 365 goto out;
364 366
365#endif /* CONFIG_DCB */ 367#endif /* CONFIG_DCB */
368 /*
369 * On 82598 having Rx FC on causes resets while doing 1G
370 * so if it's on turn it off once we know link_speed. For
371 * more details see 82598 Specification update.
372 */
373 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
374 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
375 switch (hw->fc.requested_mode) {
376 case ixgbe_fc_full:
377 hw->fc.requested_mode = ixgbe_fc_tx_pause;
378 break;
379 case ixgbe_fc_rx_pause:
380 hw->fc.requested_mode = ixgbe_fc_none;
381 break;
382 default:
383 /* no change */
384 break;
385 }
386 }
387
366 /* Negotiate the fc mode to use */ 388 /* Negotiate the fc mode to use */
367 ret_val = ixgbe_fc_autoneg(hw); 389 ret_val = ixgbe_fc_autoneg(hw);
368 if (ret_val) 390 if (ret_val)
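Per the 82598 specification update cited in the new comment, receive flow control at 1G link speed can trigger resets, so once the link speed is known the requested mode is quietly degraded before negotiation: full pause keeps only its Tx half, Rx-only pause becomes none, and the already-safe modes pass through. The downgrade table, isolated from the hunk:

if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		hw->fc.requested_mode = ixgbe_fc_tx_pause;	/* drop the Rx half */
		break;
	case ixgbe_fc_rx_pause:
		hw->fc.requested_mode = ixgbe_fc_none;	/* nothing safe remains */
		break;
	default:
		break;	/* tx_pause and none are fine at 1G */
	}
}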
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b5f64ad67975..951b73cf5ca2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5179,7 +5179,7 @@ dma_error:
5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5180 } 5180 }
5181 5181
5182 return count; 5182 return 0;
5183} 5183}
5184 5184
5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5329 struct ixgbe_adapter *adapter = netdev_priv(dev); 5329 struct ixgbe_adapter *adapter = netdev_priv(dev);
5330 int txq = smp_processor_id(); 5330 int txq = smp_processor_id();
5331 5331
5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) 5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5333 while (unlikely(txq >= dev->real_num_tx_queues))
5334 txq -= dev->real_num_tx_queues;
5333 return txq; 5335 return txq;
5336 }
5334 5337
5335#ifdef IXGBE_FCOE 5338#ifdef IXGBE_FCOE
5336 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5339 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -5760,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5760 if (err) 5763 if (err)
5761 goto err_sw_init; 5764 goto err_sw_init;
5762 5765
 5766 /* Make it possible for the adapter to be woken up via WOL */
5767 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
5768 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5769
5763 /* 5770 /*
5764 * If there is a fan on this device and it has failed log the 5771 * If there is a fan on this device and it has failed log the
5765 * failure. 5772 * failure.
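In the middle hunk, ixgbe_select_queue() clamps the raw smp_processor_id() into range: on machines with more CPUs than TX queues the unbounded id would index past the ring array. The subtraction loop is a cheap modulo for the common case where the id is only a few multiples of the queue count; a plain '%' would give the same result:

int txq = smp_processor_id();

while (unlikely(txq >= dev->real_num_tx_queues))
	txq -= dev->real_num_tx_queues;	/* txq % real_num_tx_queues */
return txq;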
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f9d6081959b..24279e6e55f5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1941 netif_wake_queue(adapter->netdev); 1941 netif_wake_queue(adapter->netdev);
1942 1942
1943 clear_bit(__NX_RESETTING, &adapter->state); 1943 clear_bit(__NX_RESETTING, &adapter->state);
1944 1944 return;
1945 } else { 1945 } else {
1946 clear_bit(__NX_RESETTING, &adapter->state); 1946 clear_bit(__NX_RESETTING, &adapter->state);
1947 if (!netxen_nic_reset_context(adapter)) { 1947 if (!netxen_nic_reset_context(adapter)) {
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
2240 2240
2241 netxen_nic_down(adapter, netdev); 2241 netxen_nic_down(adapter, netdev);
2242 2242
2243 rtnl_lock();
2243 netxen_nic_detach(adapter); 2244 netxen_nic_detach(adapter);
2245 rtnl_unlock();
2244 2246
2245 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2247 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
2246 2248
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 103e8b0e2a0d..46997e177ee3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2284 fail2: 2284 fail2:
2285 efx_fini_struct(efx); 2285 efx_fini_struct(efx);
2286 fail1: 2286 fail1:
2287 WARN_ON(rc > 0);
2287 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); 2288 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2288 free_netdev(net_dev); 2289 free_netdev(net_dev);
2289 return rc; 2290 return rc;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index bf0b96af5334..5712fddd72f2 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -29,6 +29,15 @@
29#define FALCON_BOARD_SFN4111T 0x51 29#define FALCON_BOARD_SFN4111T 0x51
30#define FALCON_BOARD_SFN4112F 0x52 30#define FALCON_BOARD_SFN4112F 0x52
31 31
32/* Board temperature is about 15°C above ambient when air flow is
33 * limited. */
34#define FALCON_BOARD_TEMP_BIAS 15
35
36/* SFC4000 datasheet says: 'The maximum permitted junction temperature
37 * is 125°C; the thermal design of the environment for the SFC4000
38 * should aim to keep this well below 100°C.' */
39#define FALCON_JUNC_TEMP_MAX 90
40
32/***************************************************************************** 41/*****************************************************************************
33 * Support for LM87 sensor chip used on several boards 42 * Support for LM87 sensor chip used on several boards
34 */ 43 */
@@ -548,16 +557,16 @@ fail_hwmon:
548static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ 557static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
549 558
550static const u8 sfe4002_lm87_regs[] = { 559static const u8 sfe4002_lm87_regs[] = {
551 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ 560 LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
552 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ 561 LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
553 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ 562 LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
554 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */ 563 LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
555 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ 564 LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
556 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ 565 LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
557 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */ 566 LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
558 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ 567 LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
559 LM87_TEMP_INT_LIMITS(10, 60), /* board */ 568 LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
560 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ 569 LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
561 0 570 0
562}; 571};
563 572
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx)
619static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ 628static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
620 629
621static const u8 sfn4112f_lm87_regs[] = { 630static const u8 sfn4112f_lm87_regs[] = {
622 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ 631 LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
623 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ 632 LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
624 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ 633 LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
625 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ 634 LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
626 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ 635 LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
627 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ 636 LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
628 LM87_TEMP_INT_LIMITS(10, 60), /* board */ 637 LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
629 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ 638 LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
630 0 639 0
631}; 640};
632 641
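The rewritten sensor limits are now derived instead of hard-coded: board alarms are the ambient specification plus the 15°C FALCON_BOARD_TEMP_BIAS, giving 80 + 15 = 95°C on the SFE4002 and 60 + 15 = 75°C on the SFN4112F; the junction alarm sits at FALCON_JUNC_TEMP_MAX = 90°C, comfortably below the SFC4000's 125°C absolute maximum; and the voltage windows widen from ±5% to ±10%. Spelled out for the SFE4002 (values in °C):

LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),	/* board: 0..95 */
LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),	/* junction: 0..90 */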
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 9f035b9f0350..f66b3da6ddff 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
127 efx_dword_t reg; 127 efx_dword_t reg;
128 128
129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
130 rc = efx_mcdi_poll_reboot(efx); 130 rc = -efx_mcdi_poll_reboot(efx);
131 if (rc) 131 if (rc)
132 goto out; 132 goto out;
133 133
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e0d13a451019..67eec7a6e487 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
320 320
321 falcon_board(efx)->type->init_phy(efx); 321 falcon_board(efx)->type->init_phy(efx);
322 322
323 return rc; 323 return 0;
324 324
325 fail: 325 fail:
326 EFX_ERR(efx, "PHY reset timed out\n"); 326 EFX_ERR(efx, "PHY reset timed out\n");
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d760650c5c04..67249c3c9f50 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) 1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1026{ 1026{
1027 struct sky2_tx_le *le = sky2->tx_le + *slot; 1027 struct sky2_tx_le *le = sky2->tx_le + *slot;
1028 struct tx_ring_info *re = sky2->tx_ring + *slot;
1029 1028
1030 *slot = RING_NEXT(*slot, sky2->tx_ring_size); 1029 *slot = RING_NEXT(*slot, sky2->tx_ring_size);
1031 re->flags = 0;
1032 re->skb = NULL;
1033 le->ctrl = 0; 1030 le->ctrl = 0;
1034 return le; 1031 return le;
1035} 1032}
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1622 return count; 1619 return count;
1623} 1620}
1624 1621
1625static void sky2_tx_unmap(struct pci_dev *pdev, 1622static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1626 const struct tx_ring_info *re)
1627{ 1623{
1628 if (re->flags & TX_MAP_SINGLE) 1624 if (re->flags & TX_MAP_SINGLE)
1629 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), 1625 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
1633 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1629 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1634 pci_unmap_len(re, maplen), 1630 pci_unmap_len(re, maplen),
1635 PCI_DMA_TODEVICE); 1631 PCI_DMA_TODEVICE);
1632 re->flags = 0;
1636} 1633}
1637 1634
1638/* 1635/*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1839 dev->stats.tx_packets++; 1836 dev->stats.tx_packets++;
1840 dev->stats.tx_bytes += skb->len; 1837 dev->stats.tx_bytes += skb->len;
1841 1838
1839 re->skb = NULL;
1842 dev_kfree_skb_any(skb); 1840 dev_kfree_skb_any(skb);
1843 1841
1844 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); 1842 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..d71c1976072e 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1437 /* Transmit complete. */ 1437 /* Transmit complete. */
1438 lp->lstats.tx_ints++; 1438 lp->lstats.tx_ints++;
1439 tc35815_txdone(dev); 1439 tc35815_txdone(dev);
1440 netif_wake_queue(dev);
1441 if (ret < 0) 1440 if (ret < 0)
1442 ret = 0; 1441 ret = 0;
1443 } 1442 }
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4f27f022fbf7..5f3b9eaeb04f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -584,6 +584,11 @@ static const struct usb_device_id products [] = {
584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
585 .driver_info = (unsigned long) &mbm_info, 585 .driver_info = (unsigned long) &mbm_info,
586}, { 586}, {
587 /* Ericsson C3607w ver 2 */
588 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
590 .driver_info = (unsigned long) &mbm_info,
591}, {
587 /* Toshiba F3507g */ 592 /* Toshiba F3507g */
588 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 593 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 594 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c93f58f5c6f2..317aa34b21cf 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
1877/** 1877/**
1878 * tx_srv - transmit interrupt service 1878 * tx_srv - transmit interrupt service
1879 * @vptr: Velocity 1879 * @vptr: Velocity
1880 * @status:
1881 * 1880 *
1882 * Scan the queues looking for transmitted packets that 1881 * Scan the queues looking for transmitted packets that
1883 * we can complete and clean up. Update any statistics as 1882 * we can complete and clean up. Update any statistics as
1884 * necessary. 1883 * necessary.
1885 */ 1884 */
1886static int velocity_tx_srv(struct velocity_info *vptr, u32 status) 1885static int velocity_tx_srv(struct velocity_info *vptr)
1887{ 1886{
1888 struct tx_desc *td; 1887 struct tx_desc *td;
1889 int qnum; 1888 int qnum;
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2090/** 2089/**
2091 * velocity_rx_srv - service RX interrupt 2090 * velocity_rx_srv - service RX interrupt
2092 * @vptr: velocity 2091 * @vptr: velocity
2093 * @status: adapter status (unused)
2094 * 2092 *
2095 * Walk the receive ring of the velocity adapter and remove 2093 * Walk the receive ring of the velocity adapter and remove
2096 * any received packets from the receive queue. Hand the ring 2094 * any received packets from the receive queue. Hand the ring
2097 * slots back to the adapter for reuse. 2095 * slots back to the adapter for reuse.
2098 */ 2096 */
2099static int velocity_rx_srv(struct velocity_info *vptr, int status, 2097static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2100 int budget_left)
2101{ 2098{
2102 struct net_device_stats *stats = &vptr->dev->stats; 2099 struct net_device_stats *stats = &vptr->dev->stats;
2103 int rd_curr = vptr->rx.curr; 2100 int rd_curr = vptr->rx.curr;
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
2151 struct velocity_info *vptr = container_of(napi, 2148 struct velocity_info *vptr = container_of(napi,
2152 struct velocity_info, napi); 2149 struct velocity_info, napi);
2153 unsigned int rx_done; 2150 unsigned int rx_done;
2154 u32 isr_status; 2151 unsigned long flags;
2155
2156 spin_lock(&vptr->lock);
2157 isr_status = mac_read_isr(vptr->mac_regs);
2158
2159 /* Ack the interrupt */
2160 mac_write_isr(vptr->mac_regs, isr_status);
2161 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2162 velocity_error(vptr, isr_status);
2163 2152
2153 spin_lock_irqsave(&vptr->lock, flags);
2164 /* 2154 /*
2165 * Do rx and tx twice for performance (taken from the VIA 2155 * Do rx and tx twice for performance (taken from the VIA
2166 * out-of-tree driver). 2156 * out-of-tree driver).
2167 */ 2157 */
2168 rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); 2158 rx_done = velocity_rx_srv(vptr, budget / 2);
2169 velocity_tx_srv(vptr, isr_status); 2159 velocity_tx_srv(vptr);
2170 rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); 2160 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2171 velocity_tx_srv(vptr, isr_status); 2161 velocity_tx_srv(vptr);
2172
2173 spin_unlock(&vptr->lock);
2174 2162
2175 /* If budget not fully consumed, exit the polling mode */ 2163 /* If budget not fully consumed, exit the polling mode */
2176 if (rx_done < budget) { 2164 if (rx_done < budget) {
2177 napi_complete(napi); 2165 napi_complete(napi);
2178 mac_enable_int(vptr->mac_regs); 2166 mac_enable_int(vptr->mac_regs);
2179 } 2167 }
2168 spin_unlock_irqrestore(&vptr->lock, flags);
2180 2169
2181 return rx_done; 2170 return rx_done;
2182} 2171}
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
2206 return IRQ_NONE; 2195 return IRQ_NONE;
2207 } 2196 }
2208 2197
2198 /* Ack the interrupt */
2199 mac_write_isr(vptr->mac_regs, isr_status);
2200
2209 if (likely(napi_schedule_prep(&vptr->napi))) { 2201 if (likely(napi_schedule_prep(&vptr->napi))) {
2210 mac_disable_int(vptr->mac_regs); 2202 mac_disable_int(vptr->mac_regs);
2211 __napi_schedule(&vptr->napi); 2203 __napi_schedule(&vptr->napi);
2212 } 2204 }
2205
2206 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2207 velocity_error(vptr, isr_status);
2208
2213 spin_unlock(&vptr->lock); 2209 spin_unlock(&vptr->lock);
2214 2210
2215 return IRQ_HANDLED; 2211 return IRQ_HANDLED;
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
3100 velocity_init_registers(vptr, VELOCITY_INIT_WOL); 3096 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3101 mac_disable_int(vptr->mac_regs); 3097 mac_disable_int(vptr->mac_regs);
3102 3098
3103 velocity_tx_srv(vptr, 0); 3099 velocity_tx_srv(vptr);
3104 3100
3105 for (i = 0; i < vptr->tx.numq; i++) { 3101 for (i = 0; i < vptr->tx.numq; i++) {
3106 if (vptr->tx.used[i]) 3102 if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3344{ 3340{
3345 struct velocity_info *vptr = netdev_priv(dev); 3341 struct velocity_info *vptr = netdev_priv(dev);
3346 int max_us = 0x3f * 64; 3342 int max_us = 0x3f * 64;
3343 unsigned long flags;
3347 3344
3348 /* 6 bits of */ 3345 /* 6 bits of */
3349 if (ecmd->tx_coalesce_usecs > max_us) 3346 if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3365 ecmd->tx_coalesce_usecs); 3362 ecmd->tx_coalesce_usecs);
3366 3363
3367 /* Setup the interrupt suppression and queue timers */ 3364 /* Setup the interrupt suppression and queue timers */
3365 spin_lock_irqsave(&vptr->lock, flags);
3368 mac_disable_int(vptr->mac_regs); 3366 mac_disable_int(vptr->mac_regs);
3369 setup_adaptive_interrupts(vptr); 3367 setup_adaptive_interrupts(vptr);
3370 setup_queue_timers(vptr); 3368 setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3372 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); 3370 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3373 mac_clear_isr(vptr->mac_regs); 3371 mac_clear_isr(vptr->mac_regs);
3374 mac_enable_int(vptr->mac_regs); 3372 mac_enable_int(vptr->mac_regs);
3373 spin_unlock_irqrestore(&vptr->lock, flags);
3375 3374
3376 return 0; 3375 return 0;
3377} 3376}
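
The via-velocity changes above move ISR acknowledgment out of the NAPI poll routine into the hard interrupt handler, and convert the vptr->lock users to the irqsave variants now that the lock is taken from process context (ethtool's set_coalesce) as well as from interrupt-driven paths. A minimal sketch of the irqsave pattern, using hypothetical demo_* names rather than the driver's:

    /* Illustrative only: when a spinlock is shared between process
     * context and an interrupt (or any context whose IRQ state is
     * unknown), spin_lock_irqsave() saves and restores the caller's
     * interrupt state instead of blindly re-enabling IRQs on unlock.
     */
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */
    static int demo_shared;                 /* state an IRQ also touches */

    static void demo_update(int delta)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            demo_shared += delta;           /* IRQs off: handler can't race */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
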
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0b..29bf33692f71 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1615 bf->bf_frmlen -= padsize; 1615 bf->bf_frmlen -= padsize;
1616 } 1616 }
1617 1617
1618 if (conf_is_ht(&hw->conf) && !is_pae(skb)) 1618 if (conf_is_ht(&hw->conf))
1619 bf->bf_state.bf_type |= BUF_HT; 1619 bf->bf_state.bf_type |= BUF_HT;
1620 1620
1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1701 goto tx_done; 1701 goto tx_done;
1702 } 1702 }
1703 1703
1704 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1704 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1705 /* 1705 /*
1706 * Try aggregation if it's a unicast data frame 1706 * Try aggregation if it's a unicast data frame
1707 * and the destination is HT capable. 1707 * and the destination is HT capable.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf9491997..c484cc253892 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ 115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ 116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
117#define B43_MMIO_RNG 0x65A 117#define B43_MMIO_RNG 0x65A
118#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
118#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ 119#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
119#define B43_MMIO_IFSCTL_USE_EDCF 0x0004 120#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
120#define B43_MMIO_POWERUP_DELAY 0x6A8 121#define B43_MMIO_POWERUP_DELAY 0x6A8
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f26..490fb45d1d05 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) 628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
629{ 629{
630 /* slot_time is in usec. */ 630 /* slot_time is in usec. */
631 if (dev->phy.type != B43_PHYTYPE_G) 631 /* This test used to exit for all but a G PHY. */
632 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
632 return; 633 return;
633 b43_write16(dev, 0x684, 510 + slot_time); 634 b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
634 b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); 635 /* Shared memory location 0x0010 is the slot time and should be
636 * set to slot_time; however, this register is initially 0 and changing
637 * the value adversely affects the transmit rate for BCM4311
638 * devices. Until this behavior is understood, delete this step
639 *
640 * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
641 */
635} 642}
636 643
637static void b43_short_slot_timing_enable(struct b43_wldev *dev) 644static void b43_short_slot_timing_enable(struct b43_wldev *dev)
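
The b43 change replaces the bare write to 0x684 with the new B43_MMIO_IFSSLOT define and, per the in-line comment, drops the shared-memory slot-time write that hurt BCM4311 transmit rates. The naming half of that, as a generic sketch (the 0x684 offset and 510 bias come from the driver; the DEMO_* names are hypothetical):

    /* Sketch: name an MMIO offset once so each write site documents
     * itself; DEMO_MMIO_IFSSLOT mirrors b43's B43_MMIO_IFSSLOT.
     */
    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_MMIO_IFSSLOT   0x684       /* interframe slot time */

    static void demo_set_slot_time(void __iomem *mmio, u16 slot_time)
    {
            /* 510 is the bias b43 applies before programming the register */
            writew(510 + slot_time, mmio + DEMO_MMIO_IFSSLOT);
    }
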
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9b4b8b5c7574..31462813bac0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2008 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2008 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2009 "%d index %d\n", scd_ssn , index); 2009 "%d index %d\n", scd_ssn , index);
2010 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2010 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2011 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 2011 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2012 2012
2013 if (priv->mac80211_registered && 2013 if (priv->mac80211_registered &&
2014 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2014 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de45f308b744..cffaae772d51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1125 scd_ssn , index, txq_id, txq->swq_id); 1125 scd_ssn , index, txq_id, txq->swq_id);
1126 1126
1127 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 1127 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1128 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1128 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1129 1129
1130 if (priv->mac80211_registered && 1130 if (priv->mac80211_registered &&
1131 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 1131 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1153 tx_resp->failure_frame); 1153 tx_resp->failure_frame);
1154 1154
1155 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 1155 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1156 if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1156 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1157 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1158 1157
1159 if (priv->mac80211_registered && 1158 if (priv->mac80211_registered &&
1160 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1159 (iwl_queue_space(&txq->q) > txq->q.low_mark))
1161 iwl_wake_queue(priv, txq_id); 1160 iwl_wake_queue(priv, txq_id);
1162 } 1161 }
1163 1162
1164 if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1163 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1165 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1166 1164
1167 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 1165 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1168 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); 1166 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5461f105bd2d..f36f804804fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2745,6 +2745,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2745 priv->staging_rxon.flags = 0; 2745 priv->staging_rxon.flags = 0;
2746 2746
2747 iwl_set_rxon_channel(priv, conf->channel); 2747 iwl_set_rxon_channel(priv, conf->channel);
2748 iwl_set_rxon_ht(priv, ht_conf);
2748 2749
2749 iwl_set_flags_for_band(priv, conf->channel->band); 2750 iwl_set_flags_for_band(priv, conf->channel->band);
2750 spin_unlock_irqrestore(&priv->lock, flags); 2751 spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e7453..b69e972671b2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
446int iwl_hw_tx_queue_init(struct iwl_priv *priv, 446int iwl_hw_tx_queue_init(struct iwl_priv *priv,
447 struct iwl_tx_queue *txq); 447 struct iwl_tx_queue *txq);
448int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 448int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
449void iwl_free_tfds_in_queue(struct iwl_priv *priv,
450 int sta_id, int tid, int freed);
449int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 451int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
450 int slots_num, u32 txq_id); 452 int slots_num, u32 txq_id);
451void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 453void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6f36b6e79f5e..2dbce85404aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
928 if (ieee80211_is_mgmt(fc) || 928 if (ieee80211_is_mgmt(fc) ||
929 ieee80211_has_protected(fc) || 929 ieee80211_has_protected(fc) ||
930 ieee80211_has_morefrags(fc) || 930 ieee80211_has_morefrags(fc) ||
931 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) 931 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
932 (ieee80211_is_data_qos(fc) &&
933 *ieee80211_get_qos_ctl(hdr) &
934 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
932 ret = skb_linearize(skb); 935 ret = skb_linearize(skb);
933 else 936 else
934 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? 937 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 87ce2bd292c7..8f4071562857 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
120EXPORT_SYMBOL(iwl_txq_update_write_ptr); 120EXPORT_SYMBOL(iwl_txq_update_write_ptr);
121 121
122 122
123void iwl_free_tfds_in_queue(struct iwl_priv *priv,
124 int sta_id, int tid, int freed)
125{
126 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
127 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
128 else {
129 IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
130 priv->stations[sta_id].tid[tid].tfds_in_queue,
131 freed);
132 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
133 }
134}
135EXPORT_SYMBOL(iwl_free_tfds_in_queue);
136
123/** 137/**
124 * iwl_tx_queue_free - Deallocate DMA queue. 138 * iwl_tx_queue_free - Deallocate DMA queue.
125 * @txq: Transmit queue to deallocate. 139 * @txq: Transmit queue to deallocate.
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1131 struct iwl_queue *q = &txq->q; 1145 struct iwl_queue *q = &txq->q;
1132 struct iwl_tx_info *tx_info; 1146 struct iwl_tx_info *tx_info;
1133 int nfreed = 0; 1147 int nfreed = 0;
1148 struct ieee80211_hdr *hdr;
1134 1149
1135 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1150 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1136 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " 1151 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1145 1160
1146 tx_info = &txq->txb[txq->q.read_ptr]; 1161 tx_info = &txq->txb[txq->q.read_ptr];
1147 iwl_tx_status(priv, tx_info->skb[0]); 1162 iwl_tx_status(priv, tx_info->skb[0]);
1163
1164 hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
1165 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1166 nfreed++;
1148 tx_info->skb[0] = NULL; 1167 tx_info->skb[0] = NULL;
1149 1168
1150 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) 1169 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1151 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); 1170 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1152 1171
1153 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 1172 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1154 nfreed++;
1155 } 1173 }
1156 return nfreed; 1174 return nfreed;
1157} 1175}
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1559 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { 1577 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1560 /* calculate mac80211 ampdu sw queue to wake */ 1578 /* calculate mac80211 ampdu sw queue to wake */
1561 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); 1579 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
1562 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1580 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1563 1581
1564 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 1582 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1565 priv->mac80211_registered && 1583 priv->mac80211_registered &&
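
iwl_free_tfds_in_queue() above centralizes four open-coded `tfds_in_queue -= freed` sites and clamps at zero rather than letting the unsigned counter wrap; iwl_tx_queue_reclaim() is also changed to count only QoS data frames, matching how the counter is incremented on the transmit side. The clamp in isolation, as a hypothetical helper:

    /* Sketch: underflow-safe decrement for an unsigned counter whose
     * increments and decrements happen on different code paths.
     */
    static void demo_release(unsigned int *in_queue, unsigned int freed)
    {
            if (*in_queue >= freed)
                    *in_queue -= freed;
            else
                    *in_queue = 0;  /* WARN/log here; wrapping would be worse */
    }
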
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 6d6ed7485175..f727b4a83196 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
794 } 794 }
795 795
796 bss->bss = kzalloc(bss_len, GFP_KERNEL); 796 bss->bss = kzalloc(bss_len, GFP_KERNEL);
797 if (!bss) { 797 if (!bss->bss) {
798 kfree(bss); 798 kfree(bss);
799 IWM_ERR(iwm, "Couldn't allocate bss\n"); 799 IWM_ERR(iwm, "Couldn't allocate bss\n");
800 return -ENOMEM; 800 return -ENOMEM;
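
The iwmc3200wifi fix is the classic wrong-pointer allocation check: the code tested the already-validated container `bss` instead of the freshly allocated member `bss->bss`. The corrected shape, with a hypothetical struct:

    /* Sketch: after allocating a member, test the member, not its parent. */
    #include <linux/slab.h>

    struct demo_bss {
            void *bss;              /* hypothetical payload pointer */
    };

    static int demo_alloc(struct demo_bss *bss, size_t bss_len)
    {
            bss->bss = kzalloc(bss_len, GFP_KERNEL);
            if (!bss->bss)          /* not: if (!bss) */
                    return -ENOMEM;
            return 0;
    }
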
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe4..7ba3052b0708 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
65 /* Sitecom */ 65 /* Sitecom */
66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, 67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
68 {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
68 /* Sphairon Access Systems GmbH */ 69 /* Sphairon Access Systems GmbH */
69 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, 70 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
70 /* Dick Smith Electronics */ 71 /* Dick Smith Electronics */
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e952fdab764..cb2fd01eddae 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
720 -ret_val); 720 -ret_val);
721 goto acpiphp_bus_add_out; 721 goto acpiphp_bus_add_out;
722 } 722 }
723 /*
724 * try to start anyway. We could have failed to add
725 * simply because this bus had previously been added
726 * on another add. Don't bother with the return value
727 * we just keep going.
728 */
729 ret_val = acpi_bus_start(device); 723 ret_val = acpi_bus_start(device);
730 724
731acpiphp_bus_add_out: 725acpiphp_bus_add_out:
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c74694345b6e..d58b94030ef3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
340 340
341/*
342 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
343 * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
344 * BAR0 should be 8 bytes; instead, it may be set to something like 8k
345 * (which conflicts w/ BAR1's memory range).
346 */
347static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
348{
349 if (pci_resource_len(dev, 0) != 8) {
350 struct resource *res = &dev->resource[0];
351 res->end = res->start + 8 - 1;
352 dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
353 "(incorrect header); workaround applied.\n");
354 }
355}
356DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
357
341static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, 358static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
342 unsigned size, int nr, const char *name) 359 unsigned size, int nr, const char *name)
343{ 360{
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 07d14dfdf0b4..226b3e93498c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
934 acer_backlight_device = bd; 934 acer_backlight_device = bd;
935 935
936 bd->props.power = FB_BLANK_UNBLANK; 936 bd->props.power = FB_BLANK_UNBLANK;
937 bd->props.brightness = max_brightness; 937 bd->props.brightness = read_brightness(bd);
938 bd->props.max_brightness = max_brightness; 938 bd->props.max_brightness = max_brightness;
939 backlight_update_status(bd); 939 backlight_update_status(bd);
940 return 0; 940 return 0;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e67e4feb35cb..eb603f1d55ca 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
5771 case TPACPI_THERMAL_ACPI_TMP07: 5771 case TPACPI_THERMAL_ACPI_TMP07:
5772 case TPACPI_THERMAL_ACPI_UPDT: 5772 case TPACPI_THERMAL_ACPI_UPDT:
5773 sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, 5773 sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
5774 &thermal_temp_input16_group); 5774 &thermal_temp_input8_group);
5775 break; 5775 break;
5776 case TPACPI_THERMAL_NONE: 5776 case TPACPI_THERMAL_NONE:
5777 default: 5777 default:
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index fa39e759a275..6ea3cb5837c7 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
175 dev_err(&dev->dev, "Do not pass platform_data through " 175 dev_err(&dev->dev, "Do not pass platform_data through "
176 "wm97xx_bat_set_pdata!\n"); 176 "wm97xx_bat_set_pdata!\n");
177 return -EINVAL; 177 return -EINVAL;
178 } else 178 }
179 pdata = wmdata->batt_pdata; 179
180 if (!wmdata) {
181 dev_err(&dev->dev, "No platform data supplied\n");
182 return -EINVAL;
183 }
184
185 pdata = wmdata->batt_pdata;
180 186
181 if (dev->id != -1) 187 if (dev->id != -1)
182 return -EINVAL; 188 return -EINVAL;
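
The wm97xx_battery fix adds the NULL check on wmdata that was missing before the `wmdata->batt_pdata` dereference, and flattens the dangling `} else` into straight-line code. The early-exit shape, sketched with a hypothetical pdata struct:

    /* Sketch: validate platform data before dereferencing it. */
    #include <linux/platform_device.h>

    struct demo_pdata {
            int capacity;           /* hypothetical field */
    };

    static int __devinit demo_probe(struct platform_device *pdev)
    {
            struct demo_pdata *pdata = pdev->dev.platform_data;

            if (!pdata) {
                    dev_err(&pdev->dev, "No platform data supplied\n");
                    return -EINVAL;
            }
            /* pdata->capacity is safe to use from here on */
            return 0;
    }
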
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 686ef270ecf7..b60a4c9f8f16 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
661static void print_constraints(struct regulator_dev *rdev) 661static void print_constraints(struct regulator_dev *rdev)
662{ 662{
663 struct regulation_constraints *constraints = rdev->constraints; 663 struct regulation_constraints *constraints = rdev->constraints;
664 char buf[80]; 664 char buf[80] = "";
665 int count = 0; 665 int count = 0;
666 int ret; 666 int ret;
667 667
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 76d08c282f9c..4f33a0f4a179 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
183 if (vol_map[val] >= min_vol) 183 if (vol_map[val] >= min_vol)
184 break; 184 break;
185 185
186 if (vol_map[val] > max_vol) 186 if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
187 return -EINVAL; 187 return -EINVAL;
188 188
189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), 189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
272 if (vol_map[val] >= min_vol) 272 if (vol_map[val] >= min_vol)
273 break; 273 break;
274 274
275 if (vol_map[val] > max_vol) 275 if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
276 return -EINVAL; 276 return -EINVAL;
277 277
278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), 278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
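
Both lp3971 hunks close the same out-of-bounds read: the search loop can leave `val` one past the end of vol_map[], so the old post-loop test `vol_map[val] > max_vol` indexed past the table. Testing the index first short-circuits the lookup; in isolation:

    /* Sketch: bound the index before the table access that follows a
     * search loop -- && evaluates left to right and short-circuits.
     */
    #include <linux/errno.h>

    static int demo_pick(const int *vol_map, int nmap, int min_vol, int max_vol)
    {
            int val;

            for (val = 0; val < nmap; val++)
                    if (vol_map[val] >= min_vol)
                            break;

            if (val >= nmap || vol_map[val] > max_vol)
                    return -EINVAL;

            return val;     /* index of first entry >= min_vol */
    }
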
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 999fe80c4051..62b654af9237 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
531 qdio_siga_sync_q(q); 531 qdio_siga_sync_q(q);
532 get_buf_state(q, q->first_to_check, &state, 0); 532 get_buf_state(q, q->first_to_check, &state, 0);
533 533
534 if (state == SLSB_P_INPUT_PRIMED) 534 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
535 /* more work coming */ 535 /* more work coming */
536 return 0; 536 return 0;
537 537
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
960 qdio_handle_activate_check(cdev, intparm, cstat, 960 qdio_handle_activate_check(cdev, intparm, cstat,
961 dstat); 961 dstat);
962 break; 962 break;
963 case QDIO_IRQ_STATE_STOPPED:
964 break;
963 default: 965 default:
964 WARN_ON(1); 966 WARN_ON(1);
965 } 967 }
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 0f7b493fb105..271399f62f1b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data)
671{ 671{
672 struct fc_bsg_job *job = data; 672 struct fc_bsg_job *job = data;
673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; 673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
674 int status = zfcp_ct_els->status; 674 struct fc_bsg_reply *jr = job->reply;
675 int reply_status;
676 675
677 reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; 676 jr->reply_payload_rcv_len = job->reply_payload.payload_len;
678 job->reply->reply_data.ctels_reply.status = reply_status; 677 jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
679 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 678 jr->result = zfcp_ct_els->status ? -EIO : 0;
680 job->job_done(job); 679 job->job_done(job);
681} 680}
682 681
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 477542602284..9e71ac611146 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
2516 if (info->scsi.phase == PHASE_IDLE) 2516 if (info->scsi.phase == PHASE_IDLE)
2517 fas216_kick(info); 2517 fas216_kick(info);
2518 2518
2519 mod_timer(&info->eh_timer, 30 * HZ); 2519 mod_timer(&info->eh_timer, jiffies + 30 * HZ);
2520 spin_unlock_irqrestore(&info->host_lock, flags); 2520 spin_unlock_irqrestore(&info->host_lock, flags);
2521 2521
2522 /* 2522 /*
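
The fas216 fix is worth pausing on: mod_timer() takes an absolute expiry in jiffies, so `mod_timer(&t, 30 * HZ)` asked for "thirty seconds after boot", which on any machine up longer than that is already in the past and fires immediately. The correct idiom:

    /* Sketch: mod_timer() wants an absolute jiffies value. */
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void demo_arm_timer(struct timer_list *t)
    {
            mod_timer(t, jiffies + 30 * HZ);        /* fires 30 s from now */
    }
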
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 10be9f36a4cc..2f47ae7cce91 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2009,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
2009 fcoe_interface_cleanup(fcoe); 2009 fcoe_interface_cleanup(fcoe);
2010 rtnl_unlock(); 2010 rtnl_unlock();
2011 fcoe_if_destroy(fcoe->ctlr.lp); 2011 fcoe_if_destroy(fcoe->ctlr.lp);
2012 module_put(THIS_MODULE);
2013
2012out_putdev: 2014out_putdev:
2013 dev_put(netdev); 2015 dev_put(netdev);
2014out_nodev: 2016out_nodev:
@@ -2059,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2059 } 2061 }
2060#endif 2062#endif
2061 2063
2064 if (!try_module_get(THIS_MODULE)) {
2065 rc = -EINVAL;
2066 goto out_nomod;
2067 }
2068
2062 rtnl_lock(); 2069 rtnl_lock();
2063 netdev = fcoe_if_to_netdev(buffer); 2070 netdev = fcoe_if_to_netdev(buffer);
2064 if (!netdev) { 2071 if (!netdev) {
@@ -2099,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2099 if (!fcoe_link_ok(lport)) 2106 if (!fcoe_link_ok(lport))
2100 fcoe_ctlr_link_up(&fcoe->ctlr); 2107 fcoe_ctlr_link_up(&fcoe->ctlr);
2101 2108
2102 rc = 0;
2103out_free:
2104 /* 2109 /*
2105 * Release from init in fcoe_interface_create(), on success lport 2110 * Release from init in fcoe_interface_create(), on success lport
2106 * should be holding a reference taken in fcoe_if_create(). 2111 * should be holding a reference taken in fcoe_if_create().
2107 */ 2112 */
2108 fcoe_interface_put(fcoe); 2113 fcoe_interface_put(fcoe);
2114 dev_put(netdev);
2115 rtnl_unlock();
2116 mutex_unlock(&fcoe_config_mutex);
2117
2118 return 0;
2119out_free:
2120 fcoe_interface_put(fcoe);
2109out_putdev: 2121out_putdev:
2110 dev_put(netdev); 2122 dev_put(netdev);
2111out_nodev: 2123out_nodev:
2112 rtnl_unlock(); 2124 rtnl_unlock();
2125 module_put(THIS_MODULE);
2126out_nomod:
2113 mutex_unlock(&fcoe_config_mutex); 2127 mutex_unlock(&fcoe_config_mutex);
2114 return rc; 2128 return rc;
2115} 2129}
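
fcoe_create() now pins its module with try_module_get() for as long as the created interface exists, and fcoe_destroy() drops the reference, so rmmod can no longer pull the code out from under a live instance; note how the reworked error labels unwind in reverse order of acquisition. The refcount skeleton on its own:

    /* Sketch: hold a module reference across an object's lifetime. */
    #include <linux/module.h>

    static int demo_create(void)
    {
            if (!try_module_get(THIS_MODULE))
                    return -EINVAL;         /* module is unloading */

            /* build the object; on any failure after this point,
             * undo with module_put(THIS_MODULE) before returning.
             */
            return 0;
    }

    static void demo_destroy(void)
    {
            /* tear the object down, then release the pin */
            module_put(THIS_MODULE);
    }
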
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 9823291395ad..511cb6b371ee 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1187 next_timer = fip->ctlr_ka_time; 1187 next_timer = fip->ctlr_ka_time;
1188 1188
1189 if (time_after_eq(jiffies, fip->port_ka_time)) { 1189 if (time_after_eq(jiffies, fip->port_ka_time)) {
1190 fip->port_ka_time += jiffies + 1190 fip->port_ka_time = jiffies +
1191 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1191 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1192 fip->send_port_ka = 1; 1192 fip->send_port_ka = 1;
1193 } 1193 }
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 19d711cb938c..7f4364770e4a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1890 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 1890 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1891 sp->cnt++; 1891 sp->cnt++;
1892 1892
1893 if (ep->xid <= lport->lro_xid) 1893 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
1894 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 1894 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1895 1895
1896 if (unlikely(lport->tt.frame_send(lport, fp))) 1896 if (unlikely(lport->tt.frame_send(lport, fp)))
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 881d5dfe8c74..6fde2fabfd9b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -298,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
298{ 298{
299 struct fc_lport *lport; 299 struct fc_lport *lport;
300 300
301 if (!fsp)
302 return;
303
304 lport = fsp->lp; 301 lport = fsp->lp;
305 if ((fsp->req_flags & FC_SRB_READ) && 302 if ((fsp->req_flags & FC_SRB_READ) &&
306 (lport->lro_enabled) && (lport->tt.ddp_setup)) { 303 (lport->lro_enabled) && (lport->tt.ddp_setup)) {
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 0b165024a219..7ec8ce75007c 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1800,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
1800 u32 did; 1800 u32 did;
1801 1801
1802 job->reply->reply_payload_rcv_len = 0; 1802 job->reply->reply_payload_rcv_len = 0;
1803 rsp->resid_len = job->reply_payload.payload_len; 1803 if (rsp)
1804 rsp->resid_len = job->reply_payload.payload_len;
1804 1805
1805 mutex_lock(&lport->lp_mutex); 1806 mutex_lock(&lport->lp_mutex);
1806 1807
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 02300523b234..97923bb07765 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -623,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
623 623
624 tov = ntohl(plp->fl_csp.sp_e_d_tov); 624 tov = ntohl(plp->fl_csp.sp_e_d_tov);
625 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) 625 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
626 tov /= 1000; 626 tov /= 1000000;
627 if (tov > rdata->e_d_tov) 627 if (tov > rdata->e_d_tov)
628 rdata->e_d_tov = tov; 628 rdata->e_d_tov = tov;
629 csp_seq = ntohs(plp->fl_csp.sp_tot_seq); 629 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index db6856c138fc..4ad87fd74ddd 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -992,12 +992,10 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
992 if (r2t == NULL) { 992 if (r2t == NULL) {
993 if (kfifo_out(&tcp_task->r2tqueue, 993 if (kfifo_out(&tcp_task->r2tqueue,
994 (void *)&tcp_task->r2t, sizeof(void *)) != 994 (void *)&tcp_task->r2t, sizeof(void *)) !=
995 sizeof(void *)) { 995 sizeof(void *))
996 WARN_ONCE(1, "unexpected fifo state");
997 r2t = NULL; 996 r2t = NULL;
998 } 997 else
999 998 r2t = tcp_task->r2t;
1000 r2t = tcp_task->r2t;
1001 } 999 }
1002 spin_unlock_bh(&session->lock); 1000 spin_unlock_bh(&session->lock);
1003 } 1001 }
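
The libiscsi_tcp change treats a short kfifo_out() as "no R2T queued" instead of WARNing, and only uses tcp_task->r2t when the fifo actually produced a pointer. Checking the returned byte count is the standard kfifo idiom; a sketch against the 2.6.33-era API:

    /* Sketch: kfifo_out() returns the number of bytes copied out;
     * anything short of sizeof(p) means the fifo was empty.
     */
    #include <linux/kfifo.h>

    static void *demo_pop_ptr(struct kfifo *fifo)
    {
            void *p;

            if (kfifo_out(fifo, (void *)&p, sizeof(p)) != sizeof(p))
                    return NULL;
            return p;
    }
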
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 708ea3157b60..d9b8ca5116bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -3781,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
3781 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 3781 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
3782 int i; 3782 int i;
3783 int error = 0; 3783 int error = 0;
3784 compat_uptr_t ptr;
3784 3785
3785 if (clear_user(ioc, sizeof(*ioc))) 3786 if (clear_user(ioc, sizeof(*ioc)))
3786 return -EFAULT; 3787 return -EFAULT;
@@ -3793,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
3793 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 3794 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
3794 return -EFAULT; 3795 return -EFAULT;
3795 3796
3796 for (i = 0; i < MAX_IOCTL_SGE; i++) { 3797 /*
3797 compat_uptr_t ptr; 3798 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
3799 * sense_len is not null, so prepare the 64bit value under
3800 * the same condition.
3801 */
3802 if (ioc->sense_len) {
3803 void __user **sense_ioc_ptr =
3804 (void __user **)(ioc->frame.raw + ioc->sense_off);
3805 compat_uptr_t *sense_cioc_ptr =
3806 (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
3807 if (get_user(ptr, sense_cioc_ptr) ||
3808 put_user(compat_ptr(ptr), sense_ioc_ptr))
3809 return -EFAULT;
3810 }
3798 3811
3812 for (i = 0; i < MAX_IOCTL_SGE; i++) {
3799 if (get_user(ptr, &cioc->sgl[i].iov_base) || 3813 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
3800 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 3814 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
3801 copy_in_user(&ioc->sgl[i].iov_len, 3815 copy_in_user(&ioc->sgl[i].iov_len,
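
In the megaraid compat path every 32-bit user pointer has to be widened with compat_ptr() before the native ioctl handler runs; the fix adds the sense-buffer pointer, which had been left as a raw compat_uptr_t inside the frame. The widening idiom on its own (hypothetical wrapper; the pointer typing follows the patch):

    /* Sketch: widen a 32-bit user pointer inside a compat ioctl. */
    #include <linux/compat.h>
    #include <linux/uaccess.h>

    static int demo_widen(compat_uptr_t __user *src, void __user **dst)
    {
            compat_uptr_t ptr;

            if (get_user(ptr, src) ||
                put_user(compat_ptr(ptr), dst))
                    return -EFAULT;
            return 0;
    }
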
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f61fb8d01330..8bc6f53691e9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
456extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
457 456
458#endif /* _QLA_GBL_H */ 457#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ffd0efdff40e..6fc63b98818c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1917 struct rsp_que *rsp; 1917 struct rsp_que *rsp;
1918 struct device_reg_24xx __iomem *reg; 1918 struct device_reg_24xx __iomem *reg;
1919 struct scsi_qla_host *vha; 1919 struct scsi_qla_host *vha;
1920 unsigned long flags;
1920 1921
1921 rsp = (struct rsp_que *) dev_id; 1922 rsp = (struct rsp_que *) dev_id;
1922 if (!rsp) { 1923 if (!rsp) {
@@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1927 ha = rsp->hw; 1928 ha = rsp->hw;
1928 reg = &ha->iobase->isp24; 1929 reg = &ha->iobase->isp24;
1929 1930
1930 spin_lock_irq(&ha->hardware_lock); 1931 spin_lock_irqsave(&ha->hardware_lock, flags);
1931 1932
1932 vha = qla25xx_get_host(rsp); 1933 vha = pci_get_drvdata(ha->pdev);
1933 qla24xx_process_response_queue(vha, rsp); 1934 qla24xx_process_response_queue(vha, rsp);
1934 if (!ha->flags.disable_msix_handshake) { 1935 if (!ha->flags.disable_msix_handshake) {
1935 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1936 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1936 RD_REG_DWORD_RELAXED(&reg->hccr); 1937 RD_REG_DWORD_RELAXED(&reg->hccr);
1937 } 1938 }
1938 spin_unlock_irq(&ha->hardware_lock); 1939 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1939 1940
1940 return IRQ_HANDLED; 1941 return IRQ_HANDLED;
1941} 1942}
@@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1946 struct qla_hw_data *ha; 1947 struct qla_hw_data *ha;
1947 struct rsp_que *rsp; 1948 struct rsp_que *rsp;
1948 struct device_reg_24xx __iomem *reg; 1949 struct device_reg_24xx __iomem *reg;
1950 unsigned long flags;
1949 1951
1950 rsp = (struct rsp_que *) dev_id; 1952 rsp = (struct rsp_que *) dev_id;
1951 if (!rsp) { 1953 if (!rsp) {
@@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1958 /* Clear the interrupt, if enabled, for this response queue */ 1960 /* Clear the interrupt, if enabled, for this response queue */
1959 if (rsp->options & ~BIT_6) { 1961 if (rsp->options & ~BIT_6) {
1960 reg = &ha->iobase->isp24; 1962 reg = &ha->iobase->isp24;
1961 spin_lock_irq(&ha->hardware_lock); 1963 spin_lock_irqsave(&ha->hardware_lock, flags);
1962 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1964 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1963 RD_REG_DWORD_RELAXED(&reg->hccr); 1965 RD_REG_DWORD_RELAXED(&reg->hccr);
1964 spin_unlock_irq(&ha->hardware_lock); 1966 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1965 } 1967 }
1966 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 1968 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1967 1969
@@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1979 uint32_t stat; 1981 uint32_t stat;
1980 uint32_t hccr; 1982 uint32_t hccr;
1981 uint16_t mb[4]; 1983 uint16_t mb[4];
1984 unsigned long flags;
1982 1985
1983 rsp = (struct rsp_que *) dev_id; 1986 rsp = (struct rsp_que *) dev_id;
1984 if (!rsp) { 1987 if (!rsp) {
@@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1990 reg = &ha->iobase->isp24; 1993 reg = &ha->iobase->isp24;
1991 status = 0; 1994 status = 0;
1992 1995
1993 spin_lock_irq(&ha->hardware_lock); 1996 spin_lock_irqsave(&ha->hardware_lock, flags);
1994 vha = pci_get_drvdata(ha->pdev); 1997 vha = pci_get_drvdata(ha->pdev);
1995 do { 1998 do {
1996 stat = RD_REG_DWORD(&reg->host_status); 1999 stat = RD_REG_DWORD(&reg->host_status);
@@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id)
2039 } 2042 }
2040 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2043 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2041 } while (0); 2044 } while (0);
2042 spin_unlock_irq(&ha->hardware_lock); 2045 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2043 2046
2044 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2047 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2045 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2048 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2277 msix->rsp = rsp; 2280 msix->rsp = rsp;
2278 return ret; 2281 return ret;
2279} 2282}
2280
2281struct scsi_qla_host *
2282qla25xx_get_host(struct rsp_que *rsp)
2283{
2284 srb_t *sp;
2285 struct qla_hw_data *ha = rsp->hw;
2286 struct scsi_qla_host *vha = NULL;
2287 struct sts_entry_24xx *pkt;
2288 struct req_que *req;
2289 uint16_t que;
2290 uint32_t handle;
2291
2292 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2293 que = MSW(pkt->handle);
2294 handle = (uint32_t) LSW(pkt->handle);
2295 req = ha->req_q_map[que];
2296 if (handle < MAX_OUTSTANDING_COMMANDS) {
2297 sp = req->outstanding_cmds[handle];
2298 if (sp)
2299 return sp->fcport->vha;
2300 else
2301 goto base_que;
2302 }
2303base_que:
2304 vha = pci_get_drvdata(ha->pdev);
2305 return vha;
2306}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index b901aa267e7d..ff17dee28613 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -636,13 +636,15 @@ failed:
636 636
637static void qla_do_work(struct work_struct *work) 637static void qla_do_work(struct work_struct *work)
638{ 638{
639 unsigned long flags;
639 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); 640 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
640 struct scsi_qla_host *vha; 641 struct scsi_qla_host *vha;
642 struct qla_hw_data *ha = rsp->hw;
641 643
642 spin_lock_irq(&rsp->hw->hardware_lock); 644 spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
643 vha = qla25xx_get_host(rsp); 645 vha = pci_get_drvdata(ha->pdev);
644 qla24xx_process_response_queue(vha, rsp); 646 qla24xx_process_response_queue(vha, rsp);
645 spin_unlock_irq(&rsp->hw->hardware_lock); 647 spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
646} 648}
647 649
648/* create response queue */ 650/* create response queue */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c3e37c8e7e26..e9b15c3746fa 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
83 83
84#define PASS_LIMIT 256 84#define PASS_LIMIT 256
85 85
86#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
87
88
86/* 89/*
87 * We default to IRQ0 for the "no irq" hack. Some 90 * We default to IRQ0 for the "no irq" hack. Some
88 * machine types want others as well - they're free 91 * machine types want others as well - they're free
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
1792 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1795 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
1793 spin_unlock_irqrestore(&up->port.lock, flags); 1796 spin_unlock_irqrestore(&up->port.lock, flags);
1794 1797
1795 return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; 1798 return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
1796} 1799}
1797 1800
1798static unsigned int serial8250_get_mctrl(struct uart_port *port) 1801static unsigned int serial8250_get_mctrl(struct uart_port *port)
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
1850 spin_unlock_irqrestore(&up->port.lock, flags); 1853 spin_unlock_irqrestore(&up->port.lock, flags);
1851} 1854}
1852 1855
1853#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
1854
1855/* 1856/*
1856 * Wait for transmitter & holding register to empty 1857 * Wait for transmitter & holding register to empty
1857 */ 1858 */
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5681ebed9c65..03dfd27c4bfb 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
494#endif 494#endif
495 break; 495 break;
496 case SSB_BUSTYPE_SDIO: 496 case SSB_BUSTYPE_SDIO:
497#ifdef CONFIG_SSB_SDIO 497#ifdef CONFIG_SSB_SDIOHOST
498 sdev->irq = bus->host_sdio->dev.irq;
499 dev->parent = &bus->host_sdio->dev; 498 dev->parent = &bus->host_sdio->dev;
500#endif 499#endif
501 break; 500 break;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6e8bcdfd23b4..a678186f218f 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
1312 void __user *addr = as->userurb; 1312 void __user *addr = as->userurb;
1313 unsigned int i; 1313 unsigned int i;
1314 1314
1315 if (as->userbuffer) 1315 if (as->userbuffer && urb->actual_length)
1316 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1316 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
1317 urb->transfer_buffer_length)) 1317 urb->actual_length))
1318 goto err_out; 1318 goto err_out;
1319 if (put_user(as->status, &userurb->status)) 1319 if (put_user(as->status, &userurb->status))
1320 goto err_out; 1320 goto err_out;
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg)
1334 } 1334 }
1335 } 1335 }
1336 1336
1337 free_async(as);
1338
1339 if (put_user(addr, (void __user * __user *)arg)) 1337 if (put_user(addr, (void __user * __user *)arg))
1340 return -EFAULT; 1338 return -EFAULT;
1341 return 0; 1339 return 0;
1342 1340
1343err_out: 1341err_out:
1344 free_async(as);
1345 return -EFAULT; 1342 return -EFAULT;
1346} 1343}
1347 1344
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps)
1371static int proc_reapurb(struct dev_state *ps, void __user *arg) 1368static int proc_reapurb(struct dev_state *ps, void __user *arg)
1372{ 1369{
1373 struct async *as = reap_as(ps); 1370 struct async *as = reap_as(ps);
1374 if (as) 1371 if (as) {
1375 return processcompl(as, (void __user * __user *)arg); 1372 int retval = processcompl(as, (void __user * __user *)arg);
1373 free_async(as);
1374 return retval;
1375 }
1376 if (signal_pending(current)) 1376 if (signal_pending(current))
1377 return -EINTR; 1377 return -EINTR;
1378 return -EIO; 1378 return -EIO;
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
1380 1380
1381static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) 1381static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
1382{ 1382{
1383 int retval;
1383 struct async *as; 1384 struct async *as;
1384 1385
1385 if (!(as = async_getcompleted(ps))) 1386 as = async_getcompleted(ps);
1386 return -EAGAIN; 1387 retval = -EAGAIN;
1387 return processcompl(as, (void __user * __user *)arg); 1388 if (as) {
1389 retval = processcompl(as, (void __user * __user *)arg);
1390 free_async(as);
1391 }
1392 return retval;
1388} 1393}
1389 1394
1390#ifdef CONFIG_COMPAT 1395#ifdef CONFIG_COMPAT
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1475 void __user *addr = as->userurb; 1480 void __user *addr = as->userurb;
1476 unsigned int i; 1481 unsigned int i;
1477 1482
1478 if (as->userbuffer) 1483 if (as->userbuffer && urb->actual_length)
1479 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1484 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
1480 urb->transfer_buffer_length)) 1485 urb->actual_length))
1481 return -EFAULT; 1486 return -EFAULT;
1482 if (put_user(as->status, &userurb->status)) 1487 if (put_user(as->status, &userurb->status))
1483 return -EFAULT; 1488 return -EFAULT;
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1497 } 1502 }
1498 } 1503 }
1499 1504
1500 free_async(as);
1501 if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) 1505 if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
1502 return -EFAULT; 1506 return -EFAULT;
1503 return 0; 1507 return 0;
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1506static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) 1510static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
1507{ 1511{
1508 struct async *as = reap_as(ps); 1512 struct async *as = reap_as(ps);
1509 if (as) 1513 if (as) {
1510 return processcompl_compat(as, (void __user * __user *)arg); 1514 int retval = processcompl_compat(as, (void __user * __user *)arg);
1515 free_async(as);
1516 return retval;
1517 }
1511 if (signal_pending(current)) 1518 if (signal_pending(current))
1512 return -EINTR; 1519 return -EINTR;
1513 return -EIO; 1520 return -EIO;
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
1515 1522
1516static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) 1523static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
1517{ 1524{
1525 int retval;
1518 struct async *as; 1526 struct async *as;
1519 1527
1520 if (!(as = async_getcompleted(ps))) 1528 retval = -EAGAIN;
1521 return -EAGAIN; 1529 as = async_getcompleted(ps);
1522 return processcompl_compat(as, (void __user * __user *)arg); 1530 if (as) {
1531 retval = processcompl_compat(as, (void __user * __user *)arg);
1532 free_async(as);
1533 }
1534 return retval;
1523} 1535}
1524 1536
1525 1537
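
Two distinct fixes land in devio.c: completions now copy back only urb->actual_length (not the full transfer_buffer_length, which could hand stale kernel buffer contents to userspace), and free_async() moves from processcompl*() into the four reap callers, giving every path, success or -EFAULT, exactly one free. Distilled to its shape, the caller-owns-the-free pattern the patch converges on looks like:

    /* Sketch of the patched reap shape: the caller obtains the async,
     * the worker only reports, the caller always frees.
     */
    static int demo_reap(struct dev_state *ps, void __user *arg)
    {
            struct async *as = reap_as(ps);         /* may block */
            int retval;

            if (!as)
                    return signal_pending(current) ? -EINTR : -EIO;

            retval = processcompl(as, (void __user * __user *)arg);
            free_async(as);                         /* single free point */
            return retval;
    }
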
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 0a577d5694fd..d4f0db58a8ad 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -358,7 +358,7 @@ done:
358 * b15: bmType (0 == data) 358 * b15: bmType (0 == data)
359 */ 359 */
360 len = skb->len; 360 len = skb->len;
361 put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2)); 361 put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
362 362
363 /* add a zero-length EEM packet, if needed */ 363 /* add a zero-length EEM packet, if needed */
364 if (padlen) 364 if (padlen)
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port,
464 } 464 }
465 465
466 /* validate CRC */ 466 /* validate CRC */
467 crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
468 if (header & BIT(14)) { 467 if (header & BIT(14)) {
469 crc = get_unaligned_le32(skb->data + len 468 crc = get_unaligned_le32(skb->data + len
470 - ETH_FCS_LEN); 469 - ETH_FCS_LEN);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 429560100b10..76496f5d272c 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -29,7 +29,7 @@
29#if defined USB_ETH_RNDIS 29#if defined USB_ETH_RNDIS
30# undef USB_ETH_RNDIS 30# undef USB_ETH_RNDIS
31#endif 31#endif
32#ifdef CONFIG_USB_ETH_RNDIS 32#ifdef CONFIG_USB_G_MULTI_RNDIS
33# define USB_ETH_RNDIS y 33# define USB_ETH_RNDIS y
34#endif 34#endif
35 35
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index e220fb8091a3..8b45145b9136 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -26,6 +26,7 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/err.h>
29 30
30#include <linux/usb/ch9.h> 31#include <linux/usb/ch9.h>
31#include <linux/usb/gadget.h> 32#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 4b5dbd0127f5..5fc80a104150 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2582,6 +2582,7 @@ err:
2582 hsotg->gadget.dev.driver = NULL; 2582 hsotg->gadget.dev.driver = NULL;
2583 return ret; 2583 return ret;
2584} 2584}
2585EXPORT_SYMBOL(usb_gadget_register_driver);
2585 2586
2586int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2587int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2587{ 2588{
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c75d9270c752..19372673bf09 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
196 if (hostpc_reg) { 196 if (hostpc_reg) {
197 u32 t3; 197 u32 t3;
198 198
199 spin_unlock_irq(&ehci->lock);
199 msleep(5);/* 5ms for HCD enter low pwr mode */ 200 msleep(5);/* 5ms for HCD enter low pwr mode */
201 spin_lock_irq(&ehci->lock);
200 t3 = ehci_readl(ehci, hostpc_reg); 202 t3 = ehci_readl(ehci, hostpc_reg);
201 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); 203 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
202 t3 = ehci_readl(ehci, hostpc_reg); 204 t3 = ehci_readl(ehci, hostpc_reg);
@@ -904,17 +906,18 @@ static int ehci_hub_control (
904 if ((temp & PORT_PE) == 0 906 if ((temp & PORT_PE) == 0
905 || (temp & PORT_RESET) != 0) 907 || (temp & PORT_RESET) != 0)
906 goto error; 908 goto error;
907 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); 909
908 /* After above check the port must be connected. 910 /* After above check the port must be connected.
909 * Set appropriate bit thus could put phy into low power 911 * Set appropriate bit thus could put phy into low power
910 * mode if we have hostpc feature 912 * mode if we have hostpc feature
911 */ 913 */
914 temp &= ~PORT_WKCONN_E;
915 temp |= PORT_WKDISC_E | PORT_WKOC_E;
916 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
912 if (hostpc_reg) { 917 if (hostpc_reg) {
913 temp &= ~PORT_WKCONN_E; 918 spin_unlock_irqrestore(&ehci->lock, flags);
914 temp |= (PORT_WKDISC_E | PORT_WKOC_E);
915 ehci_writel(ehci, temp | PORT_SUSPEND,
916 status_reg);
917 msleep(5);/* 5ms for HCD enter low pwr mode */ 919 msleep(5);/* 5ms for HCD enter low pwr mode */
920 spin_lock_irqsave(&ehci->lock, flags);
918 temp1 = ehci_readl(ehci, hostpc_reg); 921 temp1 = ehci_readl(ehci, hostpc_reg);
919 ehci_writel(ehci, temp1 | HOSTPC_PHCD, 922 ehci_writel(ehci, temp1 | HOSTPC_PHCD,
920 hostpc_reg); 923 hostpc_reg);
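
Both ehci-hub hunks release ehci->lock around the 5 ms msleep(): sleeping while holding a spinlock (here with interrupts disabled) is illegal, so the lock is dropped for the delay and retaken afterwards, and anything read before the sleep must be treated as stale. The pattern in miniature, with a hypothetical demo_lock:

    /* Sketch: never sleep under a spinlock; drop it around the delay. */
    #include <linux/spinlock.h>
    #include <linux/delay.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_settle(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            /* ... program the controller ... */
            spin_unlock_irqrestore(&demo_lock, flags);

            msleep(5);              /* sleeping is only legal unlocked */

            spin_lock_irqsave(&demo_lock, flags);
            /* re-read hardware state: it may have changed while unlocked */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
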
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index d224ab467a40..e1232890c78b 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
105 if (ep->td_base) 105 if (ep->td_base)
106 cpm_muram_free(cpm_muram_offset(ep->td_base)); 106 cpm_muram_free(cpm_muram_offset(ep->td_base));
107 107
108 if (ep->conf_frame_Q) { 108 if (kfifo_initialized(&ep->conf_frame_Q)) {
109 size = cq_howmany(&ep->conf_frame_Q); 109 size = cq_howmany(&ep->conf_frame_Q);
110 for (; size; size--) { 110 for (; size; size--) {
111 struct packet *pkt = cq_get(&ep->conf_frame_Q); 111 struct packet *pkt = cq_get(&ep->conf_frame_Q);
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
115 cq_delete(&ep->conf_frame_Q); 115 cq_delete(&ep->conf_frame_Q);
116 } 116 }
117 117
118 if (ep->empty_frame_Q) { 118 if (kfifo_initialized(&ep->empty_frame_Q)) {
119 size = cq_howmany(&ep->empty_frame_Q); 119 size = cq_howmany(&ep->empty_frame_Q);
120 for (; size; size--) { 120 for (; size; size--) {
121 struct packet *pkt = cq_get(&ep->empty_frame_Q); 121 struct packet *pkt = cq_get(&ep->empty_frame_Q);
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
125 cq_delete(&ep->empty_frame_Q); 125 cq_delete(&ep->empty_frame_Q);
126 } 126 }
127 127
128 if (ep->dummy_packets_Q) { 128 if (kfifo_initialized(&ep->dummy_packets_Q)) {
129 size = cq_howmany(&ep->dummy_packets_Q); 129 size = cq_howmany(&ep->dummy_packets_Q);
130 for (; size; size--) { 130 for (; size; size--) {
131 u8 *buff = cq_get(&ep->dummy_packets_Q); 131 u8 *buff = cq_get(&ep->dummy_packets_Q);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 0ceec123ddfd..bee558aed427 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,9 @@
35#include <linux/usb.h> 35#include <linux/usb.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/mm.h>
38#include <linux/irq.h> 39#include <linux/irq.h>
40#include <asm/cacheflush.h>
39 41
40#include "../core/hcd.h" 42#include "../core/hcd.h"
41#include "r8a66597.h" 43#include "r8a66597.h"
@@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
820 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); 822 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
821} 823}
822 824
825static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
826 int status)
827__releases(r8a66597->lock)
828__acquires(r8a66597->lock)
829{
830 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
831 void *ptr;
832
833 for (ptr = urb->transfer_buffer;
834 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
835 ptr += PAGE_SIZE)
836 flush_dcache_page(virt_to_page(ptr));
837 }
838
839 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
840 spin_unlock(&r8a66597->lock);
841 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
842 spin_lock(&r8a66597->lock);
843}
844
823/* this function must be called with interrupt disabled */ 845/* this function must be called with interrupt disabled */
824static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) 846static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
825{ 847{
@@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
838 list_del(&td->queue); 860 list_del(&td->queue);
839 kfree(td); 861 kfree(td);
840 862
841 if (urb) { 863 if (urb)
842 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), 864 r8a66597_urb_done(r8a66597, urb, -ENODEV);
843 urb);
844 865
845 spin_unlock(&r8a66597->lock);
846 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
847 -ENODEV);
848 spin_lock(&r8a66597->lock);
849 }
850 break; 866 break;
851 } 867 }
852} 868}
@@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
1006/* this function must be called with interrupt disabled */ 1022/* this function must be called with interrupt disabled */
1007static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 1023static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1008 u16 syssts) 1024 u16 syssts)
1025__releases(r8a66597->lock)
1026__acquires(r8a66597->lock)
1009{ 1027{
1010 if (syssts == SE0) { 1028 if (syssts == SE0) {
1011 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1029 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1023 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); 1041 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1024 } 1042 }
1025 1043
1044 spin_unlock(&r8a66597->lock);
1026 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); 1045 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
1046 spin_lock(&r8a66597->lock);
1027} 1047}
1028 1048
1029/* this function must be called with interrupt disabled */ 1049/* this function must be called with interrupt disabled */
@@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
1283 if (usb_pipeisoc(urb->pipe)) 1303 if (usb_pipeisoc(urb->pipe))
1284 urb->start_frame = r8a66597_get_frame(hcd); 1304 urb->start_frame = r8a66597_get_frame(hcd);
1285 1305
1286 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); 1306 r8a66597_urb_done(r8a66597, urb, status);
1287 spin_unlock(&r8a66597->lock);
1288 usb_hcd_giveback_urb(hcd, urb, status);
1289 spin_lock(&r8a66597->lock);
1290 } 1307 }
1291 1308
1292 if (restart) { 1309 if (restart) {
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 0025847743f3..8b37a4b9839e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = {
3245 { USB_DEVICE(0x0711, 0x0902) }, 3245 { USB_DEVICE(0x0711, 0x0902) },
3246 { USB_DEVICE(0x0711, 0x0903) }, 3246 { USB_DEVICE(0x0711, 0x0903) },
3247 { USB_DEVICE(0x0711, 0x0918) }, 3247 { USB_DEVICE(0x0711, 0x0918) },
3248 { USB_DEVICE(0x0711, 0x0920) },
3248 { USB_DEVICE(0x182d, 0x021c) }, 3249 { USB_DEVICE(0x182d, 0x021c) },
3249 { USB_DEVICE(0x182d, 0x0269) }, 3250 { USB_DEVICE(0x182d, 0x0269) },
3250 { } 3251 { }
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index de56b3d743d7..3d2d3e549bd1 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -44,6 +44,7 @@ config ISP1301_OMAP
44config USB_ULPI 44config USB_ULPI
45 bool "Generic ULPI Transceiver Driver" 45 bool "Generic ULPI Transceiver Driver"
46 depends on ARM 46 depends on ARM
47 select USB_OTG_UTILS
47 help 48 help
48 Enable this to support ULPI connected USB OTG transceivers which 49 Enable this to support ULPI connected USB OTG transceivers which
49 are likely found on embedded boards. 50 are likely found on embedded boards.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 216f187582ab..7638828e7317 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -50,7 +50,7 @@
50 * Version Information 50 * Version Information
51 */ 51 */
52#define DRIVER_VERSION "v1.5.0" 52#define DRIVER_VERSION "v1.5.0"
53#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" 53#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
54#define DRIVER_DESC "USB FTDI Serial Converters Driver" 54#define DRIVER_DESC "USB FTDI Serial Converters Driver"
55 55
56static int debug; 56static int debug;
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
145 145
146 146
147 147
148/*
149 * Device ID not listed? Test via module params product/vendor or
150 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
151 */
148static struct usb_device_id id_table_combined [] = { 152static struct usb_device_id id_table_combined [] = {
149 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 153 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
150 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 154 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
151 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 155 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
152 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, 157 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
153 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, 158 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, 159 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = {
552 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, 557 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
553 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, 558 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
554 /* 559 /*
555 * Due to many user requests for multiple ELV devices we enable 560 * ELV devices:
556 * them by default.
557 */ 561 */
562 { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
563 { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
564 { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
565 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
566 { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
567 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
568 { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
569 { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
558 { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, 570 { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
559 { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, 571 { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
560 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, 572 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = {
571 { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, 583 { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
572 { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, 584 { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
573 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, 585 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
586 { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
574 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, 587 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
588 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
575 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, 589 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
576 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, 590 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
577 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, 591 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
578 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, 592 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
593 { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
594 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
595 { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
596 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
579 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 597 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
580 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 598 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
581 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, 599 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = {
697 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 715 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
698 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 716 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
699 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, 717 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
718 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
700 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, 719 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
701 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 720 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
702 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, 721 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index da92b4952ffb..c8951aeed983 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,8 @@
38/* www.candapter.com Ewert Energy Systems CANdapter device */ 38/* www.candapter.com Ewert Energy Systems CANdapter device */
39#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ 39#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
40 40
41#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
42
41/* OOCDlink by Joern Kaipf <joernk@web.de> 43/* OOCDlink by Joern Kaipf <joernk@web.de>
42 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ 44 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
43#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ 45#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
@@ -161,22 +163,37 @@
161/* 163/*
162 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 164 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
163 * All of these devices use FTDI's vendor ID (0x0403). 165 * All of these devices use FTDI's vendor ID (0x0403).
166 * Further IDs taken from ELV Windows .inf file.
164 * 167 *
165 * The previously included PID for the UO 100 module was incorrect. 168 * The previously included PID for the UO 100 module was incorrect.
166 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). 169 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
167 * 170 *
168 * Armin Laeuger originally sent the PID for the UM 100 module. 171 * Armin Laeuger originally sent the PID for the UM 100 module.
169 */ 172 */
173#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
174#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
175#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
176#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */
177#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */
178#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */
179#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */
180#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */
170#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ 181#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
171#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ 182#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
172#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ 183#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
184#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
185#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
186#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
187#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
173#define FTDI_ELV_EM1010PC_PID	0xE0EF	/* Energy monitor EM 1010 PC */ 188#define FTDI_ELV_EM1010PC_PID	0xE0EF	/* Energy monitor EM 1010 PC */
174#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ 189#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
175#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ 190#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
176#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ 191#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
177#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ 192#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
178#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ 193#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
194#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */
179#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ 195#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
196#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */
180#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ 197#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
181#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ 198#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
182#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ 199#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
@@ -968,6 +985,7 @@
968#define PAPOUCH_VID 0x5050 /* Vendor ID */ 985#define PAPOUCH_VID 0x5050 /* Vendor ID */
969#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ 986#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
970#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ 987#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
988#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
971 989
972/* 990/*
973 * Marvell SheevaPlug 991 * Marvell SheevaPlug
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ac1b6449fb6a..3eb6143bb646 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = {
298 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 298 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
300 }, 300 },
301 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
301 302
302 { } 303 { }
303}; 304};
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c932f9053188..49575fba3756 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
941UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, 941UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
942 "Microtech", 942 "Microtech",
943 "USB-SCSI-DB25", 943 "USB-SCSI-DB25",
944 US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, 944 US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
945 US_FL_SCM_MULT_TARG ), 945 US_FL_SCM_MULT_TARG ),
946 946
947UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, 947UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index eb12182b2059..d25df51bb0d2 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
161 return 0; 161 return 0;
162} 162}
163 163
164static void efifb_destroy(struct fb_info *info)
165{
166 if (info->screen_base)
167 iounmap(info->screen_base);
168 release_mem_region(info->aperture_base, info->aperture_size);
169 framebuffer_release(info);
170}
171
164static struct fb_ops efifb_ops = { 172static struct fb_ops efifb_ops = {
165 .owner = THIS_MODULE, 173 .owner = THIS_MODULE,
174 .fb_destroy = efifb_destroy,
166 .fb_setcolreg = efifb_setcolreg, 175 .fb_setcolreg = efifb_setcolreg,
167 .fb_fillrect = cfb_fillrect, 176 .fb_fillrect = cfb_fillrect,
168 .fb_copyarea = cfb_copyarea, 177 .fb_copyarea = cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
281 info->par = NULL; 290 info->par = NULL;
282 291
283 info->aperture_base = efifb_fix.smem_start; 292 info->aperture_base = efifb_fix.smem_start;
284 info->aperture_size = size_total; 293 info->aperture_size = size_remap;
285 294
286 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 295 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
287 if (!info->screen_base) { 296 if (!info->screen_base) {
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index c7b3f9df2317..2159e668751c 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -1,9 +1,8 @@
1/* 1/*
2 * Blackfin On-Chip Watchdog Driver 2 * Blackfin On-Chip Watchdog Driver
3 * Supports BF53[123]/BF53[467]/BF54[2489]/BF561
4 * 3 *
5 * Originally based on softdog.c 4 * Originally based on softdog.c
6 * Copyright 2006-2007 Analog Devices Inc. 5 * Copyright 2006-2010 Analog Devices Inc.
7 * Copyright 2006-2007 Michele d'Amico 6 * Copyright 2006-2007 Michele d'Amico
8 * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> 7 * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
9 * 8 *
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
137 */ 136 */
138static int bfin_wdt_set_timeout(unsigned long t) 137static int bfin_wdt_set_timeout(unsigned long t)
139{ 138{
140 u32 cnt; 139 u32 cnt, max_t, sclk;
141 unsigned long flags; 140 unsigned long flags;
142 141
143 stampit(); 142 sclk = get_sclk();
143 max_t = -1 / sclk;
144 cnt = t * sclk;
145 stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
144 146
145 cnt = t * get_sclk(); 147 if (t > max_t) {
146 if (cnt < get_sclk()) {
147 printk(KERN_WARNING PFX "timeout value is too large\n"); 148 printk(KERN_WARNING PFX "timeout value is too large\n");
148 return -EINVAL; 149 return -EINVAL;
149 } 150 }
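
The rewritten bfin_wdt_set_timeout() computes the largest representable timeout up front instead of testing the already-multiplied (and possibly wrapped) cnt value; in u32 arithmetic, -1 / sclk is UINT_MAX / sclk. A standalone recomputation with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t sclk = 50000000;               /* pretend 50 MHz system clock */
    uint32_t max_t = (uint32_t)-1 / sclk;   /* largest safe timeout, seconds */
    unsigned long t = 100;                  /* requested timeout, seconds */

    if (t > max_t) {
        fprintf(stderr, "timeout value is too large (max %us)\n",
                (unsigned)max_t);
        return 1;
    }
    printf("cnt = %#x\n", (unsigned)(t * sclk));
    return 0;
}
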
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296a..7d6c2139891d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
84 84
85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) 85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
86{ 86{
87 char *options; 87 char *options, *tmp_options;
88 substring_t args[MAX_OPT_ARGS]; 88 substring_t args[MAX_OPT_ARGS];
89 char *p; 89 char *p;
90 int option = 0; 90 int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
102 if (!opts) 102 if (!opts)
103 return 0; 103 return 0;
104 104
105 options = kstrdup(opts, GFP_KERNEL); 105 tmp_options = kstrdup(opts, GFP_KERNEL);
106 if (!options) 106 if (!tmp_options) {
107 ret = -ENOMEM;
107 goto fail_option_alloc; 108 goto fail_option_alloc;
109 }
110 options = tmp_options;
108 111
109 while ((p = strsep(&options, ",")) != NULL) { 112 while ((p = strsep(&options, ",")) != NULL) {
110 int token; 113 int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
159 break; 162 break;
160 case Opt_cache: 163 case Opt_cache:
161 s = match_strdup(&args[0]); 164 s = match_strdup(&args[0]);
162 if (!s) 165 if (!s) {
163 goto fail_option_alloc; 166 ret = -ENOMEM;
167 P9_DPRINTK(P9_DEBUG_ERROR,
168 "problem allocating copy of cache arg\n");
169 goto free_and_return;
170 }
164 171
165 if (strcmp(s, "loose") == 0) 172 if (strcmp(s, "loose") == 0)
166 v9ses->cache = CACHE_LOOSE; 173 v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
173 180
174 case Opt_access: 181 case Opt_access:
175 s = match_strdup(&args[0]); 182 s = match_strdup(&args[0]);
176 if (!s) 183 if (!s) {
177 goto fail_option_alloc; 184 ret = -ENOMEM;
185 P9_DPRINTK(P9_DEBUG_ERROR,
186 "problem allocating copy of access arg\n");
187 goto free_and_return;
188 }
178 189
179 v9ses->flags &= ~V9FS_ACCESS_MASK; 190 v9ses->flags &= ~V9FS_ACCESS_MASK;
180 if (strcmp(s, "user") == 0) 191 if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
194 continue; 205 continue;
195 } 206 }
196 } 207 }
197 kfree(options);
198 return ret;
199 208
209free_and_return:
210 kfree(tmp_options);
200fail_option_alloc: 211fail_option_alloc:
201 P9_DPRINTK(P9_DEBUG_ERROR, 212 return ret;
202 "failed to allocate copy of option argument\n");
203 return -ENOMEM;
204} 213}
205 214
206/** 215/**
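
The v9fs_parse_options() rework exists because strsep() advances the pointer it is given; freeing options after the loop would free a pointer into the middle of the allocation. Keeping the original kstrdup() result in tmp_options and freeing that is the fix. A standalone illustration of the same discipline:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *tmp_options = strdup("cache=loose,access=user");
    char *options = tmp_options;   /* walking cursor for strsep() */
    char *p;

    if (!tmp_options)
        return 1;
    while ((p = strsep(&options, ",")) != NULL)
        printf("token: %s\n", p);
    free(tmp_options);             /* original pointer, not the cursor */
    return 0;
}
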
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e35865..ed835836e0dc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
60int v9fs_uflags2omode(int uflags, int extended); 60int v9fs_uflags2omode(int uflags, int extended);
61 61
62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); 62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
63void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a088..74a0461a9ac0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
257 return total; 257 return total;
258} 258}
259 259
260static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
261 int datasync)
262{
263 struct p9_fid *fid;
264 struct p9_wstat wstat;
265 int retval;
266
267 P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
268 dentry, datasync);
269
270 fid = filp->private_data;
271 v9fs_blank_wstat(&wstat);
272
273 retval = p9_client_wstat(fid, &wstat);
274 return retval;
275}
276
260static const struct file_operations v9fs_cached_file_operations = { 277static const struct file_operations v9fs_cached_file_operations = {
261 .llseek = generic_file_llseek, 278 .llseek = generic_file_llseek,
262 .read = do_sync_read, 279 .read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
266 .release = v9fs_dir_release, 283 .release = v9fs_dir_release,
267 .lock = v9fs_file_lock, 284 .lock = v9fs_file_lock,
268 .mmap = generic_file_readonly_mmap, 285 .mmap = generic_file_readonly_mmap,
286 .fsync = v9fs_file_fsync,
269}; 287};
270 288
271const struct file_operations v9fs_file_operations = { 289const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
276 .release = v9fs_dir_release, 294 .release = v9fs_dir_release,
277 .lock = v9fs_file_lock, 295 .lock = v9fs_file_lock,
278 .mmap = generic_file_readonly_mmap, 296 .mmap = generic_file_readonly_mmap,
297 .fsync = v9fs_file_fsync,
279}; 298};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9d03d1ebca6f..a407fa3388c0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
176 * 176 *
177 */ 177 */
178 178
179static void 179void
180v9fs_blank_wstat(struct p9_wstat *wstat) 180v9fs_blank_wstat(struct p9_wstat *wstat)
181{ 181{
182 wstat->type = ~0; 182 wstat->type = ~0;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 33baf27fac78..34ddda888e63 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
873 brelse(bh); 873 brelse(bh);
874 874
875 unacquire_priv_sbp: 875 unacquire_priv_sbp:
876 kfree(befs_sb->mount_opts.iocharset);
876 kfree(sb->s_fs_info); 877 kfree(sb->s_fs_info);
877 878
878 unacquire_none: 879 unacquire_none:
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 73d6a735b8f3..d11d0289f3d2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
246 if (!sb) 246 if (!sb)
247 goto out; 247 goto out;
248 if (sb->s_flags & MS_RDONLY) { 248 if (sb->s_flags & MS_RDONLY) {
249 deactivate_locked_super(sb); 249 sb->s_frozen = SB_FREEZE_TRANS;
250 up_write(&sb->s_umount);
250 mutex_unlock(&bdev->bd_fsfreeze_mutex); 251 mutex_unlock(&bdev->bd_fsfreeze_mutex);
251 return sb; 252 return sb;
252 } 253 }
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
307 BUG_ON(sb->s_bdev != bdev); 308 BUG_ON(sb->s_bdev != bdev);
308 down_write(&sb->s_umount); 309 down_write(&sb->s_umount);
309 if (sb->s_flags & MS_RDONLY) 310 if (sb->s_flags & MS_RDONLY)
310 goto out_deactivate; 311 goto out_unfrozen;
311 312
312 if (sb->s_op->unfreeze_fs) { 313 if (sb->s_op->unfreeze_fs) {
313 error = sb->s_op->unfreeze_fs(sb); 314 error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
321 } 322 }
322 } 323 }
323 324
325out_unfrozen:
324 sb->s_frozen = SB_UNFROZEN; 326 sb->s_frozen = SB_UNFROZEN;
325 smp_wmb(); 327 smp_wmb();
326 wake_up(&sb->s_wait_unfrozen); 328 wake_up(&sb->s_wait_unfrozen);
327 329
328out_deactivate:
329 if (sb) 330 if (sb)
330 deactivate_locked_super(sb); 331 deactivate_locked_super(sb);
331out_unlock: 332out_unlock:
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 87b25543d7d1..2b59201b955c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1982 1982
1983 if (!(sb->s_flags & MS_RDONLY)) { 1983 if (!(sb->s_flags & MS_RDONLY)) {
1984 ret = btrfs_recover_relocation(tree_root); 1984 ret = btrfs_recover_relocation(tree_root);
1985 BUG_ON(ret); 1985 if (ret < 0) {
1986 printk(KERN_WARNING
1987 "btrfs: failed to recover relocation\n");
1988 err = -EINVAL;
1989 goto fail_trans_kthread;
1990 }
1986 } 1991 }
1987 1992
1988 location.objectid = BTRFS_FS_TREE_OBJECTID; 1993 location.objectid = BTRFS_FS_TREE_OBJECTID;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 432a2da4641e..559f72489b3b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5402 int ret; 5402 int ret;
5403 5403
5404 while (level >= 0) { 5404 while (level >= 0) {
5405 if (path->slots[level] >=
5406 btrfs_header_nritems(path->nodes[level]))
5407 break;
5408
5409 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5405 ret = walk_down_proc(trans, root, path, wc, lookup_info);
5410 if (ret > 0) 5406 if (ret > 0)
5411 break; 5407 break;
@@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5413 if (level == 0) 5409 if (level == 0)
5414 break; 5410 break;
5415 5411
5412 if (path->slots[level] >=
5413 btrfs_header_nritems(path->nodes[level]))
5414 break;
5415
5416 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5416 ret = do_walk_down(trans, root, path, wc, &lookup_info);
5417 if (ret > 0) { 5417 if (ret > 0) {
5418 path->slots[level]++; 5418 path->slots[level]++;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96577e8bf9fd..b177ed319612 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3165 spin_unlock(&tree->buffer_lock); 3165 spin_unlock(&tree->buffer_lock);
3166 goto free_eb; 3166 goto free_eb;
3167 } 3167 }
3168 spin_unlock(&tree->buffer_lock);
3169
3170 /* add one reference for the tree */ 3168 /* add one reference for the tree */
3171 atomic_inc(&eb->refs); 3169 atomic_inc(&eb->refs);
3170 spin_unlock(&tree->buffer_lock);
3172 return eb; 3171 return eb;
3173 3172
3174free_eb: 3173free_eb:
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c02033596f02..6ed434ac037f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -720,13 +720,15 @@ again:
720 inode->i_ino, orig_offset); 720 inode->i_ino, orig_offset);
721 BUG_ON(ret); 721 BUG_ON(ret);
722 } 722 }
723 fi = btrfs_item_ptr(leaf, path->slots[0],
724 struct btrfs_file_extent_item);
725 if (del_nr == 0) { 723 if (del_nr == 0) {
724 fi = btrfs_item_ptr(leaf, path->slots[0],
725 struct btrfs_file_extent_item);
726 btrfs_set_file_extent_type(leaf, fi, 726 btrfs_set_file_extent_type(leaf, fi,
727 BTRFS_FILE_EXTENT_REG); 727 BTRFS_FILE_EXTENT_REG);
728 btrfs_mark_buffer_dirty(leaf); 728 btrfs_mark_buffer_dirty(leaf);
729 } else { 729 } else {
730 fi = btrfs_item_ptr(leaf, del_slot - 1,
731 struct btrfs_file_extent_item);
730 btrfs_set_file_extent_type(leaf, fi, 732 btrfs_set_file_extent_type(leaf, fi,
731 BTRFS_FILE_EXTENT_REG); 733 BTRFS_FILE_EXTENT_REG);
732 btrfs_set_file_extent_num_bytes(leaf, fi, 734 btrfs_set_file_extent_num_bytes(leaf, fi,
@@ -1133,7 +1135,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1133 } 1135 }
1134 mutex_lock(&dentry->d_inode->i_mutex); 1136 mutex_lock(&dentry->d_inode->i_mutex);
1135out: 1137out:
1136 return ret > 0 ? EIO : ret; 1138 return ret > 0 ? -EIO : ret;
1137} 1139}
1138 1140
1139static const struct vm_operations_struct btrfs_file_vm_ops = { 1141static const struct vm_operations_struct btrfs_file_vm_ops = {
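
The one-character btrfs_sync_file() change matters because kernel callers expect failures as negative errno values; a positive EIO would be indistinguishable from a benign positive return. Standalone illustration:

#include <errno.h>
#include <stdio.h>

static int sync_file(int ret)
{
    return ret > 0 ? -EIO : ret;   /* was: ret > 0 ? EIO : ret */
}

int main(void)
{
    printf("%d\n", sync_file(2));  /* -5 (-EIO): an error callers can see */
    printf("%d\n", sync_file(0));  /* 0: success */
    return 0;
}
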
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8cd109972fa6..4deb280f8969 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1681 * before we start the transaction. It limits the amount of btree 1681 * before we start the transaction. It limits the amount of btree
1682 * reads required while inside the transaction. 1682 * reads required while inside the transaction.
1683 */ 1683 */
1684static noinline void reada_csum(struct btrfs_root *root,
1685 struct btrfs_path *path,
1686 struct btrfs_ordered_extent *ordered_extent)
1687{
1688 struct btrfs_ordered_sum *sum;
1689 u64 bytenr;
1690
1691 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1692 list);
1693 bytenr = sum->sums[0].bytenr;
1694
1695 /*
1696 * we don't care about the results, the point of this search is
1697 * just to get the btree leaves into ram
1698 */
1699 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1700}
1701
1702/* as ordered data IO finishes, this gets called so we can finish 1684/* as ordered data IO finishes, this gets called so we can finish
1703 * an ordered extent if the range of bytes in the file it covers are 1685 * an ordered extent if the range of bytes in the file it covers are
1704 * fully written. 1686 * fully written.
@@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1709 struct btrfs_trans_handle *trans; 1691 struct btrfs_trans_handle *trans;
1710 struct btrfs_ordered_extent *ordered_extent = NULL; 1692 struct btrfs_ordered_extent *ordered_extent = NULL;
1711 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1693 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1712 struct btrfs_path *path;
1713 int compressed = 0; 1694 int compressed = 0;
1714 int ret; 1695 int ret;
1715 1696
@@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1717 if (!ret) 1698 if (!ret)
1718 return 0; 1699 return 0;
1719 1700
1720 /* 1701 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1721 * before we join the transaction, try to do some of our IO.
1722 * This will limit the amount of IO that we have to do with
1723 * the transaction running. We're unlikely to need to do any
1724 * IO if the file extents are new, the disk_i_size checks
1725 * covers the most common case.
1726 */
1727 if (start < BTRFS_I(inode)->disk_i_size) {
1728 path = btrfs_alloc_path();
1729 if (path) {
1730 ret = btrfs_lookup_file_extent(NULL, root, path,
1731 inode->i_ino,
1732 start, 0);
1733 ordered_extent = btrfs_lookup_ordered_extent(inode,
1734 start);
1735 if (!list_empty(&ordered_extent->list)) {
1736 btrfs_release_path(root, path);
1737 reada_csum(root, path, ordered_extent);
1738 }
1739 btrfs_free_path(path);
1740 }
1741 }
1742
1743 if (!ordered_extent)
1744 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1745 BUG_ON(!ordered_extent); 1702 BUG_ON(!ordered_extent);
1703
1746 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1704 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1747 BUG_ON(!list_empty(&ordered_extent->list)); 1705 BUG_ON(!list_empty(&ordered_extent->list));
1748 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1706 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5841 inode->i_ctime = CURRENT_TIME; 5799 inode->i_ctime = CURRENT_TIME;
5842 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 5800 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5843 if (!(mode & FALLOC_FL_KEEP_SIZE) && 5801 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5844 cur_offset > inode->i_size) { 5802 (actual_len > inode->i_size) &&
5803 (cur_offset > inode->i_size)) {
5804
5845 if (cur_offset > actual_len) 5805 if (cur_offset > actual_len)
5846 i_size = actual_len; 5806 i_size = actual_len;
5847 else 5807 else
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ed3e4a2ec2c8..ab7ab5318745 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3764,7 +3764,8 @@ out:
3764 BTRFS_DATA_RELOC_TREE_OBJECTID); 3764 BTRFS_DATA_RELOC_TREE_OBJECTID);
3765 if (IS_ERR(fs_root)) 3765 if (IS_ERR(fs_root))
3766 err = PTR_ERR(fs_root); 3766 err = PTR_ERR(fs_root);
3767 btrfs_orphan_cleanup(fs_root); 3767 else
3768 btrfs_orphan_cleanup(fs_root);
3768 } 3769 }
3769 return err; 3770 return err;
3770} 3771}
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 14ac4806e291..eeb4986ea7db 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -348,7 +348,17 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
348 dir = dget_parent(object->dentry); 348 dir = dget_parent(object->dentry);
349 349
350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); 350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
351 ret = cachefiles_bury_object(cache, dir, object->dentry); 351
352 /* we need to check that our parent is _still_ our parent - it may have
353 * been renamed */
354 if (dir == object->dentry->d_parent) {
355 ret = cachefiles_bury_object(cache, dir, object->dentry);
356 } else {
357 /* it got moved, presumably by cachefilesd culling it, so it's
358 * no longer in the key path and we can ignore it */
359 mutex_unlock(&dir->d_inode->i_mutex);
360 ret = 0;
361 }
352 362
353 dput(dir); 363 dput(dir);
354 _leave(" = %d", ret); 364 _leave(" = %d", ret);
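
The cachefiles hunk is a lock-then-revalidate fix: the parent directory is locked first, and only then is the cached parent relationship rechecked, because cachefilesd may have culled (renamed) the object before the lock was taken. A reduced standalone sketch of the check, with the types stripped to the bare minimum:

#include <stdio.h>

struct dentry {
    struct dentry *d_parent;
};

static int delete_object(struct dentry *dir, struct dentry *victim)
{
    if (dir == victim->d_parent)
        return 1;   /* still our child: safe to bury it here */
    return 0;       /* moved away: drop the lock, report success */
}

int main(void)
{
    struct dentry parent = { 0 }, other = { 0 };
    struct dentry child = { .d_parent = &parent };

    printf("%d %d\n", delete_object(&parent, &child),
           delete_object(&other, &child));
    return 0;
}
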
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7b2600b380d7..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
1Version 1.62
2------------
3Add sockopt=TCP_NODELAY mount option.
4
1Version 1.61 5Version 1.61
2------------ 6------------
3Fix append problem to Samba servers (files opened with O_APPEND could 7Fix append problem to Samba servers (files opened with O_APPEND could
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
113extern const struct export_operations cifs_export_ops; 113extern const struct export_operations cifs_export_ops;
114#endif /* EXPERIMENTAL */ 114#endif /* EXPERIMENTAL */
115 115
116#define CIFS_VERSION "1.61" 116#define CIFS_VERSION "1.62"
117#endif /* _CIFSFS_H */ 117#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
149 bool svlocal:1; /* local server or remote */ 149 bool svlocal:1; /* local server or remote */
150 bool noblocksnd; /* use blocking sendmsg */ 150 bool noblocksnd; /* use blocking sendmsg */
151 bool noautotune; /* do not autotune send buf sizes */ 151 bool noautotune; /* do not autotune send buf sizes */
152 bool tcp_nodelay;
152 atomic_t inFlight; /* number of requests on the wire to server */ 153 atomic_t inFlight; /* number of requests on the wire to server */
153#ifdef CONFIG_CIFS_STATS2 154#ifdef CONFIG_CIFS_STATS2
154 atomic_t inSend; /* requests trying to send */ 155 atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 3bbcaa716b3c..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ 98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
99 unsigned int rsize; 99 unsigned int rsize;
100 unsigned int wsize; 100 unsigned int wsize;
101 unsigned int sockopt; 101 bool sockopt_tcp_nodelay:1;
102 unsigned short int port; 102 unsigned short int port;
103 char *prepath; 103 char *prepath;
104}; 104};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
1142 simple_strtoul(value, &value, 0); 1142 simple_strtoul(value, &value, 0);
1143 } 1143 }
1144 } else if (strnicmp(data, "sockopt", 5) == 0) { 1144 } else if (strnicmp(data, "sockopt", 5) == 0) {
1145 if (value && *value) { 1145 if (!value || !*value) {
1146 vol->sockopt = 1146 cERROR(1, ("no socket option specified"));
1147 simple_strtoul(value, &value, 0); 1147 continue;
1148 } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
1149 vol->sockopt_tcp_nodelay = 1;
1148 } 1150 }
1149 } else if (strnicmp(data, "netbiosname", 4) == 0) { 1151 } else if (strnicmp(data, "netbiosname", 4) == 0) {
1150 if (!value || !*value || (*value == ' ')) { 1152 if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1514 1516
1515 tcp_ses->noblocksnd = volume_info->noblocksnd; 1517 tcp_ses->noblocksnd = volume_info->noblocksnd;
1516 tcp_ses->noautotune = volume_info->noautotune; 1518 tcp_ses->noautotune = volume_info->noautotune;
1519 tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
1517 atomic_set(&tcp_ses->inFlight, 0); 1520 atomic_set(&tcp_ses->inFlight, 0);
1518 init_waitqueue_head(&tcp_ses->response_q); 1521 init_waitqueue_head(&tcp_ses->response_q);
1519 init_waitqueue_head(&tcp_ses->request_q); 1522 init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
1764ipv4_connect(struct TCP_Server_Info *server) 1767ipv4_connect(struct TCP_Server_Info *server)
1765{ 1768{
1766 int rc = 0; 1769 int rc = 0;
1770 int val;
1767 bool connected = false; 1771 bool connected = false;
1768 __be16 orig_port = 0; 1772 __be16 orig_port = 0;
1769 struct socket *socket = server->ssocket; 1773 struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
1845 socket->sk->sk_rcvbuf = 140 * 1024; 1849 socket->sk->sk_rcvbuf = 140 * 1024;
1846 } 1850 }
1847 1851
1852 if (server->tcp_nodelay) {
1853 val = 1;
1854 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
1855 (char *)&val, sizeof(val));
1856 if (rc)
1857 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
1858 }
1859
1848 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", 1860 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
1849 socket->sk->sk_sndbuf, 1861 socket->sk->sk_sndbuf,
1850 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); 1862 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
1916ipv6_connect(struct TCP_Server_Info *server) 1928ipv6_connect(struct TCP_Server_Info *server)
1917{ 1929{
1918 int rc = 0; 1930 int rc = 0;
1931 int val;
1919 bool connected = false; 1932 bool connected = false;
1920 __be16 orig_port = 0; 1933 __be16 orig_port = 0;
1921 struct socket *socket = server->ssocket; 1934 struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
1987 */ 2000 */
1988 socket->sk->sk_rcvtimeo = 7 * HZ; 2001 socket->sk->sk_rcvtimeo = 7 * HZ;
1989 socket->sk->sk_sndtimeo = 5 * HZ; 2002 socket->sk->sk_sndtimeo = 5 * HZ;
2003
2004 if (server->tcp_nodelay) {
2005 val = 1;
2006 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
2007 (char *)&val, sizeof(val));
2008 if (rc)
2009 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
2010 }
2011
1990 server->ssocket = socket; 2012 server->ssocket = socket;
1991 2013
1992 return rc; 2014 return rc;
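
The new sockopt=TCP_NODELAY mount option threads a flag from mount-option parsing to both connect paths, where kernel_setsockopt() disables Nagle on the transport socket so small SMB requests are not held back waiting for ACKs. A userspace analogue, purely illustrative:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int val = 1;

    if (fd < 0)
        return 1;
    /* same option the CIFS code sets via kernel_setsockopt() */
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) < 0)
        perror("TCP_NODELAY");
    return 0;
}
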
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cf18ee765590..e3fda978f481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1762 CIFS_MOUNT_MAP_SPECIAL_CHR); 1762 CIFS_MOUNT_MAP_SPECIAL_CHR);
1763 } 1763 }
1764 1764
1765 if (!rc) 1765 if (!rc) {
1766 rc = inode_setattr(inode, attrs); 1766 rc = inode_setattr(inode, attrs);
1767
1768 /* force revalidate when any of these times are set since some
1769	   of the fs types (e.g. ext3, fat) do not have fine enough
1770	   time granularity to match protocol, and we do not have
1771	   a way (yet) to query the server fs's time granularity (and
1772 whether it rounds times down).
1773 */
1774 if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
1775 cifsInode->time = 0;
1776 }
1767out: 1777out:
1768 kfree(args); 1778 kfree(args);
1769 kfree(full_path); 1779 kfree(full_path);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f84062f9a985..c343b14ba2d3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
77 77
78 cFYI(1, ("For %s", name->name)); 78 cFYI(1, ("For %s", name->name));
79 79
80 if (parent->d_op && parent->d_op->d_hash)
81 parent->d_op->d_hash(parent, name);
82 else
83 name->hash = full_name_hash(name->name, name->len);
84
80 dentry = d_lookup(parent, name); 85 dentry = d_lookup(parent, name);
81 if (dentry) { 86 if (dentry) {
82 /* FIXME: check for inode number changes? */ 87 /* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
666 min(len, max_len), nlt, 671 min(len, max_len), nlt,
667 cifs_sb->mnt_cifs_flags & 672 cifs_sb->mnt_cifs_flags &
668 CIFS_MOUNT_MAP_SPECIAL_CHR); 673 CIFS_MOUNT_MAP_SPECIAL_CHR);
674 pqst->len -= nls_nullsize(nlt);
669 } else { 675 } else {
670 pqst->name = filename; 676 pqst->name = filename;
671 pqst->len = len; 677 pqst->len = len;
672 } 678 }
673 pqst->hash = full_name_hash(pqst->name, pqst->len);
674/* cFYI(1, ("filldir on %s",pqst->name)); */
675 return rc; 679 return rc;
676} 680}
677 681
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7085a6275c4c..aaa9c1c5a5bd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
223 /* null user mount */ 223 /* null user mount */
224 *bcc_ptr = 0; 224 *bcc_ptr = 0;
225 *(bcc_ptr+1) = 0; 225 *(bcc_ptr+1) = 0;
226 } else { /* 300 should be long enough for any conceivable user name */ 226 } else {
227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, 227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
228 300, nls_cp); 228 MAX_USERNAME_SIZE, nls_cp);
229 } 229 }
230 bcc_ptr += 2 * bytes_ret; 230 bcc_ptr += 2 * bytes_ret;
231 bcc_ptr += 2; /* account for null termination */ 231 bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
246 /* copy user */ 246 /* copy user */
247 if (ses->userName == NULL) { 247 if (ses->userName == NULL) {
248 /* BB what about null user mounts - check that we do this BB */ 248 /* BB what about null user mounts - check that we do this BB */
249 } else { /* 300 should be long enough for any conceivable user name */ 249 } else {
250 strncpy(bcc_ptr, ses->userName, 300); 250 strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
251 } 251 }
252 /* BB improve check for overflow */ 252 bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
253 bcc_ptr += strnlen(ses->userName, 300);
254 *bcc_ptr = 0; 253 *bcc_ptr = 0;
255 bcc_ptr++; /* account for null termination */ 254 bcc_ptr++; /* account for null termination */
256 255
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c5c45de1a2ee..0ca9ec4a79c3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
301 u32 data; 301 u32 data;
302 void __user *dxferp; 302 void __user *dxferp;
303 int err; 303 int err;
304 int interface_id;
305
306 if (get_user(interface_id, &sgio32->interface_id))
307 return -EFAULT;
308 if (interface_id != 'S')
309 return sys_ioctl(fd, cmd, (unsigned long)sgio32);
304 310
305 if (get_user(iovec_count, &sgio32->iovec_count)) 311 if (get_user(iovec_count, &sgio32->iovec_count))
306 return -EFAULT; 312 return -EFAULT;
@@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF)
936COMPATIBLE_IOCTL(TIOCLINUX) 942COMPATIBLE_IOCTL(TIOCLINUX)
937COMPATIBLE_IOCTL(TIOCSBRK) 943COMPATIBLE_IOCTL(TIOCSBRK)
938COMPATIBLE_IOCTL(TIOCCBRK) 944COMPATIBLE_IOCTL(TIOCCBRK)
945COMPATIBLE_IOCTL(TIOCGSID)
939COMPATIBLE_IOCTL(TIOCGICOUNT) 946COMPATIBLE_IOCTL(TIOCGICOUNT)
940/* Little t */ 947/* Little t */
941COMPATIBLE_IOCTL(TIOCGETD) 948COMPATIBLE_IOCTL(TIOCGETD)
@@ -1038,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE)
1038#ifdef CONFIG_BLOCK 1045#ifdef CONFIG_BLOCK
1039/* loop */ 1046/* loop */
1040IGNORE_IOCTL(LOOP_CLR_FD) 1047IGNORE_IOCTL(LOOP_CLR_FD)
1048/* md calls this on random blockdevs */
1049IGNORE_IOCTL(RAID_VERSION)
1041/* SG stuff */ 1050/* SG stuff */
1042COMPATIBLE_IOCTL(SG_SET_TIMEOUT) 1051COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
1043COMPATIBLE_IOCTL(SG_GET_TIMEOUT) 1052COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
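
The sg_ioctl_trans() fix sniffs the first field before translating: a v3 sg_io_hdr begins with interface_id == 'S', so anything else (for instance the old struct sg_header) is handed to the native ioctl untranslated rather than misparsed. Standalone illustration of the routing:

#include <stdio.h>

static const char *route(int interface_id)
{
    if (interface_id != 'S')
        return "pass through to sys_ioctl()";
    return "translate the 32-bit sg_io_hdr layout";
}

int main(void)
{
    printf("'S' -> %s\n", route('S'));
    printf("0   -> %s\n", route(0));
    return 0;
}
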
diff --git a/fs/exec.c b/fs/exec.c
index 0790a107ff7e..cce6bbdbdbb1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
571 struct vm_area_struct *prev = NULL; 571 struct vm_area_struct *prev = NULL;
572 unsigned long vm_flags; 572 unsigned long vm_flags;
573 unsigned long stack_base; 573 unsigned long stack_base;
574 unsigned long stack_size;
575 unsigned long stack_expand;
576 unsigned long rlim_stack;
574 577
575#ifdef CONFIG_STACK_GROWSUP 578#ifdef CONFIG_STACK_GROWSUP
576 /* Limit stack size to 1GB */ 579 /* Limit stack size to 1GB */
@@ -627,10 +630,23 @@ int setup_arg_pages(struct linux_binprm *bprm,
627 goto out_unlock; 630 goto out_unlock;
628 } 631 }
629 632
633 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
634 stack_size = vma->vm_end - vma->vm_start;
635 /*
636 * Align this down to a page boundary as expand_stack
637 * will align it up.
638 */
639 rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
630#ifdef CONFIG_STACK_GROWSUP 640#ifdef CONFIG_STACK_GROWSUP
631 stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; 641 if (stack_size + stack_expand > rlim_stack)
642 stack_base = vma->vm_start + rlim_stack;
643 else
644 stack_base = vma->vm_end + stack_expand;
632#else 645#else
633 stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; 646 if (stack_size + stack_expand > rlim_stack)
647 stack_base = vma->vm_end - rlim_stack;
648 else
649 stack_base = vma->vm_start - stack_expand;
634#endif 650#endif
635 ret = expand_stack(vma, stack_base); 651 ret = expand_stack(vma, stack_base);
636 if (ret) 652 if (ret)
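
The setup_arg_pages() change stops expanding the stack VMA blindly by the fixed slack; the expansion is now clamped so stack size plus slack never exceeds RLIMIT_STACK. A standalone recomputation of the grows-down case, with invented numbers:

#include <stdio.h>

int main(void)
{
    unsigned long page = 4096;
    unsigned long vm_start = 0x7ffff000UL, vm_end = 0x80000000UL;
    unsigned long stack_expand = 20 * page;  /* stands in for EXTRA_STACK_VM_PAGES * PAGE_SIZE */
    unsigned long rlim_stack = (8UL << 20) & ~(page - 1);  /* 8 MiB, page-aligned */
    unsigned long stack_size = vm_end - vm_start;
    unsigned long stack_base;

    if (stack_size + stack_expand > rlim_stack)
        stack_base = vm_end - rlim_stack;    /* clamp to the rlimit */
    else
        stack_base = vm_start - stack_expand;
    printf("stack_base = %#lx\n", stack_base);
    return 0;
}
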
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 5ef953e6f908..97e01dc0d95f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
199static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, 199static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
200 int force) 200 int force)
201{ 201{
202 unsigned long flags; 202 write_lock_irq(&filp->f_owner.lock);
203
204 write_lock_irqsave(&filp->f_owner.lock, flags);
205 if (force || !filp->f_owner.pid) { 203 if (force || !filp->f_owner.pid) {
206 put_pid(filp->f_owner.pid); 204 put_pid(filp->f_owner.pid);
207 filp->f_owner.pid = get_pid(pid); 205 filp->f_owner.pid = get_pid(pid);
@@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
213 filp->f_owner.euid = cred->euid; 211 filp->f_owner.euid = cred->euid;
214 } 212 }
215 } 213 }
216 write_unlock_irqrestore(&filp->f_owner.lock, flags); 214 write_unlock_irq(&filp->f_owner.lock);
217} 215}
218 216
219int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, 217int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
diff --git a/fs/file_table.c b/fs/file_table.c
index 69652c5bd5f0..b98404b54383 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -253,6 +253,7 @@ void __fput(struct file *file)
253 if (file->f_op && file->f_op->release) 253 if (file->f_op && file->f_op->release)
254 file->f_op->release(inode, file); 254 file->f_op->release(inode, file);
255 security_file_free(file); 255 security_file_free(file);
256 ima_file_free(file);
256 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) 257 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
257 cdev_put(inode->i_cdev); 258 cdev_put(inode->i_cdev);
258 fops_put(file->f_op); 259 fops_put(file->f_op);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6d47379e794b..583e823307ae 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
541 *ptr++ = cpu_to_be64(bn++); 541 *ptr++ = cpu_to_be64(bn++);
542 break; 542 break;
543 } 543 }
544 } while (state != ALLOC_DATA); 544 } while ((state != ALLOC_DATA) || !dblock);
545 545
546 ip->i_height = height; 546 ip->i_height = height;
547 gfs2_add_inode_blocks(&ip->i_inode, alloced); 547 gfs2_add_inode_blocks(&ip->i_inode, alloced);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f455a03a09e2..f42663325931 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -769,6 +769,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
769 if (!gl) 769 if (!gl)
770 return -ENOMEM; 770 return -ENOMEM;
771 771
772 atomic_inc(&sdp->sd_glock_disposal);
772 gl->gl_flags = 0; 773 gl->gl_flags = 0;
773 gl->gl_name = name; 774 gl->gl_name = name;
774 atomic_set(&gl->gl_ref, 1); 775 atomic_set(&gl->gl_ref, 1);
@@ -1538,6 +1539,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1538 up_write(&gfs2_umount_flush_sem); 1539 up_write(&gfs2_umount_flush_sem);
1539 msleep(10); 1540 msleep(10);
1540 } 1541 }
1542 flush_workqueue(glock_workqueue);
1543 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1544 gfs2_dump_lockstate(sdp);
1541} 1545}
1542 1546
1543void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 1547void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 13f0bd228132..c0262faf4725 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -123,7 +123,7 @@ struct lm_lockops {
123 int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname); 123 int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
124 void (*lm_unmount) (struct gfs2_sbd *sdp); 124 void (*lm_unmount) (struct gfs2_sbd *sdp);
125 void (*lm_withdraw) (struct gfs2_sbd *sdp); 125 void (*lm_withdraw) (struct gfs2_sbd *sdp);
126 void (*lm_put_lock) (struct kmem_cache *cachep, void *gl); 126 void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
127 unsigned int (*lm_lock) (struct gfs2_glock *gl, 127 unsigned int (*lm_lock) (struct gfs2_glock *gl,
128 unsigned int req_state, unsigned int flags); 128 unsigned int req_state, unsigned int flags);
129 void (*lm_cancel) (struct gfs2_glock *gl); 129 void (*lm_cancel) (struct gfs2_glock *gl);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4792200978c8..bc0ad158e6b4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -544,6 +544,8 @@ struct gfs2_sbd {
544 struct gfs2_holder sd_live_gh; 544 struct gfs2_holder sd_live_gh;
545 struct gfs2_glock *sd_rename_gl; 545 struct gfs2_glock *sd_rename_gl;
546 struct gfs2_glock *sd_trans_gl; 546 struct gfs2_glock *sd_trans_gl;
547 wait_queue_head_t sd_glock_wait;
548 atomic_t sd_glock_disposal;
547 549
548 /* Inode Stuff */ 550 /* Inode Stuff */
549 551
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 46df988323bc..0e5e0e7022e5 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -21,6 +21,7 @@ static void gdlm_ast(void *arg)
21{ 21{
22 struct gfs2_glock *gl = arg; 22 struct gfs2_glock *gl = arg;
23 unsigned ret = gl->gl_state; 23 unsigned ret = gl->gl_state;
24 struct gfs2_sbd *sdp = gl->gl_sbd;
24 25
25 BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); 26 BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
26 27
@@ -30,6 +31,8 @@ static void gdlm_ast(void *arg)
30 switch (gl->gl_lksb.sb_status) { 31 switch (gl->gl_lksb.sb_status) {
31 case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ 32 case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
32 kmem_cache_free(gfs2_glock_cachep, gl); 33 kmem_cache_free(gfs2_glock_cachep, gl);
34 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
35 wake_up(&sdp->sd_glock_wait);
33 return; 36 return;
34 case -DLM_ECANCEL: /* Cancel while getting lock */ 37 case -DLM_ECANCEL: /* Cancel while getting lock */
35 ret |= LM_OUT_CANCELED; 38 ret |= LM_OUT_CANCELED;
@@ -164,14 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
164 return LM_OUT_ASYNC; 167 return LM_OUT_ASYNC;
165} 168}
166 169
167static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr) 170static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
168{ 171{
169 struct gfs2_glock *gl = ptr; 172 struct gfs2_sbd *sdp = gl->gl_sbd;
170 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 173 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
171 int error; 174 int error;
172 175
173 if (gl->gl_lksb.sb_lkid == 0) { 176 if (gl->gl_lksb.sb_lkid == 0) {
174 kmem_cache_free(cachep, gl); 177 kmem_cache_free(cachep, gl);
178 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
179 wake_up(&sdp->sd_glock_wait);
175 return; 180 return;
176 } 181 }
177 182
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index edfee24f3636..a86ed6381566 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -82,6 +82,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
82 82
83 gfs2_tune_init(&sdp->sd_tune); 83 gfs2_tune_init(&sdp->sd_tune);
84 84
85 init_waitqueue_head(&sdp->sd_glock_wait);
86 atomic_set(&sdp->sd_glock_disposal, 0);
85 spin_lock_init(&sdp->sd_statfs_spin); 87 spin_lock_init(&sdp->sd_statfs_spin);
86 88
87 spin_lock_init(&sdp->sd_rindex_spin); 89 spin_lock_init(&sdp->sd_rindex_spin);
@@ -723,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
723 goto fail; 725 goto fail;
724 } 726 }
725 727
726 error = -EINVAL; 728 error = -EUSERS;
727 if (!gfs2_jindex_size(sdp)) { 729 if (!gfs2_jindex_size(sdp)) {
728 fs_err(sdp, "no journals!\n"); 730 fs_err(sdp, "no journals!\n");
729 goto fail_jindex; 731 goto fail_jindex;
@@ -983,9 +985,17 @@ static const match_table_t nolock_tokens = {
983 { Opt_err, NULL }, 985 { Opt_err, NULL },
984}; 986};
985 987
988static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
989{
990 struct gfs2_sbd *sdp = gl->gl_sbd;
991 kmem_cache_free(cachep, gl);
992 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
993 wake_up(&sdp->sd_glock_wait);
994}
995
986static const struct lm_lockops nolock_ops = { 996static const struct lm_lockops nolock_ops = {
987 .lm_proto_name = "lock_nolock", 997 .lm_proto_name = "lock_nolock",
988 .lm_put_lock = kmem_cache_free, 998 .lm_put_lock = nolock_put_lock,
989 .lm_tokens = &nolock_tokens, 999 .lm_tokens = &nolock_tokens,
990}; 1000};
991 1001
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c282ad41f3d1..b9dd3da22c0a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -21,6 +21,7 @@
21#include <linux/gfs2_ondisk.h> 21#include <linux/gfs2_ondisk.h>
22#include <linux/crc32.h> 22#include <linux/crc32.h>
23#include <linux/time.h> 23#include <linux/time.h>
24#include <linux/wait.h>
24 25
25#include "gfs2.h" 26#include "gfs2.h"
26#include "incore.h" 27#include "incore.h"
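
Taken together, the gfs2 hunks add a drain counter: sd_glock_disposal is incremented when a glock is created and decremented (with a wake_up()) wherever one is finally freed, so gfs2_gl_hash_clear() can wait_event() until every glock queued for disposal is really gone before unmount proceeds. A standalone sketch of the counting discipline, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int glock_disposal;   /* starts at zero */

static void glock_create(void)
{
    atomic_fetch_add(&glock_disposal, 1);
}

static void glock_free(void)
{
    /* mirrors atomic_dec_and_test() + wake_up(&sd_glock_wait) */
    if (atomic_fetch_sub(&glock_disposal, 1) == 1)
        printf("count hit zero: wake the unmount waiter\n");
}

int main(void)
{
    glock_create();
    glock_create();
    glock_free();
    glock_free();   /* second free prints the wake-up message */
    return 0;
}
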
diff --git a/fs/namei.c b/fs/namei.c
index 94a5e60779f9..a4855af776a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -823,6 +823,17 @@ fail:
823} 823}
824 824
825/* 825/*
826 * This is a temporary kludge to deal with "automount" symlinks; proper
827 * solution is to trigger them on follow_mount(), so that do_lookup()
828 * would DTRT. To be killed before 2.6.34-final.
829 */
830static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
831{
832 return inode && unlikely(inode->i_op->follow_link) &&
833 ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
834}
835
836/*
826 * Name resolution. 837 * Name resolution.
827 * This is the basic name resolution function, turning a pathname into 838 * This is the basic name resolution function, turning a pathname into
828 * the final dentry. We expect 'base' to be positive and a directory. 839 * the final dentry. We expect 'base' to be positive and a directory.
@@ -942,8 +953,7 @@ last_component:
942 if (err) 953 if (err)
943 break; 954 break;
944 inode = next.dentry->d_inode; 955 inode = next.dentry->d_inode;
945 if ((lookup_flags & LOOKUP_FOLLOW) 956 if (follow_on_final(inode, lookup_flags)) {
946 && inode && inode->i_op->follow_link) {
947 err = do_follow_link(&next, nd); 957 err = do_follow_link(&next, nd);
948 if (err) 958 if (err)
949 goto return_err; 959 goto return_err;
@@ -1736,8 +1746,7 @@ do_last:
1736 if (nd.root.mnt) 1746 if (nd.root.mnt)
1737 path_put(&nd.root); 1747 path_put(&nd.root);
1738 if (!IS_ERR(filp)) { 1748 if (!IS_ERR(filp)) {
1739 error = ima_path_check(&filp->f_path, filp->f_mode & 1749 error = ima_file_check(filp, acc_mode);
1740 (MAY_READ | MAY_WRITE | MAY_EXEC));
1741 if (error) { 1750 if (error) {
1742 fput(filp); 1751 fput(filp);
1743 filp = ERR_PTR(error); 1752 filp = ERR_PTR(error);
@@ -1797,8 +1806,7 @@ ok:
1797 } 1806 }
1798 filp = nameidata_to_filp(&nd); 1807 filp = nameidata_to_filp(&nd);
1799 if (!IS_ERR(filp)) { 1808 if (!IS_ERR(filp)) {
1800 error = ima_path_check(&filp->f_path, filp->f_mode & 1809 error = ima_file_check(filp, acc_mode);
1801 (MAY_READ | MAY_WRITE | MAY_EXEC));
1802 if (error) { 1810 if (error) {
1803 fput(filp); 1811 fput(filp);
1804 filp = ERR_PTR(error); 1812 filp = ERR_PTR(error);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e1d415e97849..0d289823e856 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
342 data->res.fattr = &data->fattr; 342 data->res.fattr = &data->fattr;
343 data->res.eof = 0; 343 data->res.eof = 0;
344 data->res.count = bytes; 344 data->res.count = bytes;
345 nfs_fattr_init(&data->fattr);
345 msg.rpc_argp = &data->args; 346 msg.rpc_argp = &data->args;
346 msg.rpc_resp = &data->res; 347 msg.rpc_resp = &data->res;
347 348
@@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
575 data->res.count = 0; 576 data->res.count = 0;
576 data->res.fattr = &data->fattr; 577 data->res.fattr = &data->fattr;
577 data->res.verf = &data->verf; 578 data->res.verf = &data->verf;
579 nfs_fattr_init(&data->fattr);
578 580
579 NFS_PROTO(data->inode)->commit_setup(data, &msg); 581 NFS_PROTO(data->inode)->commit_setup(data, &msg);
580 582
@@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
766 data->res.fattr = &data->fattr; 768 data->res.fattr = &data->fattr;
767 data->res.count = bytes; 769 data->res.count = bytes;
768 data->res.verf = &data->verf; 770 data->res.verf = &data->verf;
771 nfs_fattr_init(&data->fattr);
769 772
770 task_setup_data.task = &data->task; 773 task_setup_data.task = &data->task;
771 task_setup_data.callback_data = data; 774 task_setup_data.callback_data = data;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6b891328f332..63f2071d6445 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -486,6 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
486{ 486{
487 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); 487 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
488 488
489 if (gfp & __GFP_WAIT)
490 nfs_wb_page(page->mapping->host, page);
489 /* If PagePrivate() is set, then the page is not freeable */ 491 /* If PagePrivate() is set, then the page is not freeable */
490 if (PagePrivate(page)) 492 if (PagePrivate(page))
491 return 0; 493 return 0;
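
The two lines added to nfs_release_page() are gated on __GFP_WAIT because ->releasepage() can be called from reclaim contexts that must not sleep; only when the allocation mask permits blocking is it safe to push the page through writeback before deciding whether it can be freed. A toy model of that guard, with an invented flag value standing in for __GFP_WAIT:

#include <stdio.h>

#define TOY_GFP_WAIT 0x10u             /* illustrative, not the kernel value */

struct toy_page { int dirty; int privately_held; };

static int toy_release_page(struct toy_page *page, unsigned gfp)
{
	if (gfp & TOY_GFP_WAIT)
		page->dirty = 0;       /* caller may sleep: write it back */
	if (page->privately_held)      /* mirrors the PagePrivate() test */
		return 0;
	return !page->dirty;           /* freeable only once clean */
}

int main(void)
{
	struct toy_page p = { 1, 0 };
	int atomic_ok = toy_release_page(&p, 0);              /* 0 */
	int blocking_ok = toy_release_page(&p, TOY_GFP_WAIT); /* 1 */

	printf("atomic: %d, blocking: %d\n", atomic_ok, blocking_ok);
	return 0;
}
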
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index fa588006588d..237874f1af23 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
354 */ 354 */
355int nfs_fscache_release_page(struct page *page, gfp_t gfp) 355int nfs_fscache_release_page(struct page *page, gfp_t gfp)
356{ 356{
357 struct nfs_inode *nfsi = NFS_I(page->mapping->host);
358 struct fscache_cookie *cookie = nfsi->fscache;
359
360 BUG_ON(!cookie);
361
362 if (PageFsCache(page)) { 357 if (PageFsCache(page)) {
358 struct nfs_inode *nfsi = NFS_I(page->mapping->host);
359 struct fscache_cookie *cookie = nfsi->fscache;
360
361 BUG_ON(!cookie);
363 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", 362 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
364 cookie, page, nfsi); 363 cookie, page, nfsi);
365 364
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index faa091865ad0..f141bde7756a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1261,8 +1261,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1261 1261
1262 if (fattr->valid & NFS_ATTR_FATTR_MODE) { 1262 if (fattr->valid & NFS_ATTR_FATTR_MODE) {
1263 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) { 1263 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
1264 umode_t newmode = inode->i_mode & S_IFMT;
1265 newmode |= fattr->mode & S_IALLUGO;
1266 inode->i_mode = newmode;
1264 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1267 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
1265 inode->i_mode = fattr->mode;
1266 } 1268 }
1267 } else if (server->caps & NFS_CAP_MODE) 1269 } else if (server->caps & NFS_CAP_MODE)
1268 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR 1270 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
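
The replacement assignment in nfs_update_inode() is deliberate bit surgery: the file-type field (S_IFMT) of i_mode is preserved and only the permission bits (S_IALLUGO) are taken from the server's fattr, instead of overwriting the whole mode. Worked through on concrete values in a compilable sketch (the two masks below use the conventional octal definitions):

#include <stdio.h>

#define S_IFMT_BITS    0170000u   /* file-type field */
#define S_IALLUGO_BITS 0007777u   /* suid/sgid/sticky + rwx for ugo */

int main(void)
{
	unsigned i_mode = 0100644;    /* regular file, rw-r--r-- */
	unsigned fattr_mode = 0755;   /* server says rwxr-xr-x */

	unsigned newmode = i_mode & S_IFMT_BITS;   /* 0100000 kept */
	newmode |= fattr_mode & S_IALLUGO_BITS;    /* 0755 taken */

	printf("0%o\n", newmode);     /* prints: 0100755 */
	return 0;
}
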
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 0adefc40cc89..59047f8d7d72 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -120,7 +120,7 @@ static struct {
120 { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, 120 { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
121 { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, 121 { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
122 { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, 122 { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
123 { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, 123 { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
124}; 124};
125 125
126struct mountres { 126struct mountres {
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5e078b222b4e..7bc2da8efd4a 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -699,7 +699,7 @@ static struct {
699 { NFSERR_BAD_COOKIE, -EBADCOOKIE }, 699 { NFSERR_BAD_COOKIE, -EBADCOOKIE },
700 { NFSERR_NOTSUPP, -ENOTSUPP }, 700 { NFSERR_NOTSUPP, -ENOTSUPP },
701 { NFSERR_TOOSMALL, -ETOOSMALL }, 701 { NFSERR_TOOSMALL, -ETOOSMALL },
702 { NFSERR_SERVERFAULT, -ESERVERFAULT }, 702 { NFSERR_SERVERFAULT, -EREMOTEIO },
703 { NFSERR_BADTYPE, -EBADTYPE }, 703 { NFSERR_BADTYPE, -EBADTYPE },
704 { NFSERR_JUKEBOX, -EJUKEBOX }, 704 { NFSERR_JUKEBOX, -EJUKEBOX },
705 { -1, -EIO } 705 { -1, -EIO }
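
Both this table and the one in mount_clnt.c follow the same idiom: a static array of { wire status, local errno } pairs scanned linearly, with a { -1, -EIO } sentinel so an unrecognized status degrades to a generic I/O error; the hunks above only retarget the SERVERFAULT entry from -ESERVERFAULT to -EREMOTEIO. A self-contained sketch of the lookup, with made-up status codes but real errno values:

#include <stdio.h>
#include <errno.h>

static const struct {
	int status;                  /* wire status from the server */
	int errno_val;               /* local translation */
} toy_errtbl[] = {
	{ 2,  -ENOENT },
	{ 13, -EACCES },
	{ 10006, -EREMOTEIO },       /* the SERVERFAULT-style case */
	{ -1, -EIO },                /* sentinel: unknown -> -EIO */
};

static int toy_stat_to_errno(int status)
{
	int i;

	for (i = 0; toy_errtbl[i].status != -1; i++)
		if (toy_errtbl[i].status == status)
			return toy_errtbl[i].errno_val;
	return toy_errtbl[i].errno_val;     /* the -EIO sentinel */
}

int main(void)
{
	printf("%d %d\n", toy_stat_to_errno(13), toy_stat_to_errno(9999));
	return 0;
}
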
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 865265bdca03..0c6fda33d66e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -146,6 +146,7 @@ enum {
146 NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ 146 NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
147 NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ 147 NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
148 NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ 148 NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
149 NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
149}; 150};
150 151
151struct nfs4_state { 152struct nfs4_state {
@@ -277,6 +278,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
277extern void nfs4_schedule_state_recovery(struct nfs_client *); 278extern void nfs4_schedule_state_recovery(struct nfs_client *);
278extern void nfs4_schedule_state_manager(struct nfs_client *); 279extern void nfs4_schedule_state_manager(struct nfs_client *);
279extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); 280extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
281extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
280extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 282extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
281extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); 283extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
282extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); 284extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 198d51d17c13..375f0fae2c6a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -249,19 +249,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
249 if (state == NULL) 249 if (state == NULL)
250 break; 250 break;
251 nfs4_state_mark_reclaim_nograce(clp, state); 251 nfs4_state_mark_reclaim_nograce(clp, state);
252 case -NFS4ERR_STALE_CLIENTID: 252 goto do_state_recovery;
253 case -NFS4ERR_STALE_STATEID: 253 case -NFS4ERR_STALE_STATEID:
254 case -NFS4ERR_EXPIRED: 254 if (state == NULL)
255 nfs4_schedule_state_recovery(clp);
256 ret = nfs4_wait_clnt_recover(clp);
257 if (ret == 0)
258 exception->retry = 1;
259#if !defined(CONFIG_NFS_V4_1)
260 break;
261#else /* !defined(CONFIG_NFS_V4_1) */
262 if (!nfs4_has_session(server->nfs_client))
263 break; 255 break;
264 /* FALLTHROUGH */ 256 nfs4_state_mark_reclaim_reboot(clp, state);
257 case -NFS4ERR_STALE_CLIENTID:
258 case -NFS4ERR_EXPIRED:
259 goto do_state_recovery;
260#if defined(CONFIG_NFS_V4_1)
265 case -NFS4ERR_BADSESSION: 261 case -NFS4ERR_BADSESSION:
266 case -NFS4ERR_BADSLOT: 262 case -NFS4ERR_BADSLOT:
267 case -NFS4ERR_BAD_HIGH_SLOT: 263 case -NFS4ERR_BAD_HIGH_SLOT:
@@ -274,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
274 nfs4_schedule_state_recovery(clp); 270 nfs4_schedule_state_recovery(clp);
275 exception->retry = 1; 271 exception->retry = 1;
276 break; 272 break;
277#endif /* !defined(CONFIG_NFS_V4_1) */ 273#endif /* defined(CONFIG_NFS_V4_1) */
278 case -NFS4ERR_FILE_OPEN: 274 case -NFS4ERR_FILE_OPEN:
279 if (exception->timeout > HZ) { 275 if (exception->timeout > HZ) {
280 /* We have retried a decent amount, time to 276 /* We have retried a decent amount, time to
@@ -293,6 +289,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
293 } 289 }
294 /* We failed to handle the error */ 290 /* We failed to handle the error */
295 return nfs4_map_errors(ret); 291 return nfs4_map_errors(ret);
292do_state_recovery:
293 nfs4_schedule_state_recovery(clp);
294 ret = nfs4_wait_clnt_recover(clp);
295 if (ret == 0)
296 exception->retry = 1;
297 return ret;
296} 298}
297 299
298 300
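
The reshuffled switch trades duplicated recovery logic for one shared tail: every state-related error now jumps to do_state_recovery, which schedules the state manager, waits for the client to recover, and arms the caller's retry flag. The same control shape in a compilable sketch; the error constants and the two helpers are stand-ins for nfs4_schedule_state_recovery() and nfs4_wait_clnt_recover():

#include <stdio.h>

enum { ERR_BAD_STATEID = 1, ERR_STALE_CLIENTID, ERR_EXPIRED, ERR_OTHER };

static void schedule_recovery(void) { /* kick a recovery thread here */ }
static int wait_recovered(void)     { return 0; /* 0 = recovered */ }

static int handle_exception(int error, int *retry)
{
	int ret = error;

	switch (error) {
	case ERR_BAD_STATEID:
		/* per-state cleanup would happen here, then fall through */
	case ERR_STALE_CLIENTID:
	case ERR_EXPIRED:
		goto do_state_recovery;
	default:
		break;
	}
	return ret;                       /* unhandled: report as-is */

do_state_recovery:
	schedule_recovery();
	ret = wait_recovered();
	if (ret == 0)
		*retry = 1;               /* caller should reissue the call */
	return ret;
}

int main(void)
{
	int retry = 0;
	int ret = handle_exception(ERR_EXPIRED, &retry);

	printf("ret=%d retry=%d\n", ret, retry);   /* ret=0 retry=1 */
	return 0;
}
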
@@ -1658,6 +1660,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
1658 status = PTR_ERR(state); 1660 status = PTR_ERR(state);
1659 if (IS_ERR(state)) 1661 if (IS_ERR(state))
1660 goto err_opendata_put; 1662 goto err_opendata_put;
1663 if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
1664 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1661 nfs4_opendata_put(opendata); 1665 nfs4_opendata_put(opendata);
1662 nfs4_put_state_owner(sp); 1666 nfs4_put_state_owner(sp);
1663 *res = state; 1667 *res = state;
@@ -3422,15 +3426,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3422 if (state == NULL) 3426 if (state == NULL)
3423 break; 3427 break;
3424 nfs4_state_mark_reclaim_nograce(clp, state); 3428 nfs4_state_mark_reclaim_nograce(clp, state);
3425 case -NFS4ERR_STALE_CLIENTID: 3429 goto do_state_recovery;
3426 case -NFS4ERR_STALE_STATEID: 3430 case -NFS4ERR_STALE_STATEID:
3431 if (state == NULL)
3432 break;
3433 nfs4_state_mark_reclaim_reboot(clp, state);
3434 case -NFS4ERR_STALE_CLIENTID:
3427 case -NFS4ERR_EXPIRED: 3435 case -NFS4ERR_EXPIRED:
3428 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 3436 goto do_state_recovery;
3429 nfs4_schedule_state_recovery(clp);
3430 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3431 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3432 task->tk_status = 0;
3433 return -EAGAIN;
3434#if defined(CONFIG_NFS_V4_1) 3437#if defined(CONFIG_NFS_V4_1)
3435 case -NFS4ERR_BADSESSION: 3438 case -NFS4ERR_BADSESSION:
3436 case -NFS4ERR_BADSLOT: 3439 case -NFS4ERR_BADSLOT:
@@ -3458,6 +3461,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3458 } 3461 }
3459 task->tk_status = nfs4_map_errors(task->tk_status); 3462 task->tk_status = nfs4_map_errors(task->tk_status);
3460 return 0; 3463 return 0;
3464do_state_recovery:
3465 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3466 nfs4_schedule_state_recovery(clp);
3467 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3468 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3469 task->tk_status = 0;
3470 return -EAGAIN;
3461} 3471}
3462 3472
3463static int 3473static int
@@ -4088,6 +4098,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
4088 .rpc_release = nfs4_lock_release, 4098 .rpc_release = nfs4_lock_release,
4089}; 4099};
4090 4100
4101static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4102{
4103 struct nfs_client *clp = server->nfs_client;
4104 struct nfs4_state *state = lsp->ls_state;
4105
4106 switch (error) {
4107 case -NFS4ERR_ADMIN_REVOKED:
4108 case -NFS4ERR_BAD_STATEID:
4109 case -NFS4ERR_EXPIRED:
4110 if (new_lock_owner != 0 ||
4111 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4112 nfs4_state_mark_reclaim_nograce(clp, state);
4113 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4114 break;
4115 case -NFS4ERR_STALE_STATEID:
4116 if (new_lock_owner != 0 ||
4117 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4118 nfs4_state_mark_reclaim_reboot(clp, state);
4119 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
 4120 }
4121}
4122
4091static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 4123static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4092{ 4124{
4093 struct nfs4_lockdata *data; 4125 struct nfs4_lockdata *data;
@@ -4126,6 +4158,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
4126 ret = nfs4_wait_for_completion_rpc_task(task); 4158 ret = nfs4_wait_for_completion_rpc_task(task);
4127 if (ret == 0) { 4159 if (ret == 0) {
4128 ret = data->rpc_status; 4160 ret = data->rpc_status;
4161 if (ret)
4162 nfs4_handle_setlk_error(data->server, data->lsp,
4163 data->arg.new_lock_owner, ret);
4129 } else 4164 } else
4130 data->cancelled = 1; 4165 data->cancelled = 1;
4131 rpc_put_task(task); 4166 rpc_put_task(task);
@@ -4181,8 +4216,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
4181{ 4216{
4182 struct nfs_inode *nfsi = NFS_I(state->inode); 4217 struct nfs_inode *nfsi = NFS_I(state->inode);
4183 unsigned char fl_flags = request->fl_flags; 4218 unsigned char fl_flags = request->fl_flags;
4184 int status; 4219 int status = -ENOLCK;
4185 4220
4221 if ((fl_flags & FL_POSIX) &&
4222 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4223 goto out;
4186 /* Is this a delegated open? */ 4224 /* Is this a delegated open? */
4187 status = nfs4_set_lock_state(state, request); 4225 status = nfs4_set_lock_state(state, request);
4188 if (status != 0) 4226 if (status != 0)
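
Taken together, the nfs4_fs.h flag, the set_bit() in _nfs4_do_open() and the test_bit() in _nfs4_proc_setlk() form a capability handshake: the server advertises POSIX locking in the OPEN result via NFS4_OPEN_RESULT_LOCKTYPE_POSIX, the client caches the answer as NFS_STATE_POSIX_LOCKS in state->flags, and later POSIX lock requests fail early with -ENOLCK when the bit was never set. The cache-then-gate pattern in a toy, compilable form (both bit values are invented):

#include <stdio.h>
#include <errno.h>

#define RESULT_LOCKTYPE_POSIX 0x2u   /* server's OPEN result flag (toy) */
#define STATE_POSIX_LOCKS     0x1u   /* cached capability bit (toy) */

struct toy_state { unsigned flags; };

static void toy_open_done(struct toy_state *st, unsigned result_flags)
{
	if (result_flags & RESULT_LOCKTYPE_POSIX)
		st->flags |= STATE_POSIX_LOCKS;   /* remember for later */
}

static int toy_setlk(const struct toy_state *st, int is_posix)
{
	if (is_posix && !(st->flags & STATE_POSIX_LOCKS))
		return -ENOLCK;                   /* server can't do it */
	return 0;
}

int main(void)
{
	struct toy_state st = { 0 };

	toy_open_done(&st, 0);                    /* server said nothing */
	printf("%d\n", toy_setlk(&st, 1));        /* -ENOLCK */
	return 0;
}
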
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6d263ed79e92..c1e2733f4fa4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
901 nfs4_schedule_state_manager(clp); 901 nfs4_schedule_state_manager(clp);
902} 902}
903 903
904static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 904int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
905{ 905{
906 906
907 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 907 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e437fd6a819f..5cd5184b56db 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr,
4631 * If the server returns different values for sessionID, slotID or 4631 * If the server returns different values for sessionID, slotID or
4632 * sequence number, the server is looney tunes. 4632 * sequence number, the server is looney tunes.
4633 */ 4633 */
4634 status = -ESERVERFAULT; 4634 status = -EREMOTEIO;
4635 4635
4636 if (memcmp(id.data, res->sr_session->sess_id.data, 4636 if (memcmp(id.data, res->sr_session->sess_id.data,
4637 NFS4_MAX_SESSIONID_LEN)) { 4637 NFS4_MAX_SESSIONID_LEN)) {
@@ -5774,7 +5774,7 @@ static struct {
5774 { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, 5774 { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
5775 { NFS4ERR_NOTSUPP, -ENOTSUPP }, 5775 { NFS4ERR_NOTSUPP, -ENOTSUPP },
5776 { NFS4ERR_TOOSMALL, -ETOOSMALL }, 5776 { NFS4ERR_TOOSMALL, -ETOOSMALL },
5777 { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, 5777 { NFS4ERR_SERVERFAULT, -EREMOTEIO },
5778 { NFS4ERR_BADTYPE, -EBADTYPE }, 5778 { NFS4ERR_BADTYPE, -EBADTYPE },
5779 { NFS4ERR_LOCKED, -EAGAIN }, 5779 { NFS4ERR_LOCKED, -EAGAIN },
5780 { NFS4ERR_SYMLINK, -ELOOP }, 5780 { NFS4ERR_SYMLINK, -ELOOP },
@@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat)
5801 } 5801 }
5802 if (stat <= 10000 || stat > 10100) { 5802 if (stat <= 10000 || stat > 10100) {
5803 /* The server is looney tunes. */ 5803 /* The server is looney tunes. */
5804 return -ESERVERFAULT; 5804 return -EREMOTEIO;
5805 } 5805 }
5806 /* If we cannot translate the error, the recovery routines should 5806 /* If we cannot translate the error, the recovery routines should
5807 * handle it. 5807 * handle it.
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e2975939126a..a12c45b65dd4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req)
176 kref_put(&req->wb_kref, nfs_free_request); 176 kref_put(&req->wb_kref, nfs_free_request);
177} 177}
178 178
179static int nfs_wait_bit_uninterruptible(void *word)
180{
181 io_schedule();
182 return 0;
183}
184
179/** 185/**
180 * nfs_wait_on_request - Wait for a request to complete. 186 * nfs_wait_on_request - Wait for a request to complete.
181 * @req: request to wait upon. 187 * @req: request to wait upon.
@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req)
186int 192int
187nfs_wait_on_request(struct nfs_page *req) 193nfs_wait_on_request(struct nfs_page *req)
188{ 194{
189 int ret = 0; 195 return wait_on_bit(&req->wb_flags, PG_BUSY,
190 196 nfs_wait_bit_uninterruptible,
191 if (!test_bit(PG_BUSY, &req->wb_flags)) 197 TASK_UNINTERRUPTIBLE);
192 goto out;
193 ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
194 nfs_wait_bit_killable, TASK_KILLABLE);
195out:
196 return ret;
197} 198}
198 199
199/** 200/**
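
The rewritten nfs_wait_on_request() drops both the open-coded test_bit() fast path and the killable wait: wait_on_bit() already checks the bit before sleeping, and the private action callback makes the wait uninterruptible by parking the task in io_schedule() and returning 0 (a nonzero return would abort the wait). A kernel-style sketch of that calling convention as it appears above; it is not buildable outside a kernel tree:

#include <linux/wait.h>
#include <linux/sched.h>

/* action callback: wait_on_bit() invokes this while the bit is set;
 * returning nonzero would abort the wait, so always return 0 here */
static int sketch_wait_bit_uninterruptible(void *word)
{
	io_schedule();          /* account the sleep as I/O wait */
	return 0;
}

static int sketch_wait_on_flag(unsigned long *flags, int bit)
{
	/* wait_on_bit() checks the bit itself before sleeping, which is
	 * why the old explicit test_bit() fast path could be removed */
	return wait_on_bit(flags, bit, sketch_wait_bit_uninterruptible,
			   TASK_UNINTERRUPTIBLE);
}
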
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ce907efc5508..f1afee4eea77 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -243,6 +243,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *);
243static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); 243static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
244static int nfs_xdev_get_sb(struct file_system_type *fs_type, 244static int nfs_xdev_get_sb(struct file_system_type *fs_type,
245 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); 245 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
246static void nfs_put_super(struct super_block *);
246static void nfs_kill_super(struct super_block *); 247static void nfs_kill_super(struct super_block *);
247static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); 248static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
248 249
@@ -266,6 +267,7 @@ static const struct super_operations nfs_sops = {
266 .alloc_inode = nfs_alloc_inode, 267 .alloc_inode = nfs_alloc_inode,
267 .destroy_inode = nfs_destroy_inode, 268 .destroy_inode = nfs_destroy_inode,
268 .write_inode = nfs_write_inode, 269 .write_inode = nfs_write_inode,
270 .put_super = nfs_put_super,
269 .statfs = nfs_statfs, 271 .statfs = nfs_statfs,
270 .clear_inode = nfs_clear_inode, 272 .clear_inode = nfs_clear_inode,
271 .umount_begin = nfs_umount_begin, 273 .umount_begin = nfs_umount_begin,
@@ -335,6 +337,7 @@ static const struct super_operations nfs4_sops = {
335 .alloc_inode = nfs_alloc_inode, 337 .alloc_inode = nfs_alloc_inode,
336 .destroy_inode = nfs_destroy_inode, 338 .destroy_inode = nfs_destroy_inode,
337 .write_inode = nfs_write_inode, 339 .write_inode = nfs_write_inode,
340 .put_super = nfs_put_super,
338 .statfs = nfs_statfs, 341 .statfs = nfs_statfs,
339 .clear_inode = nfs4_clear_inode, 342 .clear_inode = nfs4_clear_inode,
340 .umount_begin = nfs_umount_begin, 343 .umount_begin = nfs_umount_begin,
@@ -2258,6 +2261,17 @@ error_splat_super:
2258} 2261}
2259 2262
2260/* 2263/*
2264 * Ensure that we unregister the bdi before kill_anon_super
2265 * releases the device name
2266 */
2267static void nfs_put_super(struct super_block *s)
2268{
2269 struct nfs_server *server = NFS_SB(s);
2270
2271 bdi_unregister(&server->backing_dev_info);
2272}
2273
2274/*
2261 * Destroy an NFS2/3 superblock 2275 * Destroy an NFS2/3 superblock
2262 */ 2276 */
2263static void nfs_kill_super(struct super_block *s) 2277static void nfs_kill_super(struct super_block *s)
@@ -2265,7 +2279,6 @@ static void nfs_kill_super(struct super_block *s)
2265 struct nfs_server *server = NFS_SB(s); 2279 struct nfs_server *server = NFS_SB(s);
2266 2280
2267 kill_anon_super(s); 2281 kill_anon_super(s);
2268 bdi_unregister(&server->backing_dev_info);
2269 nfs_fscache_release_super_cookie(s); 2282 nfs_fscache_release_super_cookie(s);
2270 nfs_free_server(server); 2283 nfs_free_server(server);
2271} 2284}
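
Moving bdi_unregister() out of nfs_kill_super() and into a new ->put_super() is purely an ordering fix, as the new comment says: ->put_super() runs during generic superblock shutdown, before kill_anon_super() releases the anonymous device name the bdi was registered under. The constraint in a trivial stand-alone sketch (both functions are stand-ins):

#include <stdio.h>

/* stand-ins: the real ordering is superblock shutdown calling the
 * filesystem's ->put_super() before the device name is released */
static void put_super(void)        { printf("bdi_unregister (name alive)\n"); }
static void release_dev_name(void) { printf("device name freed\n"); }

static void kill_anon_super_sketch(void)
{
	put_super();            /* filesystem's hook runs first ... */
	release_dev_name();     /* ... then the name goes away */
}

int main(void)
{
	kill_anon_super_sketch();
	return 0;
}
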
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index 70e1fbbaaeab..ad4d2e787b20 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -15,8 +15,10 @@
15 15
16#include "callback.h" 16#include "callback.h"
17 17
18#ifdef CONFIG_NFS_V4
18static const int nfs_set_port_min = 0; 19static const int nfs_set_port_min = 0;
19static const int nfs_set_port_max = 65535; 20static const int nfs_set_port_max = 65535;
21#endif
20static struct ctl_table_header *nfs_callback_sysctl_table; 22static struct ctl_table_header *nfs_callback_sysctl_table;
21 23
22static ctl_table nfs_cb_sysctls[] = { 24static ctl_table nfs_cb_sysctls[] = {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d171696017f4..d63d964a0392 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1233,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1233 1233
1234 1234
1235#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 1235#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1236void nfs_commitdata_release(void *data) 1236static void nfs_commitdata_release(void *data)
1237{ 1237{
1238 struct nfs_write_data *wdata = data; 1238 struct nfs_write_data *wdata = data;
1239 1239
@@ -1541,6 +1541,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1541 break; 1541 break;
1542 } 1542 }
1543 ret = nfs_wait_on_request(req); 1543 ret = nfs_wait_on_request(req);
1544 nfs_release_request(req);
1544 if (ret < 0) 1545 if (ret < 0)
1545 goto out; 1546 goto out;
1546 } 1547 }
@@ -1597,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1597 struct nfs_page *req; 1598 struct nfs_page *req;
1598 int ret; 1599 int ret;
1599 1600
1600 if (PageFsCache(page)) 1601 nfs_fscache_release_page(page, GFP_KERNEL);
1601 nfs_fscache_release_page(page, GFP_KERNEL);
1602 1602
1603 req = nfs_find_and_lock_request(page); 1603 req = nfs_find_and_lock_request(page);
1604 ret = PTR_ERR(req); 1604 ret = PTR_ERR(req);
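
The single line added in nfs_wb_page_cancel() balances reference counting: the lookup that produced req returned a counted reference, so after nfs_wait_on_request() completes the request must be dropped with nfs_release_request() on every path, success or failure, before the loop iterates. The get/wait/put shape in miniature (toy types throughout):

#include <stdio.h>

struct toy_req { int refs; };

static struct toy_req *toy_find_request(struct toy_req *r)
{
	r->refs++;              /* lookup returns a counted reference */
	return r;
}

static int toy_wait_on_request(struct toy_req *r) { (void)r; return 0; }

static void toy_release_request(struct toy_req *r) { r->refs--; }

int main(void)
{
	struct toy_req req = { 1 };
	struct toy_req *r = toy_find_request(&req);
	int ret = toy_wait_on_request(r);

	toy_release_request(r);         /* drop it on every path */
	printf("ret=%d refs=%d\n", ret, req.refs);  /* refs back to 1 */
	return 0;
}
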
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c487810a2366..a0c4016413f1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
1316 1316
1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) 1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
1318{ 1318{
1319 struct svc_export *exp;
1320 u32 fsidv[2]; 1319 u32 fsidv[2];
1321 1320
1322 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); 1321 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
1323 1322
1324 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); 1323 return rqst_exp_find(rqstp, FSID_NUM, fsidv);
1325 /*
 1326 * We shouldn't have accepted an nfsv4 request at all if we
1327 * don't have a pseudoexport!:
1328 */
1329 if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
1330 exp = ERR_PTR(-ESERVERFAULT);
1331 return exp;
1332} 1324}
1333 1325
1334/* 1326/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c194793b642b..8715d194561a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -752,6 +752,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
752 flags, current_cred()); 752 flags, current_cred());
753 if (IS_ERR(*filp)) 753 if (IS_ERR(*filp))
754 host_err = PTR_ERR(*filp); 754 host_err = PTR_ERR(*filp);
755 else
756 host_err = ima_file_check(*filp, access);
755out_nfserr: 757out_nfserr:
756 err = nfserrno(host_err); 758 err = nfserrno(host_err);
757out: 759out:
@@ -2127,7 +2129,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
2127 */ 2129 */
2128 path.mnt = exp->ex_path.mnt; 2130 path.mnt = exp->ex_path.mnt;
2129 path.dentry = dentry; 2131 path.dentry = dentry;
2130 err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
2131nfsd_out: 2132nfsd_out:
2132 return err? nfserrno(err) : 0; 2133 return err? nfserrno(err) : 0;
2133} 2134}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 17584c524486..105b508b47a8 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2829,7 +2829,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2829 || sci->sc_seq_request != sci->sc_seq_done); 2829 || sci->sc_seq_request != sci->sc_seq_done);
2830 spin_unlock(&sci->sc_state_lock); 2830 spin_unlock(&sci->sc_state_lock);
2831 2831
2832 if (flag || nilfs_segctor_confirm(sci)) 2832 if (flag || !nilfs_segctor_confirm(sci))
2833 nilfs_segctor_write_out(sci); 2833 nilfs_segctor_write_out(sci);
2834 2834
2835 WARN_ON(!list_empty(&sci->sc_copied_buffers)); 2835 WARN_ON(!list_empty(&sci->sc_copied_buffers));
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3dae4a13f6e4..7e9df11260f4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@ bail:
599 return ret; 599 return ret;
600} 600}
601 601
602/* 602/*
603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're 603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
604 * particularly interested in the aio/dio case. Like the core uses 604 * particularly interested in the aio/dio case. Like the core uses
605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from 605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
670 670
671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, 671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
672 inode->i_sb->s_bdev, iov, offset, 672 inode->i_sb->s_bdev, iov, offset,
673 nr_segs, 673 nr_segs,
674 ocfs2_direct_IO_get_blocks, 674 ocfs2_direct_IO_get_blocks,
675 ocfs2_dio_end_io); 675 ocfs2_dio_end_io);
676 676
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d43d34a1dd31..21c808f752d8 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
368 } 368 }
369 ocfs2_metadata_cache_io_unlock(ci); 369 ocfs2_metadata_cache_io_unlock(ci);
370 370
371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
372 (unsigned long long)block, nr, 372 (unsigned long long)block, nr,
373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", 373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
374 flags); 374 flags);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index eda5b8bcddd5..5c9890006708 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
78 78
79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; 79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
80 80
81/* Only sets a new threshold if there are no active regions. 81/* Only sets a new threshold if there are no active regions.
82 * 82 *
83 * No locking or otherwise interesting code is required for reading 83 * No locking or otherwise interesting code is required for reading
84 * o2hb_dead_threshold as it can't change once regions are active and 84 * o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
170 170
171 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " 171 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
172 "milliseconds\n", reg->hr_dev_name, 172 "milliseconds\n", reg->hr_dev_name,
173 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); 173 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
174 o2quo_disk_timeout(); 174 o2quo_disk_timeout();
175} 175}
176 176
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
624 "seq %llu last %llu changed %u equal %u\n", 624 "seq %llu last %llu changed %u equal %u\n",
625 slot->ds_node_num, (long long)slot->ds_last_generation, 625 slot->ds_node_num, (long long)slot->ds_last_generation,
626 le32_to_cpu(hb_block->hb_cksum), 626 le32_to_cpu(hb_block->hb_cksum),
627 (unsigned long long)le64_to_cpu(hb_block->hb_seq), 627 (unsigned long long)le64_to_cpu(hb_block->hb_seq),
628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, 628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
629 slot->ds_equal_samples); 629 slot->ds_equal_samples);
630 630
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 334f231a422c..d8d0c65ac03c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
485 } 485 }
486 486
487 if (was_valid && !valid) { 487 if (was_valid && !valid) {
488 printk(KERN_INFO "o2net: no longer connected to " 488 printk(KERN_NOTICE "o2net: no longer connected to "
489 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); 489 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
490 o2net_complete_nodes_nsw(nn); 490 o2net_complete_nodes_nsw(nn);
491 } 491 }
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
493 if (!was_valid && valid) { 493 if (!was_valid && valid) {
494 o2quo_conn_up(o2net_num_from_nn(nn)); 494 o2quo_conn_up(o2net_num_from_nn(nn));
495 cancel_delayed_work(&nn->nn_connect_expired); 495 cancel_delayed_work(&nn->nn_connect_expired);
496 printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", 496 printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
497 o2nm_this_node() > sc->sc_node->nd_num ? 497 o2nm_this_node() > sc->sc_node->nd_num ?
498 "connected to" : "accepted connection from", 498 "connected to" : "accepted connection from",
499 SC_NODEF_ARGS(sc)); 499 SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
930 cond_resched(); 930 cond_resched();
931 continue; 931 continue;
932 } 932 }
933 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT 933 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
934 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); 934 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
935 o2net_ensure_shutdown(nn, sc, 0); 935 o2net_ensure_shutdown(nn, sc, 0);
936 break; 936 break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
1476 1476
1477 do_gettimeofday(&now); 1477 do_gettimeofday(&now);
1478 1478
1479 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " 1479 printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
1480 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), 1480 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
1481 o2net_idle_timeout() / 1000, 1481 o2net_idle_timeout() / 1000,
1482 o2net_idle_timeout() % 1000); 1482 o2net_idle_timeout() % 1000);
1483 mlog(ML_NOTICE, "here are some times that might help debug the " 1483 mlog(ML_NOTICE, "here are some times that might help debug the "
1484 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " 1484 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
1485 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", 1485 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
1486 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, 1486 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
1487 now.tv_sec, (long) now.tv_usec, 1487 now.tv_sec, (long) now.tv_usec,
1488 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, 1488 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
1489 sc->sc_tv_advance_start.tv_sec, 1489 sc->sc_tv_advance_start.tv_sec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..96fa7ebc530c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -32,10 +32,10 @@
32 * on their number */ 32 * on their number */
33#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) 33#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
34 34
35/* 35/*
36 * This version number represents quite a lot, unfortunately. It not 36 * This version number represents quite a lot, unfortunately. It not
37 * only represents the raw network message protocol on the wire but also 37 * only represents the raw network message protocol on the wire but also
38 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
39 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
40 * 40 *
41 * With version 11, we separate out the filesystem locking portion. The 41 * With version 11, we separate out the filesystem locking portion. The
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index b5786a787fab..3cfa114aa391 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
95 mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ 95 mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
96} while (0) 96} while (0)
97 97
98#define DLM_LKSB_UNUSED1 0x01 98#define DLM_LKSB_UNUSED1 0x01
99#define DLM_LKSB_PUT_LVB 0x02 99#define DLM_LKSB_PUT_LVB 0x02
100#define DLM_LKSB_GET_LVB 0x04 100#define DLM_LKSB_GET_LVB 0x04
101#define DLM_LKSB_UNUSED2 0x08 101#define DLM_LKSB_UNUSED2 0x08
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 01cf8cc3d286..dccc439fa087 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
123 dlm_lock_put(lock); 123 dlm_lock_put(lock);
124 /* free up the reserved bast that we are cancelling. 124 /* free up the reserved bast that we are cancelling.
125 * guaranteed that this will not be the last reserved 125 * guaranteed that this will not be the last reserved
126 * ast because *both* an ast and a bast were reserved 126 * ast because *both* an ast and a bast were reserved
127 * to get to this point. the res->spinlock will not be 127 * to get to this point. the res->spinlock will not be
128 * taken here */ 128 * taken here */
129 dlm_lockres_release_ast(dlm, res); 129 dlm_lockres_release_ast(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ca96bce50e18..f283bce776b4 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
396 /* instead of logging the same network error over 396 /* instead of logging the same network error over
397 * and over, sleep here and wait for the heartbeat 397 * and over, sleep here and wait for the heartbeat
398 * to notice the node is dead. times out after 5s. */ 398 * to notice the node is dead. times out after 5s. */
399 dlm_wait_for_node_death(dlm, res->owner, 399 dlm_wait_for_node_death(dlm, res->owner,
400 DLM_NODE_DEATH_WAIT_MAX); 400 DLM_NODE_DEATH_WAIT_MAX);
401 ret = DLM_RECOVERING; 401 ret = DLM_RECOVERING;
402 mlog(0, "node %u died so returning DLM_RECOVERING " 402 mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 42b0bad7a612..0cd24cf54396 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
102 assert_spin_locked(&res->spinlock); 102 assert_spin_locked(&res->spinlock);
103 103
104 stringify_lockname(res->lockname.name, res->lockname.len, 104 stringify_lockname(res->lockname.name, res->lockname.len,
105 buf, sizeof(buf) - 1); 105 buf, sizeof(buf));
106 printk("lockres: %s, owner=%u, state=%u\n", 106 printk("lockres: %s, owner=%u, state=%u\n",
107 buf, res->owner, res->state); 107 buf, res->owner, res->state);
108 printk(" last used: %lu, refcnt: %u, on purge list: %s\n", 108 printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0334000676d3..988c9055fd4e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
816 } 816 }
817 817
818 /* Once the dlm ctxt is marked as leaving then we don't want 818 /* Once the dlm ctxt is marked as leaving then we don't want
819 * to be put in someone's domain map. 819 * to be put in someone's domain map.
820 * Also, explicitly disallow joining at certain troublesome 820 * Also, explicitly disallow joining at certain troublesome
821 * times (ie. during recovery). */ 821 * times (ie. during recovery). */
822 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { 822 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 437698e9465f..733337772671 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
269 } 269 }
270 dlm_revert_pending_lock(res, lock); 270 dlm_revert_pending_lock(res, lock);
271 dlm_lock_put(lock); 271 dlm_lock_put(lock);
272 } else if (dlm_is_recovery_lock(res->lockname.name, 272 } else if (dlm_is_recovery_lock(res->lockname.name,
273 res->lockname.len)) { 273 res->lockname.len)) {
274 /* special case for the $RECOVERY lock. 274 /* special case for the $RECOVERY lock.
275 * there will never be an AST delivered to put 275 * there will never be an AST delivered to put
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 03ccf9a7b1f4..a659606dcb95 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
366 struct dlm_master_list_entry *mle; 366 struct dlm_master_list_entry *mle;
367 367
368 assert_spin_locked(&dlm->spinlock); 368 assert_spin_locked(&dlm->spinlock);
369 369
370 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 370 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
371 if (node_up) 371 if (node_up)
372 dlm_mle_node_up(dlm, mle, NULL, idx); 372 dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
833 __dlm_insert_mle(dlm, mle); 833 __dlm_insert_mle(dlm, mle);
834 834
835 /* still holding the dlm spinlock, check the recovery map 835 /* still holding the dlm spinlock, check the recovery map
836 * to see if there are any nodes that still need to be 836 * to see if there are any nodes that still need to be
837 * considered. these will not appear in the mle nodemap 837 * considered. these will not appear in the mle nodemap
838 * but they might own this lockres. wait on them. */ 838 * but they might own this lockres. wait on them. */
839 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 839 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
883 msleep(500); 883 msleep(500);
884 } 884 }
885 continue; 885 continue;
886 } 886 }
887 887
888 dlm_kick_recovery_thread(dlm); 888 dlm_kick_recovery_thread(dlm);
889 msleep(1000); 889 msleep(1000);
@@ -939,8 +939,8 @@ wait:
939 res->lockname.name, blocked); 939 res->lockname.name, blocked);
940 if (++tries > 20) { 940 if (++tries > 20) {
941 mlog(ML_ERROR, "%s:%.*s: spinning on " 941 mlog(ML_ERROR, "%s:%.*s: spinning on "
942 "dlm_wait_for_lock_mastery, blocked=%d\n", 942 "dlm_wait_for_lock_mastery, blocked=%d\n",
943 dlm->name, res->lockname.len, 943 dlm->name, res->lockname.len,
944 res->lockname.name, blocked); 944 res->lockname.name, blocked);
945 dlm_print_one_lock_resource(res); 945 dlm_print_one_lock_resource(res);
946 dlm_print_one_mle(mle); 946 dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
1029 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1029 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1030 b = (mle->type == DLM_MLE_BLOCK); 1030 b = (mle->type == DLM_MLE_BLOCK);
1031 if ((*blocked && !b) || (!*blocked && b)) { 1031 if ((*blocked && !b) || (!*blocked && b)) {
1032 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1032 mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1033 dlm->name, res->lockname.len, res->lockname.name, 1033 dlm->name, res->lockname.len, res->lockname.name,
1034 *blocked, b); 1034 *blocked, b);
1035 *blocked = b; 1035 *blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
1602 } 1602 }
1603 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1603 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1604 dlm->node_num, res->lockname.len, res->lockname.name); 1604 dlm->node_num, res->lockname.len, res->lockname.name);
1605 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1605 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1606 DLM_ASSERT_MASTER_MLE_CLEANUP); 1606 DLM_ASSERT_MASTER_MLE_CLEANUP);
1607 if (ret < 0) { 1607 if (ret < 0) {
1608 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1608 mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
1701 1701
1702 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1702 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1703 mlog(0, "%.*s: node %u create mles on other " 1703 mlog(0, "%.*s: node %u create mles on other "
1704 "nodes and requests a re-assert\n", 1704 "nodes and requests a re-assert\n",
1705 namelen, lockname, to); 1705 namelen, lockname, to);
1706 reassert = 1; 1706 reassert = 1;
1707 } 1707 }
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1812 spin_unlock(&dlm->master_lock); 1812 spin_unlock(&dlm->master_lock);
1813 spin_unlock(&dlm->spinlock); 1813 spin_unlock(&dlm->spinlock);
1814 goto done; 1814 goto done;
1815 } 1815 }
1816 } 1816 }
1817 } 1817 }
1818 spin_unlock(&dlm->master_lock); 1818 spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
1883 int extra_ref = 0; 1883 int extra_ref = 0;
1884 int nn = -1; 1884 int nn = -1;
1885 int rr, err = 0; 1885 int rr, err = 0;
1886 1886
1887 spin_lock(&mle->spinlock); 1887 spin_lock(&mle->spinlock);
1888 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1888 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1889 extra_ref = 1; 1889 extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
1891 /* MASTER mle: if any bits set in the response map 1891 /* MASTER mle: if any bits set in the response map
1892 * then the calling node needs to re-assert to clear 1892 * then the calling node needs to re-assert to clear
1893 * up nodes that this node contacted */ 1893 * up nodes that this node contacted */
1894 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1894 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1895 nn+1)) < O2NM_MAX_NODES) { 1895 nn+1)) < O2NM_MAX_NODES) {
1896 if (nn != dlm->node_num && nn != assert->node_idx) 1896 if (nn != dlm->node_num && nn != assert->node_idx)
1897 master_request = 1; 1897 master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
2002 __dlm_print_one_lock_resource(res); 2002 __dlm_print_one_lock_resource(res);
2003 spin_unlock(&res->spinlock); 2003 spin_unlock(&res->spinlock);
2004 spin_unlock(&dlm->spinlock); 2004 spin_unlock(&dlm->spinlock);
2005 *ret_data = (void *)res; 2005 *ret_data = (void *)res;
2006 dlm_put(dlm); 2006 dlm_put(dlm);
2007 return -EINVAL; 2007 return -EINVAL;
2008} 2008}
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2040 item->u.am.request_from = request_from; 2040 item->u.am.request_from = request_from;
2041 item->u.am.flags = flags; 2041 item->u.am.flags = flags;
2042 2042
2043 if (ignore_higher) 2043 if (ignore_higher)
2044 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2044 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2045 res->lockname.name); 2045 res->lockname.name);
2046 2046
2047 spin_lock(&dlm->work_lock); 2047 spin_lock(&dlm->work_lock);
2048 list_add_tail(&item->list, &dlm->work_list); 2048 list_add_tail(&item->list, &dlm->work_list);
2049 spin_unlock(&dlm->work_lock); 2049 spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
 2133 * think that $RECOVERY is currently mastered by a dead node. If so, 2133 * think that $RECOVERY is currently mastered by a dead node. If so,
 2134 * we wait a short time to allow that node to get notified by its own 2134 * we wait a short time to allow that node to get notified by its own
 2135 * heartbeat stack, then check again. All $RECOVERY lock resources 2135 * heartbeat stack, then check again. All $RECOVERY lock resources
 2136 * mastered by dead nodes are purged when the heartbeat callback is 2136 * mastered by dead nodes are purged when the heartbeat callback is
 2137 * fired, so we can know for sure that it is safe to continue once 2137 * fired, so we can know for sure that it is safe to continue once
 2138 * the node returns a live node or no node. */ 2138 * the node returns a live node or no node. */
2139static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2139static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2174 ret = -EAGAIN; 2174 ret = -EAGAIN;
2175 } 2175 }
2176 spin_unlock(&dlm->spinlock); 2176 spin_unlock(&dlm->spinlock);
2177 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2177 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2178 master); 2178 master);
2179 break; 2179 break;
2180 } 2180 }
@@ -2602,7 +2602,7 @@ fail:
2602 2602
2603 mlog(0, "%s:%.*s: timed out during migration\n", 2603 mlog(0, "%s:%.*s: timed out during migration\n",
2604 dlm->name, res->lockname.len, res->lockname.name); 2604 dlm->name, res->lockname.len, res->lockname.name);
2605 /* avoid hang during shutdown when migrating lockres 2605 /* avoid hang during shutdown when migrating lockres
2606 * to a node which also goes down */ 2606 * to a node which also goes down */
2607 if (dlm_is_node_dead(dlm, target)) { 2607 if (dlm_is_node_dead(dlm, target)) {
2608 mlog(0, "%s:%.*s: expected migration " 2608 mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2738 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2738 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2739 spin_unlock(&res->spinlock); 2739 spin_unlock(&res->spinlock);
2740 2740
2741 /* target has died, so make the caller break out of the 2741 /* target has died, so make the caller break out of the
2742 * wait_event, but caller must recheck the domain_map */ 2742 * wait_event, but caller must recheck the domain_map */
2743 spin_lock(&dlm->spinlock); 2743 spin_lock(&dlm->spinlock);
2744 if (!test_bit(mig_target, dlm->domain_map)) 2744 if (!test_bit(mig_target, dlm->domain_map))
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 2f9e4e19a4f2..344bcf90cbf4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1050 if (lock->ml.node == dead_node) { 1050 if (lock->ml.node == dead_node) {
1051 mlog(0, "AHA! there was " 1051 mlog(0, "AHA! there was "
1052 "a $RECOVERY lock for dead " 1052 "a $RECOVERY lock for dead "
1053 "node %u (%s)!\n", 1053 "node %u (%s)!\n",
1054 dead_node, dlm->name); 1054 dead_node, dlm->name);
1055 list_del_init(&lock->list); 1055 list_del_init(&lock->list);
1056 dlm_lock_put(lock); 1056 dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1164 mres->master = master; 1164 mres->master = master;
1165} 1165}
1166 1166
1167static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1168 struct dlm_migratable_lockres *mres,
1169 int queue)
1170{
1171 if (!lock->lksb)
1172 return;
1173
1174 /* Ignore lvb in all locks in the blocked list */
1175 if (queue == DLM_BLOCKED_LIST)
1176 return;
1177
1178 /* Only consider lvbs in locks with granted EX or PR lock levels */
1179 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1180 return;
1181
1182 if (dlm_lvb_is_empty(mres->lvb)) {
1183 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1184 return;
1185 }
1186
1187 /* Ensure the lvb copied for migration matches in other valid locks */
1188 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1189 return;
1190
1191 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1192 "node=%u\n",
1193 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1194 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1195 lock->lockres->lockname.len, lock->lockres->lockname.name,
1196 lock->ml.node);
1197 dlm_print_one_lock_resource(lock->lockres);
1198 BUG();
1199}
1167 1200
1168/* returns 1 if this lock fills the network structure, 1201/* returns 1 if this lock fills the network structure,
1169 * 0 otherwise */ 1202 * 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
1181 ml->list = queue; 1214 ml->list = queue;
1182 if (lock->lksb) { 1215 if (lock->lksb) {
1183 ml->flags = lock->lksb->flags; 1216 ml->flags = lock->lksb->flags;
1184 /* send our current lvb */ 1217 dlm_prepare_lvb_for_migration(lock, mres, queue);
1185 if (ml->type == LKM_EXMODE ||
1186 ml->type == LKM_PRMODE) {
1187 /* if it is already set, this had better be a PR
1188 * and it has to match */
1189 if (!dlm_lvb_is_empty(mres->lvb) &&
1190 (ml->type == LKM_EXMODE ||
1191 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1192 mlog(ML_ERROR, "mismatched lvbs!\n");
1193 dlm_print_one_lock_resource(lock->lockres);
1194 BUG();
1195 }
1196 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1197 }
1198 } 1218 }
1199 ml->node = lock->ml.node; 1219 ml->node = lock->ml.node;
1200 mres->num_locks++; 1220 mres->num_locks++;
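
The new dlm_prepare_lvb_for_migration() flattens the old nested conditions into early returns, which makes the rules explicit: locks without an lksb or on the blocked list contribute no LVB, only EX and PR grant modes carry a valid one, the first valid LVB seen is copied into the migration structure, and any later disagreement is a fatal inconsistency (a BUG() in the kernel). The guard ordering as a compilable sketch with fixed-size toy LVBs:

#include <stdio.h>
#include <string.h>

#define TOY_LVB_LEN 8
enum { Q_GRANTED, Q_CONVERTING, Q_BLOCKED };
enum { MODE_EX, MODE_PR, MODE_NL };

static int lvb_is_empty(const char *lvb)
{
	static const char zero[TOY_LVB_LEN];
	return memcmp(lvb, zero, TOY_LVB_LEN) == 0;
}

/* returns 0 on success, -1 on the "mismatched lvb" fatal case */
static int prepare_lvb(char *mres_lvb, const char *lock_lvb,
		       int queue, int mode)
{
	if (queue == Q_BLOCKED)
		return 0;                    /* blocked locks: ignore lvb */
	if (mode != MODE_EX && mode != MODE_PR)
		return 0;                    /* only EX/PR grants count */
	if (lvb_is_empty(mres_lvb)) {
		memcpy(mres_lvb, lock_lvb, TOY_LVB_LEN);
		return 0;                    /* first valid lvb wins */
	}
	if (memcmp(mres_lvb, lock_lvb, TOY_LVB_LEN) == 0)
		return 0;                    /* consistent with the copy */
	return -1;                           /* would BUG() in the kernel */
}

int main(void)
{
	char mres[TOY_LVB_LEN] = { 0 };
	char a[TOY_LVB_LEN] = "lvb-aaa";
	char b[TOY_LVB_LEN] = "lvb-bbb";

	printf("%d ", prepare_lvb(mres, a, Q_GRANTED, MODE_EX));   /* 0 */
	printf("%d\n", prepare_lvb(mres, b, Q_GRANTED, MODE_PR));  /* -1 */
	return 0;
}
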
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1730 struct dlm_lock *lock = NULL; 1750 struct dlm_lock *lock = NULL;
1731 u8 from = O2NM_MAX_NODES; 1751 u8 from = O2NM_MAX_NODES;
1732 unsigned int added = 0; 1752 unsigned int added = 0;
1753 __be64 c;
1733 1754
1734 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1755 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1735 for (i=0; i<mres->num_locks; i++) { 1756 for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1777 /* lock is always created locally first, and 1798 /* lock is always created locally first, and
1778 * destroyed locally last. it must be on the list */ 1799 * destroyed locally last. it must be on the list */
1779 if (!lock) { 1800 if (!lock) {
1780 __be64 c = ml->cookie; 1801 c = ml->cookie;
1781 mlog(ML_ERROR, "could not find local lock " 1802 mlog(ML_ERROR, "Could not find local lock "
1782 "with cookie %u:%llu!\n", 1803 "with cookie %u:%llu, node %u, "
1804 "list %u, flags 0x%x, type %d, "
1805 "conv %d, highest blocked %d\n",
1783 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1806 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1784 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 1807 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1808 ml->node, ml->list, ml->flags, ml->type,
1809 ml->convert_type, ml->highest_blocked);
1810 __dlm_print_one_lock_resource(res);
1811 BUG();
1812 }
1813
1814 if (lock->ml.node != ml->node) {
1815 c = lock->ml.cookie;
1816 mlog(ML_ERROR, "Mismatched node# in lock "
1817 "cookie %u:%llu, name %.*s, node %u\n",
1818 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1819 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1820 res->lockname.len, res->lockname.name,
1821 lock->ml.node);
1822 c = ml->cookie;
1823 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1824 "node %u, list %u, flags 0x%x, type %d, "
1825 "conv %d, highest blocked %d\n",
1826 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1827 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1828 ml->node, ml->list, ml->flags, ml->type,
1829 ml->convert_type, ml->highest_blocked);
1785 __dlm_print_one_lock_resource(res); 1830 __dlm_print_one_lock_resource(res);
1786 BUG(); 1831 BUG();
1787 } 1832 }
1788 BUG_ON(lock->ml.node != ml->node);
1789 1833
1790 if (tmpq != queue) { 1834 if (tmpq != queue) {
1791 mlog(0, "lock was on %u instead of %u for %.*s\n", 1835 c = ml->cookie;
1792 j, ml->list, res->lockname.len, res->lockname.name); 1836 mlog(0, "Lock cookie %u:%llu was on list %u "
1837 "instead of list %u for %.*s\n",
1838 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1839 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1840 j, ml->list, res->lockname.len,
1841 res->lockname.name);
1842 __dlm_print_one_lock_resource(res);
1793 spin_unlock(&res->spinlock); 1843 spin_unlock(&res->spinlock);
1794 continue; 1844 continue;
1795 } 1845 }
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1839 * the lvb. */ 1889 * the lvb. */
1840 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1890 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1841 } else { 1891 } else {
1842 /* otherwise, the node is sending its 1892 /* otherwise, the node is sending its
1843 * most recent valid lvb info */ 1893 * most recent valid lvb info */
1844 BUG_ON(ml->type != LKM_EXMODE && 1894 BUG_ON(ml->type != LKM_EXMODE &&
1845 ml->type != LKM_PRMODE); 1895 ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
1886 spin_lock(&res->spinlock); 1936 spin_lock(&res->spinlock);
1887 list_for_each_entry(lock, queue, list) { 1937 list_for_each_entry(lock, queue, list) {
1888 if (lock->ml.cookie == ml->cookie) { 1938 if (lock->ml.cookie == ml->cookie) {
1889 __be64 c = lock->ml.cookie; 1939 c = lock->ml.cookie;
1890 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1940 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1891 "exists on this lockres!\n", dlm->name, 1941 "exists on this lockres!\n", dlm->name,
1892 res->lockname.len, res->lockname.name, 1942 res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2114 assert_spin_locked(&res->spinlock); 2164 assert_spin_locked(&res->spinlock);
2115 2165
2116 if (res->owner == dlm->node_num) 2166 if (res->owner == dlm->node_num)
2117 /* if this node owned the lockres, and if the dead node 2167 /* if this node owned the lockres, and if the dead node
2118 * had an EX when he died, blank out the lvb */ 2168 * had an EX when he died, blank out the lvb */
2119 search_node = dead_node; 2169 search_node = dead_node;
2120 else { 2170 else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2152 2202
2153 /* this node is the lockres master: 2203 /* this node is the lockres master:
2154 * 1) remove any stale locks for the dead node 2204 * 1) remove any stale locks for the dead node
2155 * 2) if the dead node had an EX when he died, blank out the lvb 2205 * 2) if the dead node had an EX when he died, blank out the lvb
2156 */ 2206 */
2157 assert_spin_locked(&dlm->spinlock); 2207 assert_spin_locked(&dlm->spinlock);
2158 assert_spin_locked(&res->spinlock); 2208 assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2193 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2243 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2194 "dropping ref from lockres\n", dlm->name, 2244 "dropping ref from lockres\n", dlm->name,
2195 res->lockname.len, res->lockname.name, freed, dead_node); 2245 res->lockname.len, res->lockname.name, freed, dead_node);
 2196 BUG_ON(!test_bit(dead_node, res->refmap)); 2246 if (!test_bit(dead_node, res->refmap)) {
2247 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2248 "but ref was not set\n", dlm->name,
2249 res->lockname.len, res->lockname.name, freed, dead_node);
2250 __dlm_print_one_lock_resource(res);
2251 }
2197 dlm_lockres_clear_refmap_bit(dead_node, res); 2252 dlm_lockres_clear_refmap_bit(dead_node, res);
2198 } else if (test_bit(dead_node, res->refmap)) { 2253 } else if (test_bit(dead_node, res->refmap)) {
2199 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2254 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2260 } 2315 }
2261 spin_unlock(&res->spinlock); 2316 spin_unlock(&res->spinlock);
2262 continue; 2317 continue;
2263 } 2318 }
2264 spin_lock(&res->spinlock); 2319 spin_lock(&res->spinlock);
2265 /* zero the lvb if necessary */ 2320 /* zero the lvb if necessary */
2266 dlm_revalidate_lvb(dlm, res, dead_node); 2321 dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2411 * this function on each node racing to become the recovery 2466 * this function on each node racing to become the recovery
2412 * master will not stop attempting this until either: 2467 * master will not stop attempting this until either:
2413 * a) this node gets the EX (and becomes the recovery master), 2468 * a) this node gets the EX (and becomes the recovery master),
2414 * or b) dlm->reco.new_master gets set to some nodenum 2469 * or b) dlm->reco.new_master gets set to some nodenum
2415 * != O2NM_INVALID_NODE_NUM (another node will do the reco). 2470 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2416 * so each time a recovery master is needed, the entire cluster 2471 * so each time a recovery master is needed, the entire cluster
2417 * will sync at this point. if the new master dies, that will 2472 * will sync at this point. if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2424 2479
2425 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2480 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2426 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2481 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2427again: 2482again:
2428 memset(&lksb, 0, sizeof(lksb)); 2483 memset(&lksb, 0, sizeof(lksb));
2429 2484
2430 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2485 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
2437 if (ret == DLM_NORMAL) { 2492 if (ret == DLM_NORMAL) {
2438 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2493 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2439 dlm->name, dlm->node_num); 2494 dlm->name, dlm->node_num);
2440 2495
2441 /* got the EX lock. check to see if another node 2496 /* got the EX lock. check to see if another node
2442 * just became the reco master */ 2497 * just became the reco master */
2443 if (dlm_reco_master_ready(dlm)) { 2498 if (dlm_reco_master_ready(dlm)) {
2444 mlog(0, "%s: got reco EX lock, but %u will " 2499 mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
2451 /* see if recovery was already finished elsewhere */ 2506 /* see if recovery was already finished elsewhere */
2452 spin_lock(&dlm->spinlock); 2507 spin_lock(&dlm->spinlock);
2453 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2508 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2454 status = -EINVAL; 2509 status = -EINVAL;
2455 mlog(0, "%s: got reco EX lock, but " 2510 mlog(0, "%s: got reco EX lock, but "
2456 "node got recovered already\n", dlm->name); 2511 "node got recovered already\n", dlm->name);
2457 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2512 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2458 mlog(ML_ERROR, "%s: new master is %u " 2513 mlog(ML_ERROR, "%s: new master is %u "
2459 "but no dead node!\n", 2514 "but no dead node!\n",
2460 dlm->name, dlm->reco.new_master); 2515 dlm->name, dlm->reco.new_master);
2461 BUG(); 2516 BUG();
2462 } 2517 }
@@ -2468,7 +2523,7 @@ again:
2468 * set the master and send the messages to begin recovery */ 2523 * set the master and send the messages to begin recovery */
2469 if (!status) { 2524 if (!status) {
2470 mlog(0, "%s: dead=%u, this=%u, sending " 2525 mlog(0, "%s: dead=%u, this=%u, sending "
2471 "begin_reco now\n", dlm->name, 2526 "begin_reco now\n", dlm->name,
2472 dlm->reco.dead_node, dlm->node_num); 2527 dlm->reco.dead_node, dlm->node_num);
2473 status = dlm_send_begin_reco_message(dlm, 2528 status = dlm_send_begin_reco_message(dlm,
2474 dlm->reco.dead_node); 2529 dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
2501 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2556 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2502 dlm->name, dlm->node_num); 2557 dlm->name, dlm->node_num);
2503 /* another node is master. wait on 2558 /* another node is master. wait on
2504 * reco.new_master != O2NM_INVALID_NODE_NUM 2559 * reco.new_master != O2NM_INVALID_NODE_NUM
2505 * for at most one second */ 2560 * for at most one second */
2506 wait_event_timeout(dlm->dlm_reco_thread_wq, 2561 wait_event_timeout(dlm->dlm_reco_thread_wq,
2507 dlm_reco_master_ready(dlm), 2562 dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
2589 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2644 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2590 ret = 0; 2645 ret = 0;
2591 } 2646 }
2592 if (ret == -EAGAIN) { 2647
2648 /*
2649 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2650 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2651 * We are handling both for compatibility reasons.
2652 */
2653 if (ret == -EAGAIN || ret == EAGAIN) {
2593 mlog(0, "%s: trying to start recovery of node " 2654 mlog(0, "%s: trying to start recovery of node "
2594 "%u, but node %u is waiting for last recovery " 2655 "%u, but node %u is waiting for last recovery "
2595 "to complete, backoff for a bit\n", dlm->name, 2656 "to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
2599 } 2660 }
2600 if (ret < 0) { 2661 if (ret < 0) {
2601 struct dlm_lock_resource *res; 2662 struct dlm_lock_resource *res;
2602 /* this is now a serious problem, possibly ENOMEM 2663 /* this is now a serious problem, possibly ENOMEM
2603 * in the network stack. must retry */ 2664 * in the network stack. must retry */
2604 mlog_errno(ret); 2665 mlog_errno(ret);
2605 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2666 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
2612 } else { 2673 } else {
2613 mlog(ML_ERROR, "recovery lock not found\n"); 2674 mlog(ML_ERROR, "recovery lock not found\n");
2614 } 2675 }
2615 /* sleep for a bit in hopes that we can avoid 2676 /* sleep for a bit in hopes that we can avoid
2616 * another ENOMEM */ 2677 * another ENOMEM */
2617 msleep(100); 2678 msleep(100);
2618 goto retry; 2679 goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2664 } 2725 }
2665 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2726 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2666 mlog(ML_NOTICE, "%s: dead_node previously set to %u, " 2727 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2667 "node %u changing it to %u\n", dlm->name, 2728 "node %u changing it to %u\n", dlm->name,
2668 dlm->reco.dead_node, br->node_idx, br->dead_node); 2729 dlm->reco.dead_node, br->node_idx, br->dead_node);
2669 } 2730 }
2670 dlm_set_reco_master(dlm, br->node_idx); 2731 dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
2730 if (ret < 0) { 2791 if (ret < 0) {
2731 mlog_errno(ret); 2792 mlog_errno(ret);
2732 if (dlm_is_host_down(ret)) { 2793 if (dlm_is_host_down(ret)) {
2733 /* this has no effect on this recovery 2794 /* this has no effect on this recovery
2734 * session, so set the status to zero to 2795 * session, so set the status to zero to
2735 * finish out the last recovery */ 2796 * finish out the last recovery */
2736 mlog(ML_ERROR, "node %u went down after this " 2797 mlog(ML_ERROR, "node %u went down after this "
2737 "node finished recovery.\n", nodenum); 2798 "node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2768 mlog(0, "%s: node %u finalizing recovery stage%d of " 2829 mlog(0, "%s: node %u finalizing recovery stage%d of "
2769 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, 2830 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2770 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); 2831 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2771 2832
2772 spin_lock(&dlm->spinlock); 2833 spin_lock(&dlm->spinlock);
2773 2834
2774 if (dlm->reco.new_master != fr->node_idx) { 2835 if (dlm->reco.new_master != fr->node_idx) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 00f53b2aea76..49e29ecd0201 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
190 actions &= ~(DLM_UNLOCK_REMOVE_LOCK| 190 actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
191 DLM_UNLOCK_REGRANT_LOCK| 191 DLM_UNLOCK_REGRANT_LOCK|
192 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 192 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
193 } else if (status == DLM_RECOVERING || 193 } else if (status == DLM_RECOVERING ||
194 status == DLM_MIGRATING || 194 status == DLM_MIGRATING ||
195 status == DLM_FORWARD) { 195 status == DLM_FORWARD) {
196 /* must clear the actions because this unlock 196 /* must clear the actions because this unlock
197 * is about to be retried. cannot free or do 197 * is about to be retried. cannot free or do
@@ -661,14 +661,14 @@ retry:
661 if (call_ast) { 661 if (call_ast) {
662 mlog(0, "calling unlockast(%p, %d)\n", data, status); 662 mlog(0, "calling unlockast(%p, %d)\n", data, status);
663 if (is_master) { 663 if (is_master) {
664 /* it is possible that there is one last bast 664 /* it is possible that there is one last bast
665 * pending. make sure it is flushed, then 665 * pending. make sure it is flushed, then
666 * call the unlockast. 666 * call the unlockast.
667 * not an issue if this is a mastered remotely, 667 * not an issue if this is a mastered remotely,
668 * since this lock has been removed from the 668 * since this lock has been removed from the
669 * lockres queues and cannot be found. */ 669 * lockres queues and cannot be found. */
670 dlm_kick_thread(dlm, NULL); 670 dlm_kick_thread(dlm, NULL);
671 wait_event(dlm->ast_wq, 671 wait_event(dlm->ast_wq,
672 dlm_lock_basts_flushed(dlm, lock)); 672 dlm_lock_basts_flushed(dlm, lock));
673 } 673 }
674 (*unlockast)(data, status); 674 (*unlockast)(data, status);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c5e4a49e3a12..e044019cb3b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
875 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); 875 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
876 876
877 lockres->l_level = lockres->l_requested; 877 lockres->l_level = lockres->l_requested;
878
879 /*
880 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
881 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
882 * downconverting the lock before the upconvert has fully completed.
883 */
884 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
885
878 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 886 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
879 887
880 mlog_exit_void(); 888 mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
907 915
908 assert_spin_locked(&lockres->l_lock); 916 assert_spin_locked(&lockres->l_lock);
909 917
910 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
911
912 if (level > lockres->l_blocking) { 918 if (level > lockres->l_blocking) {
913 /* only schedule a downconvert if we haven't already scheduled 919 /* only schedule a downconvert if we haven't already scheduled
914 * one that goes low enough to satisfy the level we're 920 * one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
921 lockres->l_blocking = level; 927 lockres->l_blocking = level;
922 } 928 }
923 929
930 if (needs_downconvert)
931 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
932
924 mlog_exit(needs_downconvert); 933 mlog_exit(needs_downconvert);
925 return needs_downconvert; 934 return needs_downconvert;
926} 935}
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1133 mlog_entry_void(); 1142 mlog_entry_void();
1134 spin_lock_irqsave(&lockres->l_lock, flags); 1143 spin_lock_irqsave(&lockres->l_lock, flags);
1135 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 1144 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1145 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1136 if (convert) 1146 if (convert)
1137 lockres->l_action = OCFS2_AST_INVALID; 1147 lockres->l_action = OCFS2_AST_INVALID;
1138 else 1148 else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1323again: 1333again:
1324 wait = 0; 1334 wait = 0;
1325 1335
1336 spin_lock_irqsave(&lockres->l_lock, flags);
1337
1326 if (catch_signals && signal_pending(current)) { 1338 if (catch_signals && signal_pending(current)) {
1327 ret = -ERESTARTSYS; 1339 ret = -ERESTARTSYS;
1328 goto out; 1340 goto unlock;
1329 } 1341 }
1330 1342
1331 spin_lock_irqsave(&lockres->l_lock, flags);
1332
1333 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, 1343 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1334 "Cluster lock called on freeing lockres %s! flags " 1344 "Cluster lock called on freeing lockres %s! flags "
1335 "0x%lx\n", lockres->l_name, lockres->l_flags); 1345 "0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
1346 goto unlock; 1356 goto unlock;
1347 } 1357 }
1348 1358
1359 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1360 /*
1361 * We've upconverted. If the lock now has a level we can
1362 * work with, we take it. If, however, the lock is not at the
1363 * required level, we go thru the full cycle. One way this could
1364 * happen is if a process requesting an upconvert to PR is
1365 * closely followed by another requesting upconvert to an EX.
1366 * If the process requesting EX lands here, we want it to
1367 * continue attempting to upconvert and let the process
1368 * requesting PR take the lock.
1369 * If multiple processes request upconvert to PR, the first one
1370 * here will take the lock. The others will have to go thru the
1371 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1372 * downconvert request.
1373 */
1374 if (level <= lockres->l_level)
1375 goto update_holders;
1376 }
1377
1349 if (lockres->l_flags & OCFS2_LOCK_BLOCKED && 1378 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1350 !ocfs2_may_continue_on_blocked_lock(lockres, level)) { 1379 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1351 /* is the lock is currently blocked on behalf of 1380 /* is the lock is currently blocked on behalf of
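The OCFS2_LOCK_UPCONVERT_FINISHING fast path above relies on lock levels being totally ordered, so a freshly granted higher level can satisfy a lower request without another dlm round trip. A toy rendition of the predicate; the values only mirror the DLM_LOCK_* ordering and are otherwise illustrative:

enum toy_level { TOY_NL = 0, TOY_PR = 3, TOY_EX = 5 };	/* mirrors DLM_LOCK_* */

static int granted_covers(enum toy_level granted, enum toy_level wanted)
{
	/* an EX holder can service a PR or NL request on the spot */
	return wanted <= granted;
}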
@@ -1416,11 +1445,14 @@ again:
1416 goto again; 1445 goto again;
1417 } 1446 }
1418 1447
1448update_holders:
1419 /* Ok, if we get here then we're good to go. */ 1449 /* Ok, if we get here then we're good to go. */
1420 ocfs2_inc_holders(lockres, level); 1450 ocfs2_inc_holders(lockres, level);
1421 1451
1422 ret = 0; 1452 ret = 0;
1423unlock: 1453unlock:
1454 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1455
1424 spin_unlock_irqrestore(&lockres->l_lock, flags); 1456 spin_unlock_irqrestore(&lockres->l_lock, flags);
1425out: 1457out:
1426 /* 1458 /*
@@ -3155,7 +3187,7 @@ out:
3155/* Mark the lockres as being dropped. It will no longer be 3187/* Mark the lockres as being dropped. It will no longer be
3156 * queued if blocking, but we still may have to wait on it 3188 * queued if blocking, but we still may have to wait on it
3157 * being dequeued from the downconvert thread before we can consider 3189 * being dequeued from the downconvert thread before we can consider
3158 * it safe to drop. 3190 * it safe to drop.
3159 * 3191 *
3160 * You can *not* attempt to call cluster_lock on this lockres anymore. */ 3192 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3161void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) 3193void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3352 unsigned long flags; 3384 unsigned long flags;
3353 int blocking; 3385 int blocking;
3354 int new_level; 3386 int new_level;
3387 int level;
3355 int ret = 0; 3388 int ret = 0;
3356 int set_lvb = 0; 3389 int set_lvb = 0;
3357 unsigned int gen; 3390 unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3360 3393
3361 spin_lock_irqsave(&lockres->l_lock, flags); 3394 spin_lock_irqsave(&lockres->l_lock, flags);
3362 3395
3363 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
3364
3365recheck: 3396recheck:
3397 /*
3398 * Is it still blocking? If not, we have no more work to do.
3399 */
3400 if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3401 BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3402 spin_unlock_irqrestore(&lockres->l_lock, flags);
3403 ret = 0;
3404 goto leave;
3405 }
3406
3366 if (lockres->l_flags & OCFS2_LOCK_BUSY) { 3407 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3367 /* XXX 3408 /* XXX
3368 * This is a *big* race. The OCFS2_LOCK_PENDING flag 3409 * This is a *big* race. The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
3401 goto leave; 3442 goto leave;
3402 } 3443 }
3403 3444
3445 /*
3446 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3447 * set when the ast is received for an upconvert just before the
3448 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3449 * on the heels of the ast, we want to delay the downconvert just
3450 * enough to allow the up requestor to do its task. Because this
3451 * lock is in the blocked queue, the lock will be downconverted
3452 * as soon as the requestor is done with the lock.
3453 */
3454 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3455 goto leave_requeue;
3456
3457 /*
3458 * How can we block and yet be at NL? We were trying to upconvert
3459 * from NL and got canceled. The code comes back here, and now
3460 * we notice and clear BLOCKING.
3461 */
3462 if (lockres->l_level == DLM_LOCK_NL) {
3463 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3464 lockres->l_blocking = DLM_LOCK_NL;
3465 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3466 spin_unlock_irqrestore(&lockres->l_lock, flags);
3467 goto leave;
3468 }
3469
3404 /* if we're blocking an exclusive and we have *any* holders, 3470 /* if we're blocking an exclusive and we have *any* holders,
3405 * then requeue. */ 3471 * then requeue. */
3406 if ((lockres->l_blocking == DLM_LOCK_EX) 3472 if ((lockres->l_blocking == DLM_LOCK_EX)
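ocfs2_unblock_lock() gains three guards above: bail out when the lock is no longer blocked, requeue while an upconvert is finishing (the livelock case described in the comment), and clear BLOCKED after a canceled upconvert left the lock at NL. A compressed toy of that decision tree, with hypothetical names:

struct toy_lockres {
	unsigned long	flags;
	int		level;		/* 0 stands in for DLM_LOCK_NL */
	int		blocking;
};
#define TOY_BLOCKED		(1UL << 0)
#define TOY_UPCONVERT_FINISHING	(1UL << 1)

/* returns 1 to requeue the resource, 0 when there is nothing to do */
static int toy_unblock(struct toy_lockres *res)
{
	if (!(res->flags & TOY_BLOCKED))
		return 0;		/* bast already handled elsewhere */
	if (res->flags & TOY_UPCONVERT_FINISHING)
		return 1;		/* let the upconverter run, retry later */
	if (res->level == 0) {		/* canceled upconvert left us at NL */
		res->blocking = 0;
		res->flags &= ~TOY_BLOCKED;
		return 0;
	}
	return 1;			/* fall through to the real downconvert */
}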
@@ -3438,6 +3504,7 @@ recheck:
3438 * may sleep, so we save off a copy of what we're blocking as 3504 * may sleep, so we save off a copy of what we're blocking as
3439 * it may change while we're not holding the spin lock. */ 3505 * it may change while we're not holding the spin lock. */
3440 blocking = lockres->l_blocking; 3506 blocking = lockres->l_blocking;
3507 level = lockres->l_level;
3441 spin_unlock_irqrestore(&lockres->l_lock, flags); 3508 spin_unlock_irqrestore(&lockres->l_lock, flags);
3442 3509
3443 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); 3510 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
3446 goto leave; 3513 goto leave;
3447 3514
3448 spin_lock_irqsave(&lockres->l_lock, flags); 3515 spin_lock_irqsave(&lockres->l_lock, flags);
3449 if (blocking != lockres->l_blocking) { 3516 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3450 /* If this changed underneath us, then we can't drop 3517 /* If this changed underneath us, then we can't drop
3451 * it just yet. */ 3518 * it just yet. */
3452 goto recheck; 3519 goto recheck;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 15713cbb865c..19ad145d2af3 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
239 mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", 239 mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
240 (unsigned long long)blkno, generation); 240 (unsigned long long)blkno, generation);
241 } 241 }
242 242
243 *max_len = len; 243 *max_len = len;
244 244
245bail: 245bail:
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index d35a27f4523e..5328529e7fd2 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
192 emi->ei_clusters += ins->ei_clusters; 192 emi->ei_clusters += ins->ei_clusters;
193 return 1; 193 return 1;
194 } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && 194 } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
195 (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && 195 (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
196 ins->ei_flags == emi->ei_flags) { 196 ins->ei_flags == emi->ei_flags) {
197 emi->ei_phys = ins->ei_phys; 197 emi->ei_phys = ins->ei_phys;
198 emi->ei_cpos = ins->ei_cpos; 198 emi->ei_cpos = ins->ei_cpos;
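The one-character fix above tightens the merge-in-front test: the old code compared the inserted extent's logical end against the existing entry's physical start, so extents that were merely physically adjacent could be merged. A toy version of the corrected predicate, requiring contiguity in both spaces:

struct toy_extent {
	unsigned int cpos;	/* logical start, in clusters  */
	unsigned int phys;	/* physical start, in clusters */
	unsigned int clusters;
	unsigned int flags;
};

static int mergeable_in_front(const struct toy_extent *ins,
			      const struct toy_extent *emi)
{
	return ins->phys + ins->clusters == emi->phys &&
	       ins->cpos + ins->clusters == emi->cpos &&  /* the fixed test */
	       ins->flags == emi->flags;
}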
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 06ccf6a86d35..558ce0312421 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
749 int ret; 749 int ret;
750 750
751 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ 751 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
752 /* ugh. in prepare/commit_write, if from==to==start of block, we 752 /* ugh. in prepare/commit_write, if from==to==start of block, we
753 ** skip the prepare. make sure we never send an offset for the start 753 ** skip the prepare. make sure we never send an offset for the start
754 ** of a block 754 ** of a block
755 */ 755 */
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1779 struct inode *inode = dentry->d_inode; 1779 struct inode *inode = dentry->d_inode;
1780 loff_t saved_pos, end; 1780 loff_t saved_pos, end;
1781 1781
1782 /* 1782 /*
1783 * We start with a read level meta lock and only jump to an ex 1783 * We start with a read level meta lock and only jump to an ex
1784 * if we need to make modifications here. 1784 * if we need to make modifications here.
1785 */ 1785 */
@@ -2013,8 +2013,8 @@ out_dio:
2013 /* buffered aio wouldn't have proper lock coverage today */ 2013 /* buffered aio wouldn't have proper lock coverage today */
2014 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); 2014 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2015 2015
2016 if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) || 2016 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2017 (file->f_flags & O_DIRECT && has_refcount)) { 2017 ((file->f_flags & O_DIRECT) && has_refcount)) {
2018 ret = filemap_fdatawrite_range(file->f_mapping, pos, 2018 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2019 pos + count - 1); 2019 pos + count - 1);
2020 if (ret < 0) 2020 if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
2033 pos + count - 1); 2033 pos + count - 1);
2034 } 2034 }
2035 2035
2036 /* 2036 /*
2037 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io 2037 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
2038 * function pointer which is called when o_direct io completes so that 2038 * function pointer which is called when o_direct io completes so that
2039 * it can unlock our rw lock. (it's the clustered equivalent of 2039 * it can unlock our rw lock. (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2198 goto bail; 2198 goto bail;
2199 } 2199 }
2200 2200
2201 /* 2201 /*
2202 * buffered reads protect themselves in ->readpage(). O_DIRECT reads 2202 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2203 * need locks to protect pending reads from racing with truncate. 2203 * need locks to protect pending reads from racing with truncate.
2204 */ 2204 */
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2220 * We're fine letting folks race truncates and extending 2220 * We're fine letting folks race truncates and extending
2221 * writes with read across the cluster, just like they can 2221 * writes with read across the cluster, just like they can
2222 * locally. Hence no rw_lock during read. 2222 * locally. Hence no rw_lock during read.
2223 * 2223 *
2224 * Take and drop the meta data lock to update inode fields 2224 * Take and drop the meta data lock to update inode fields
2225 * like i_size. This allows the checks down below 2225 * like i_size. This allows the checks down below
2226 * generic_file_aio_read() a chance of actually working. 2226 * generic_file_aio_read() a chance of actually working.
2227 */ 2227 */
2228 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); 2228 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2229 if (ret < 0) { 2229 if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2248bail: 2248bail:
2249 if (have_alloc_sem) 2249 if (have_alloc_sem)
2250 up_read(&inode->i_alloc_sem); 2250 up_read(&inode->i_alloc_sem);
2251 if (rw_level != -1) 2251 if (rw_level != -1)
2252 ocfs2_rw_unlock(inode, rw_level); 2252 ocfs2_rw_unlock(inode, rw_level);
2253 mlog_exit(ret); 2253 mlog_exit(ret);
2254 2254
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0297fb8982b8..88459bdd1ff3 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
475 if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { 475 if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
476 status = ocfs2_try_open_lock(inode, 0); 476 status = ocfs2_try_open_lock(inode, 0);
477 if (status) { 477 if (status) {
478 make_bad_inode(inode); 478 make_bad_inode(inode);
479 return status; 479 return status;
480 } 480 }
481 } 481 }
@@ -684,7 +684,7 @@ bail:
684 return status; 684 return status;
685} 685}
686 686
687/* 687/*
688 * Serialize with orphan dir recovery. If the process doing 688 * Serialize with orphan dir recovery. If the process doing
689 * recovery on this orphan dir does an iget() with the dir 689 * recovery on this orphan dir does an iget() with the dir
690 * i_mutex held, we'll deadlock here. Instead we detect this 690 * i_mutex held, we'll deadlock here. Instead we detect this
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 31fbb0619510..7d9d9c132cef 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/fs.h> 8#include <linux/fs.h>
9#include <linux/mount.h> 9#include <linux/mount.h>
10#include <linux/compat.h>
10 11
11#define MLOG_MASK_PREFIX ML_INODE 12#define MLOG_MASK_PREFIX ML_INODE
12#include <cluster/masklog.h> 13#include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
181#ifdef CONFIG_COMPAT 182#ifdef CONFIG_COMPAT
182long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) 183long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
183{ 184{
185 bool preserve;
186 struct reflink_arguments args;
187 struct inode *inode = file->f_path.dentry->d_inode;
188
184 switch (cmd) { 189 switch (cmd) {
185 case OCFS2_IOC32_GETFLAGS: 190 case OCFS2_IOC32_GETFLAGS:
186 cmd = OCFS2_IOC_GETFLAGS; 191 cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
195 case OCFS2_IOC_GROUP_EXTEND: 200 case OCFS2_IOC_GROUP_EXTEND:
196 case OCFS2_IOC_GROUP_ADD: 201 case OCFS2_IOC_GROUP_ADD:
197 case OCFS2_IOC_GROUP_ADD64: 202 case OCFS2_IOC_GROUP_ADD64:
198 case OCFS2_IOC_REFLINK:
199 break; 203 break;
204 case OCFS2_IOC_REFLINK:
205 if (copy_from_user(&args, (struct reflink_arguments *)arg,
206 sizeof(args)))
207 return -EFAULT;
208 preserve = (args.preserve != 0);
209
210 return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
211 compat_ptr(args.new_path), preserve);
200 default: 212 default:
201 return -ENOIOCTLCMD; 213 return -ENOIOCTLCMD;
202 } 214 }
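The compat path added above works because struct reflink_arguments carries its user pointers as u64 fields: the block is copied in unchanged and the embedded 32-bit pointers are widened with compat_ptr() before calling the native handler. A kernel-context sketch (not standalone; the struct layout is an assumption mirrored from the diff):

struct reflink_args32 {		/* assumed mirror of reflink_arguments */
	__u64 old_path;		/* user pointer, 32-bit in compat tasks */
	__u64 new_path;
	__u64 preserve;
};

static long reflink_compat(struct inode *inode, unsigned long arg)
{
	struct reflink_args32 args;

	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
		return -EFAULT;

	/* compat_ptr() widens the 32-bit user pointers correctly */
	return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
				   compat_ptr(args.new_path),
				   args.preserve != 0);
}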
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bf34c491ae96..9336c60e3a36 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
2034 status = -ENOENT; 2034 status = -ENOENT;
2035 mlog_errno(status); 2035 mlog_errno(status);
2036 return status; 2036 return status;
2037 } 2037 }
2038 2038
2039 mutex_lock(&orphan_dir_inode->i_mutex); 2039 mutex_lock(&orphan_dir_inode->i_mutex);
2040 status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); 2040 status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9362eea7424b..740f448041e2 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
136#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a 136#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a
137 call to dlm_lock. Only 137 call to dlm_lock. Only
138 exists with BUSY set. */ 138 exists with BUSY set. */
139#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
140 * from downconverting
141 * before the upconvert
142 * has completed */
139 143
140struct ocfs2_lock_res_ops; 144struct ocfs2_lock_res_ops;
141 145
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 1a1a679e51b5..7638a38c32bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
1417 return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); 1417 return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
1418} 1418}
1419 1419
1420static inline int ocfs2_max_inline_data(int blocksize) 1420static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
1421 struct ocfs2_dinode *di)
1421{ 1422{
1422 return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); 1423 if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
1424 return blocksize -
1425 offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
1426 di->i_xattr_inline_size;
1427 else
1428 return blocksize -
1429 offsetof(struct ocfs2_dinode, id2.i_data.id_data);
1423} 1430}
1424 1431
1425static inline int ocfs2_extent_recs_per_inode(int blocksize) 1432static inline int ocfs2_extent_recs_per_inode(int blocksize)
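The new helper subtracts the inline xattr area when OCFS2_INLINE_XATTR_FL is set. A standalone worked example of the arithmetic, with assumed numbers (the real offset depends on struct ocfs2_dinode):

#include <stdio.h>

int main(void)
{
	int blocksize	 = 4096;	/* one dinode block                 */
	int id_data_off	 = 192;		/* assumed offsetof(id_data)        */
	int xattr_inline = 256;		/* assumed di->i_xattr_inline_size  */

	printf("no inline xattr:   %d bytes\n", blocksize - id_data_off);
	printf("with inline xattr: %d bytes\n",
	       blocksize - id_data_off - xattr_inline);
	return 0;
}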
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 74db2be75dd6..8ae65c9c020c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2945 2945
2946 while (offset < end) { 2946 while (offset < end) {
2947 page_index = offset >> PAGE_CACHE_SHIFT; 2947 page_index = offset >> PAGE_CACHE_SHIFT;
2948 map_end = (page_index + 1) << PAGE_CACHE_SHIFT; 2948 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
2949 if (map_end > end) 2949 if (map_end > end)
2950 map_end = end; 2950 map_end = end;
2951 2951
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2957 2957
2958 page = grab_cache_page(mapping, page_index); 2958 page = grab_cache_page(mapping, page_index);
2959 2959
2960 /* This page can't be dirtied before we CoW it out. */ 2960 /*
2961 	BUG_ON(PageDirty(page)); 2961 		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
2962 * can't be dirtied before we CoW it out.
2963 */
2964 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2965 BUG_ON(PageDirty(page));
2962 2966
2963 if (!PageUptodate(page)) { 2967 if (!PageUptodate(page)) {
2964 ret = block_read_full_page(page, ocfs2_get_block); 2968 ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
3170 3174
3171 while (offset < end) { 3175 while (offset < end) {
3172 page_index = offset >> PAGE_CACHE_SHIFT; 3176 page_index = offset >> PAGE_CACHE_SHIFT;
3173 map_end = (page_index + 1) << PAGE_CACHE_SHIFT; 3177 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
3174 if (map_end > end) 3178 if (map_end > end)
3175 map_end = end; 3179 map_end = end;
3176 3180
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index e49c41050264..3038c92af493 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
277 u32 dlm_key; 277 u32 dlm_key;
278 struct dlm_ctxt *dlm; 278 struct dlm_ctxt *dlm;
279 struct o2dlm_private *priv; 279 struct o2dlm_private *priv;
280 struct dlm_protocol_version dlm_version; 280 struct dlm_protocol_version fs_version;
281 281
282 BUG_ON(conn == NULL); 282 BUG_ON(conn == NULL);
283 BUG_ON(o2cb_stack.sp_proto == NULL); 283 BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
304 /* used by the dlm code to make message headers unique, each 304 /* used by the dlm code to make message headers unique, each
305 * node in this domain must agree on this. */ 305 * node in this domain must agree on this. */
306 dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); 306 dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
307 dlm_version.pv_major = conn->cc_version.pv_major; 307 fs_version.pv_major = conn->cc_version.pv_major;
308 dlm_version.pv_minor = conn->cc_version.pv_minor; 308 fs_version.pv_minor = conn->cc_version.pv_minor;
309 309
310 dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version); 310 dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
311 if (IS_ERR(dlm)) { 311 if (IS_ERR(dlm)) {
312 rc = PTR_ERR(dlm); 312 rc = PTR_ERR(dlm);
313 mlog_errno(rc); 313 mlog_errno(rc);
314 goto out_free; 314 goto out_free;
315 } 315 }
316 316
317 conn->cc_version.pv_major = dlm_version.pv_major; 317 conn->cc_version.pv_major = fs_version.pv_major;
318 conn->cc_version.pv_minor = dlm_version.pv_minor; 318 conn->cc_version.pv_minor = fs_version.pv_minor;
319 conn->cc_lockspace = dlm; 319 conn->cc_lockspace = dlm;
320 320
321 dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); 321 dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26069917a9f5..755cd49a5ef3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1062 "file system, but write access is " 1062 "file system, but write access is "
1063 "unavailable.\n"); 1063 "unavailable.\n");
1064 else 1064 else
1065 mlog_errno(status); 1065 mlog_errno(status);
1066 goto read_super_error; 1066 goto read_super_error;
1067 } 1067 }
1068 1068
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 49b133ccbf11..32499d213fc4 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
137 } 137 }
138 138
139 memcpy(link, target, len); 139 memcpy(link, target, len);
140 nd_set_link(nd, link);
141 140
142bail: 141bail:
142 nd_set_link(nd, status ? ERR_PTR(status) : link);
143 brelse(bh); 143 brelse(bh);
144 144
145 mlog_exit(status); 145 mlog_exit(status);
146 return status ? ERR_PTR(status) : link; 146 return NULL;
147} 147}
148 148
149static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 149static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
150{ 150{
151 char *link = cookie; 151 char *link = nd_get_link(nd);
152 152 if (!IS_ERR(link))
153 kfree(link); 153 kfree(link);
154} 154}
155 155
156const struct inode_operations ocfs2_symlink_inode_operations = { 156const struct inode_operations ocfs2_symlink_inode_operations = {
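The rewrite above routes success and failure alike through nd_set_link(), so ->put_link() can uniformly fetch the cookie with nd_get_link() and free it only when it is a real pointer. A kernel-context sketch of the pairing (2.6.33-era API, not standalone; toy_read_target() is hypothetical):

extern char *toy_read_target(struct dentry *dentry);	/* hypothetical */

static void *toy_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	char *link = toy_read_target(dentry);

	nd_set_link(nd, link ? link : ERR_PTR(-EIO));
	return NULL;		/* no separate cookie needed */
}

static void toy_put_link(struct dentry *dentry, struct nameidata *nd,
			 void *cookie)
{
	char *link = nd_get_link(nd);

	if (!IS_ERR(link))
		kfree(link);
}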
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index c61369342a27..a0a120e82b97 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
267} 267}
268 268
269/* Warning: even if it returns true, this does *not* guarantee that 269/* Warning: even if it returns true, this does *not* guarantee that
270 * the block is stored in our inode metadata cache. 270 * the block is stored in our inode metadata cache.
271 * 271 *
272 * This can be called under lock_buffer() 272 * This can be called under lock_buffer()
273 */ 273 */
274int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, 274int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e42bbd843ed1..58324c299165 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2369,16 +2369,30 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
2369{ 2369{
2370 struct pid_namespace *ns = dentry->d_sb->s_fs_info; 2370 struct pid_namespace *ns = dentry->d_sb->s_fs_info;
2371 pid_t tgid = task_tgid_nr_ns(current, ns); 2371 pid_t tgid = task_tgid_nr_ns(current, ns);
2372 char tmp[PROC_NUMBUF]; 2372 char *name = ERR_PTR(-ENOENT);
2373 if (!tgid) 2373 if (tgid) {
2374 return ERR_PTR(-ENOENT); 2374 name = __getname();
2375 sprintf(tmp, "%d", task_tgid_nr_ns(current, ns)); 2375 if (!name)
2376 return ERR_PTR(vfs_follow_link(nd,tmp)); 2376 name = ERR_PTR(-ENOMEM);
2377 else
2378 sprintf(name, "%d", tgid);
2379 }
2380 nd_set_link(nd, name);
2381 return NULL;
2382}
2383
2384static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
2385 void *cookie)
2386{
2387 char *s = nd_get_link(nd);
2388 if (!IS_ERR(s))
2389 __putname(s);
2377} 2390}
2378 2391
2379static const struct inode_operations proc_self_inode_operations = { 2392static const struct inode_operations proc_self_inode_operations = {
2380 .readlink = proc_self_readlink, 2393 .readlink = proc_self_readlink,
2381 .follow_link = proc_self_follow_link, 2394 .follow_link = proc_self_follow_link,
2395 .put_link = proc_self_put_link,
2382}; 2396};
2383 2397
2384/* 2398/*
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9087b10209e6..2df0f5c7c60b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1497,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1497 1497
1498 args.objectid = key->on_disk_key.k_objectid; 1498 args.objectid = key->on_disk_key.k_objectid;
1499 args.dirid = key->on_disk_key.k_dir_id; 1499 args.dirid = key->on_disk_key.k_dir_id;
1500 reiserfs_write_unlock(s);
1500 inode = iget5_locked(s, key->on_disk_key.k_objectid, 1501 inode = iget5_locked(s, key->on_disk_key.k_objectid,
1501 reiserfs_find_actor, reiserfs_init_locked_inode, 1502 reiserfs_find_actor, reiserfs_init_locked_inode,
1502 (void *)(&args)); 1503 (void *)(&args));
1504 reiserfs_write_lock(s);
1503 if (!inode) 1505 if (!inode)
1504 return ERR_PTR(-ENOMEM); 1506 return ERR_PTR(-ENOMEM);
1505 1507
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 220b758523ae..6a06a1d1ea7b 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
81 if (!sd_attrs) 81 if (!sd_attrs)
82 return -ENOMEM; 82 return -ENOMEM;
83 sd->s_iattr = sd_attrs; 83 sd->s_iattr = sd_attrs;
84 } else { 84 }
85 /* attributes were changed at least once in past */ 85 /* attributes were changed at least once in past */
86 iattrs = &sd_attrs->ia_iattr; 86 iattrs = &sd_attrs->ia_iattr;
87 87
88 if (ia_valid & ATTR_UID) 88 if (ia_valid & ATTR_UID)
89 iattrs->ia_uid = iattr->ia_uid; 89 iattrs->ia_uid = iattr->ia_uid;
90 if (ia_valid & ATTR_GID) 90 if (ia_valid & ATTR_GID)
91 iattrs->ia_gid = iattr->ia_gid; 91 iattrs->ia_gid = iattr->ia_gid;
92 if (ia_valid & ATTR_ATIME) 92 if (ia_valid & ATTR_ATIME)
93 iattrs->ia_atime = iattr->ia_atime; 93 iattrs->ia_atime = iattr->ia_atime;
94 if (ia_valid & ATTR_MTIME) 94 if (ia_valid & ATTR_MTIME)
95 iattrs->ia_mtime = iattr->ia_mtime; 95 iattrs->ia_mtime = iattr->ia_mtime;
96 if (ia_valid & ATTR_CTIME) 96 if (ia_valid & ATTR_CTIME)
97 iattrs->ia_ctime = iattr->ia_ctime; 97 iattrs->ia_ctime = iattr->ia_ctime;
98 if (ia_valid & ATTR_MODE) { 98 if (ia_valid & ATTR_MODE) {
99 umode_t mode = iattr->ia_mode; 99 umode_t mode = iattr->ia_mode;
100 iattrs->ia_mode = sd->s_mode = mode; 100 iattrs->ia_mode = sd->s_mode = mode;
101 }
102 } 101 }
103 return 0; 102 return 0;
104} 103}
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 1e67c441ea82..f745948b61e4 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -77,6 +77,7 @@ struct drm_nouveau_gpuobj_free {
77#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 77#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
78#define NOUVEAU_GETPARAM_CHIPSET_ID 11 78#define NOUVEAU_GETPARAM_CHIPSET_ID 11
79#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 79#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
80#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
80struct drm_nouveau_getparam { 81struct drm_nouveau_getparam {
81 uint64_t param; 82 uint64_t param;
82 uint64_t value; 83 uint64_t value;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 2be7e1249b6f..c7645f480d12 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -68,7 +68,8 @@
68#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 68#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
69#define DRM_VMW_PARAM_3D 2 69#define DRM_VMW_PARAM_3D 2
70#define DRM_VMW_PARAM_FIFO_OFFSET 3 70#define DRM_VMW_PARAM_FIFO_OFFSET 3
71 71#define DRM_VMW_PARAM_HW_CAPS 4
72#define DRM_VMW_PARAM_FIFO_CAPS 5
72 73
73/** 74/**
74 * struct drm_vmw_getparam_arg 75 * struct drm_vmw_getparam_arg
@@ -181,6 +182,8 @@ struct drm_vmw_context_arg {
181 * The size of the array should equal the total number of mipmap levels. 182 * The size of the array should equal the total number of mipmap levels.
182 * @shareable: Boolean whether other clients (as identified by file descriptors) 183 * @shareable: Boolean whether other clients (as identified by file descriptors)
183 * may reference this surface. 184 * may reference this surface.
185 * @scanout: Boolean whether the surface is intended to be used as a
186 * scanout.
184 * 187 *
185 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl. 188 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
186 * Output data from the DRM_VMW_REF_SURFACE Ioctl. 189 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
@@ -192,7 +195,7 @@ struct drm_vmw_surface_create_req {
192 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 195 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
193 uint64_t size_addr; 196 uint64_t size_addr;
194 int32_t shareable; 197 int32_t shareable;
195 uint32_t pad64; 198 int32_t scanout;
196}; 199};
197 200
198/** 201/**
@@ -295,17 +298,28 @@ union drm_vmw_surface_reference_arg {
295 * 298 *
296 * @commands: User-space address of a command buffer cast to an uint64_t. 299 * @commands: User-space address of a command buffer cast to an uint64_t.
297 * @command-size: Size in bytes of the command buffer. 300 * @command-size: Size in bytes of the command buffer.
301 * @throttle-us: Sleep until software is less than @throttle_us
302 * microseconds ahead of hardware. The driver may round this value
303 * to the nearest kernel tick.
298 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an 304 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
299 * uint64_t. 305 * uint64_t.
306 * @version: Allows expanding the execbuf ioctl parameters without breaking
307 * backwards compatibility, since user-space will always tell the kernel
308 * which version it uses.
309 * @flags: Execbuf flags. None currently.
300 * 310 *
301 * Argument to the DRM_VMW_EXECBUF Ioctl. 311 * Argument to the DRM_VMW_EXECBUF Ioctl.
302 */ 312 */
303 313
314#define DRM_VMW_EXECBUF_VERSION 0
315
304struct drm_vmw_execbuf_arg { 316struct drm_vmw_execbuf_arg {
305 uint64_t commands; 317 uint64_t commands;
306 uint32_t command_size; 318 uint32_t command_size;
307 uint32_t pad64; 319 uint32_t throttle_us;
308 uint64_t fence_rep; 320 uint64_t fence_rep;
321 uint32_t version;
322 uint32_t flags;
309}; 323};
310 324
311/** 325/**
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index ab94335b4bb9..6816be6c3f77 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -1,5 +1,9 @@
1/* 1/*
2 * linux/include/asm-arm/hardware/amba.h 2 * linux/include/amba/bus.h
3 *
4 * This device type deals with ARM PrimeCells and anything else that
5 * presents a proper CID (0xB105F00D) at the end of the I/O register
6 * region or that is derived from a PrimeCell.
3 * 7 *
4 * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. 8 * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
5 * 9 *
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 38a6948ce0c2..20f31567ccee 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
647 return id[ATA_ID_SECTOR_SIZE] & (1 << 13); 647 return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
648} 648}
649 649
650static inline u8 ata_id_logical_per_physical_sectors(const u16 *id) 650static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
651{ 651{
652 return id[ATA_ID_SECTOR_SIZE] & 0xf; 652 return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
653} 653}
654 654
655static inline int ata_id_has_lba48(const u16 *id) 655static inline int ata_id_has_lba48(const u16 *id)
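The ata fix above corrects both the decoding and the width: bits 3:0 of the IDENTIFY sector-size word hold the log2 of the logical-per-physical count, not the count itself, and the maximum of 1 << 15 no longer fits in a u8. A standalone worked example; the IDENTIFY value is assumed:

#include <stdio.h>

int main(void)
{
	unsigned short word = 0x6003;	/* assumed IDENTIFY word 106 */

	printf("old decode (wrong): %u\n", word & 0xfu);	  /* 3 */
	printf("new decode (right): %u\n", 1u << (word & 0xfu));  /* 8 */
	return 0;
}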
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index c05a29cb9bb2..25b8b2f33ae9 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -25,7 +25,7 @@
25static __inline__ int get_bitmask_order(unsigned int count) 25static __inline__ int get_bitmask_order(unsigned int count)
26{ 26{
27 int order; 27 int order;
28 28
29 order = fls(count); 29 order = fls(count);
30 return order; /* We could be slightly more clever with -1 here... */ 30 return order; /* We could be slightly more clever with -1 here... */
31} 31}
@@ -33,7 +33,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
33static __inline__ int get_count_order(unsigned int count) 33static __inline__ int get_count_order(unsigned int count)
34{ 34{
35 int order; 35 int order;
36 36
37 order = fls(count) - 1; 37 order = fls(count) - 1;
38 if (count & (count - 1)) 38 if (count & (count - 1))
39 order++; 39 order++;
@@ -45,6 +45,31 @@ static inline unsigned long hweight_long(unsigned long w)
45 return sizeof(w) == 4 ? hweight32(w) : hweight64(w); 45 return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
46} 46}
47 47
48/*
49 * Clearly slow versions of the hweightN() functions, their benefit is
50 * of course compile time evaluation of constant arguments.
51 */
52#define HWEIGHT8(w) \
53 ( BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + \
54 (!!((w) & (1ULL << 0))) + \
55 (!!((w) & (1ULL << 1))) + \
56 (!!((w) & (1ULL << 2))) + \
57 (!!((w) & (1ULL << 3))) + \
58 (!!((w) & (1ULL << 4))) + \
59 (!!((w) & (1ULL << 5))) + \
60 (!!((w) & (1ULL << 6))) + \
61 (!!((w) & (1ULL << 7))) )
62
63#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8))
64#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
65#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
66
67/*
68 * Type invariant version that simply casts things to the
69 * largest type.
70 */
71#define HWEIGHT(w) HWEIGHT64((u64)(w))
72
48/** 73/**
49 * rol32 - rotate a 32-bit value left 74 * rol32 - rotate a 32-bit value left
50 * @word: value to rotate 75 * @word: value to rotate
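The HWEIGHT macros above trade speed for compile-time evaluation of constant masks, and the kernel versions reject non-constant arguments via BUILD_BUG_ON_ZERO. A standalone userspace rendition showing that the result is usable where a constant expression is required:

#include <stdio.h>

#define HW8(w)	((!!((w) & 1)) + (!!((w) & 2)) + (!!((w) & 4)) + \
		 (!!((w) & 8)) + (!!((w) & 16)) + (!!((w) & 32)) + \
		 (!!((w) & 64)) + (!!((w) & 128)))
#define HW16(w)	(HW8(w) + HW8((w) >> 8))

/* constant expression: usable as an array size, proving compile-time
 * evaluation; a negative size here would be a build error */
static char compile_time_check[HW16(0x00ff) == 8 ? 1 : -1];

int main(void)
{
	(void)compile_time_check;
	printf("popcount(0xf0f0) = %d\n", HW16(0xf0f0));	/* 8 */
	return 0;
}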
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5c8018977efa..1896e868854f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -461,8 +461,7 @@ struct request_queue
461#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ 461#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
462#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 462#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
463#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 463#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
464#define QUEUE_FLAG_CQ 16 /* hardware does queuing */ 464#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
465#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
466 465
467#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 466#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
468 (1 << QUEUE_FLAG_CLUSTER) | \ 467 (1 << QUEUE_FLAG_CLUSTER) | \
@@ -586,7 +585,6 @@ enum {
586 585
587#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 586#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
588#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 587#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
589#define blk_queue_queuing(q) test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
590#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 588#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
591#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 589#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
592#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 590#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5be3dab4a695..188fcae10a99 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -15,6 +15,7 @@
15# define __acquire(x) __context__(x,1) 15# define __acquire(x) __context__(x,1)
16# define __release(x) __context__(x,-1) 16# define __release(x) __context__(x,-1)
17# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 17# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
18# define __percpu __attribute__((noderef, address_space(3)))
18extern void __chk_user_ptr(const volatile void __user *); 19extern void __chk_user_ptr(const volatile void __user *);
19extern void __chk_io_ptr(const volatile void __iomem *); 20extern void __chk_io_ptr(const volatile void __iomem *);
20#else 21#else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
32# define __acquire(x) (void)0 33# define __acquire(x) (void)0
33# define __release(x) (void)0 34# define __release(x) (void)0
34# define __cond_lock(x,c) (c) 35# define __cond_lock(x,c) (c)
36# define __percpu
35#endif 37#endif
36 38
37#ifdef __KERNEL__ 39#ifdef __KERNEL__
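With the annotation above, sparse places per-cpu pointers in address space 3, so mixing them with plain pointers warns just as __user and __iomem misuse does. A kernel-context sketch (not standalone) of code the checker would now vet:

static long __percpu *hits;		/* annotated per-cpu pointer */

static int toy_init(void)
{
	hits = alloc_percpu(long);	/* alloc_percpu() returns __percpu */
	if (!hits)
		return -ENOMEM;
	/* "long *p = hits;" would now draw a sparse warning; the
	 * per-cpu accessors strip the address space instead: */
	this_cpu_inc(*hits);
	return 0;
}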
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b1bcb275b596..ebb1cd5bc241 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -729,6 +729,7 @@ struct inode {
729 uid_t i_uid; 729 uid_t i_uid;
730 gid_t i_gid; 730 gid_t i_gid;
731 dev_t i_rdev; 731 dev_t i_rdev;
732 unsigned int i_blkbits;
732 u64 i_version; 733 u64 i_version;
733 loff_t i_size; 734 loff_t i_size;
734#ifdef __NEED_I_SIZE_ORDERED 735#ifdef __NEED_I_SIZE_ORDERED
@@ -738,7 +739,6 @@ struct inode {
738 struct timespec i_mtime; 739 struct timespec i_mtime;
739 struct timespec i_ctime; 740 struct timespec i_ctime;
740 blkcnt_t i_blocks; 741 blkcnt_t i_blocks;
741 unsigned int i_blkbits;
742 unsigned short i_bytes; 742 unsigned short i_bytes;
743 umode_t i_mode; 743 umode_t i_mode;
744 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 744 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
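Moving i_blkbits next to i_rdev most likely fills the 4-byte alignment hole that dev_t would otherwise leave in front of the u64 i_version on 64-bit builds. A standalone demonstration with assumed field sizes:

#include <stdio.h>
#include <stdint.h>

struct before { uint32_t rdev; uint64_t version; uint32_t blkbits; };
struct after  { uint32_t rdev; uint32_t blkbits; uint64_t version; };

int main(void)
{
	/* typically prints 24 vs 16: the u32 slots into the padding */
	printf("before: %zu, after: %zu\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}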
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 0b4f97d24d7f..eb054ae95605 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -134,6 +134,8 @@ extern void
134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); 134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
135extern void unregister_ftrace_function_probe_all(char *glob); 135extern void unregister_ftrace_function_probe_all(char *glob);
136 136
137extern int ftrace_text_reserved(void *start, void *end);
138
137enum { 139enum {
138 FTRACE_FL_FREE = (1 << 0), 140 FTRACE_FL_FREE = (1 << 0),
139 FTRACE_FL_FAILED = (1 << 1), 141 FTRACE_FL_FAILED = (1 << 1),
@@ -141,7 +143,6 @@ enum {
141 FTRACE_FL_ENABLED = (1 << 3), 143 FTRACE_FL_ENABLED = (1 << 3),
142 FTRACE_FL_NOTRACE = (1 << 4), 144 FTRACE_FL_NOTRACE = (1 << 4),
143 FTRACE_FL_CONVERTED = (1 << 5), 145 FTRACE_FL_CONVERTED = (1 << 5),
144 FTRACE_FL_FROZEN = (1 << 6),
145}; 146};
146 147
147struct dyn_ftrace { 148struct dyn_ftrace {
@@ -250,6 +251,10 @@ static inline int unregister_ftrace_command(char *cmd_name)
250{ 251{
251 return -EINVAL; 252 return -EINVAL;
252} 253}
254static inline int ftrace_text_reserved(void *start, void *end)
255{
256 return 0;
257}
253#endif /* CONFIG_DYNAMIC_FTRACE */ 258#endif /* CONFIG_DYNAMIC_FTRACE */
254 259
255/* totally disable ftrace - can not re-enable after this */ 260/* totally disable ftrace - can not re-enable after this */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 2233c98d80df..cd95919d9ff3 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -5,6 +5,7 @@
5#include <linux/trace_seq.h> 5#include <linux/trace_seq.h>
6#include <linux/percpu.h> 6#include <linux/percpu.h>
7#include <linux/hardirq.h> 7#include <linux/hardirq.h>
8#include <linux/perf_event.h>
8 9
9struct trace_array; 10struct trace_array;
10struct tracer; 11struct tracer;
@@ -138,9 +139,6 @@ struct ftrace_event_call {
138 139
139#define FTRACE_MAX_PROFILE_SIZE 2048 140#define FTRACE_MAX_PROFILE_SIZE 2048
140 141
141extern char *perf_trace_buf;
142extern char *perf_trace_buf_nmi;
143
144#define MAX_FILTER_PRED 32 142#define MAX_FILTER_PRED 32
145#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ 143#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
146 144
@@ -188,13 +186,27 @@ do { \
188 __trace_printk(ip, fmt, ##args); \ 186 __trace_printk(ip, fmt, ##args); \
189} while (0) 187} while (0)
190 188
191#ifdef CONFIG_EVENT_PROFILE 189#ifdef CONFIG_PERF_EVENTS
192struct perf_event; 190struct perf_event;
193extern int ftrace_profile_enable(int event_id); 191extern int ftrace_profile_enable(int event_id);
194extern void ftrace_profile_disable(int event_id); 192extern void ftrace_profile_disable(int event_id);
195extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, 193extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
196 char *filter_str); 194 char *filter_str);
197extern void ftrace_profile_free_filter(struct perf_event *event); 195extern void ftrace_profile_free_filter(struct perf_event *event);
196extern void *
197ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
198 unsigned long *irq_flags);
199
200static inline void
201ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
202 u64 count, unsigned long irq_flags)
203{
204 struct trace_entry *entry = raw_data;
205
206 perf_tp_event(entry->type, addr, count, raw_data, size);
207 perf_swevent_put_recursion_context(rctx);
208 local_irq_restore(irq_flags);
209}
198#endif 210#endif
199 211
200#endif /* _LINUX_FTRACE_EVENT_H */ 212#endif /* _LINUX_FTRACE_EVENT_H */
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 5977b724f7c6..c70d27af03f9 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -66,14 +66,14 @@ register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
66 perf_overflow_handler_t triggered, 66 perf_overflow_handler_t triggered,
67 int cpu); 67 int cpu);
68 68
69extern struct perf_event ** 69extern struct perf_event * __percpu *
70register_wide_hw_breakpoint(struct perf_event_attr *attr, 70register_wide_hw_breakpoint(struct perf_event_attr *attr,
71 perf_overflow_handler_t triggered); 71 perf_overflow_handler_t triggered);
72 72
73extern int register_perf_hw_breakpoint(struct perf_event *bp); 73extern int register_perf_hw_breakpoint(struct perf_event *bp);
74extern int __register_perf_hw_breakpoint(struct perf_event *bp); 74extern int __register_perf_hw_breakpoint(struct perf_event *bp);
75extern void unregister_hw_breakpoint(struct perf_event *bp); 75extern void unregister_hw_breakpoint(struct perf_event *bp);
76extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events); 76extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
77 77
78extern int dbg_reserve_bp_slot(struct perf_event *bp); 78extern int dbg_reserve_bp_slot(struct perf_event *bp);
79extern int dbg_release_bp_slot(struct perf_event *bp); 79extern int dbg_release_bp_slot(struct perf_event *bp);
@@ -100,7 +100,7 @@ static inline struct perf_event *
100register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, 100register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
101 perf_overflow_handler_t triggered, 101 perf_overflow_handler_t triggered,
102 int cpu) { return NULL; } 102 int cpu) { return NULL; }
103static inline struct perf_event ** 103static inline struct perf_event * __percpu *
104register_wide_hw_breakpoint(struct perf_event_attr *attr, 104register_wide_hw_breakpoint(struct perf_event_attr *attr,
105 perf_overflow_handler_t triggered) { return NULL; } 105 perf_overflow_handler_t triggered) { return NULL; }
106static inline int 106static inline int
@@ -109,7 +109,7 @@ static inline int
109__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } 109__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
110static inline void unregister_hw_breakpoint(struct perf_event *bp) { } 110static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
111static inline void 111static inline void
112unregister_wide_hw_breakpoint(struct perf_event **cpu_events) { } 112unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
113static inline int 113static inline int
114reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } 114reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
115static inline void release_bp_slot(struct perf_event *bp) { } 115static inline void release_bp_slot(struct perf_event *bp) { }
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 99dc6d5cf7e5..975837e7d6c0 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -17,7 +17,7 @@ struct linux_binprm;
 extern int ima_bprm_check(struct linux_binprm *bprm);
 extern int ima_inode_alloc(struct inode *inode);
 extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask);
+extern int ima_file_check(struct file *file, int mask);
 extern void ima_file_free(struct file *file);
 extern int ima_file_mmap(struct file *file, unsigned long prot);
 extern void ima_counts_get(struct file *file);
@@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
 	return;
 }
 
-static inline int ima_path_check(struct path *path, int mask)
+static inline int ima_file_check(struct file *file, int mask)
 {
 	return 0;
 }
diff --git a/include/linux/input.h b/include/linux/input.h
index 735ceaf1bc2d..663208afb64c 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -376,6 +376,7 @@ struct input_absinfo {
 #define KEY_DISPLAY_OFF		245	/* display device to off state */
 
 #define KEY_WIMAX		246
+#define KEY_RFKILL		247	/* Key that controls all radios */
 
 /* Range 248 - 255 is reserved for special needs of AT keyboard driver */
 
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 6f6c5f300af6..bc0fc795bd35 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -124,7 +124,7 @@ extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
  */
 static inline bool kfifo_initialized(struct kfifo *fifo)
 {
-	return fifo->buffer != 0;
+	return fifo->buffer != NULL;
 }
 
 /**
diff --git a/include/linux/list.h b/include/linux/list.h
index 969f6e92d089..5d9c6558e8ab 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -206,6 +206,20 @@ static inline int list_empty_careful(const struct list_head *head)
 }
 
 /**
+ * list_rotate_left - rotate the list to the left
+ * @head: the head of the list
+ */
+static inline void list_rotate_left(struct list_head *head)
+{
+	struct list_head *first;
+
+	if (!list_empty(head)) {
+		first = head->next;
+		list_move_tail(first, head);
+	}
+}
+
+/**
  * list_is_singular - tests whether a list has just one entry.
  * @head: the list to test.
  */
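As a reading aid, a sketch of what the new helper does to a three-node queue (the struct and variable names are illustrative):

	struct item { struct list_head node; } a, b, c;
	LIST_HEAD(q);

	list_add_tail(&a.node, &q);
	list_add_tail(&b.node, &q);
	list_add_tail(&c.node, &q);	/* q: a -> b -> c */

	list_rotate_left(&q);		/* old first entry moves to the tail: b -> c -> a */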
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a177698d95e2..90e0521b1690 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -288,7 +288,7 @@ struct perf_event_mmap_page {
 };
 
 #define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
@@ -354,8 +354,8 @@ enum perf_event_type {
 	 *	u64				stream_id;
 	 * };
 	 */
 	PERF_RECORD_THROTTLE			= 5,
 	PERF_RECORD_UNTHROTTLE			= 6,
 
 	/*
 	 * struct {
@@ -369,10 +369,10 @@ enum perf_event_type {
 
 	/*
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *	u32				pid, tid;
 	 *
 	 *	struct read_format		values;
 	 * };
 	 */
 	PERF_RECORD_READ			= 8,
@@ -410,7 +410,7 @@ enum perf_event_type {
 	 *	char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
 	PERF_RECORD_SAMPLE		= 9,
 
 	PERF_RECORD_MAX,			/* non-ABI */
 };
@@ -476,18 +476,19 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		last_tag;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		idx;
+			int		last_cpu;
 		};
 		struct { /* software */
 			s64		remaining;
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-		union { /* breakpoint */
+		/* breakpoint */
 		struct arch_hw_breakpoint	info;
-		};
 #endif
 	};
 	atomic64_t			prev_count;
@@ -496,9 +497,8 @@ struct hw_perf_event {
 	atomic64_t			period_left;
 	u64				interrupts;
 
-	u64				freq_count;
-	u64				freq_interrupts;
-	u64				freq_stamp;
+	u64				freq_time_stamp;
+	u64				freq_count_stamp;
 #endif
 };
 
@@ -510,6 +510,8 @@ struct perf_event;
 struct pmu {
 	int (*enable)			(struct perf_event *event);
 	void (*disable)			(struct perf_event *event);
+	int (*start)			(struct perf_event *event);
+	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
 };
@@ -563,6 +565,10 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
 					struct perf_sample_data *,
 					struct pt_regs *regs);
 
+enum perf_group_flag {
+	PERF_GROUP_SOFTWARE = 0x1,
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -572,6 +578,7 @@ struct perf_event {
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
 	int				nr_siblings;
+	int				group_flags;
 	struct perf_event		*group_leader;
 	struct perf_event		*output;
 	const struct pmu		*pmu;
@@ -656,7 +663,7 @@ struct perf_event {
 
 	perf_overflow_handler_t		overflow_handler;
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
 	struct event_filter		*filter;
 #endif
 
@@ -681,7 +688,8 @@ struct perf_event_context {
 	 */
 	struct mutex			mutex;
 
-	struct list_head		group_list;
+	struct list_head		pinned_groups;
+	struct list_head		flexible_groups;
 	struct list_head		event_list;
 	int				nr_events;
 	int				nr_active;
@@ -744,10 +752,9 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_event_task_sched_out(struct task_struct *task,
-					struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -762,7 +769,7 @@ extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu);
+	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -850,9 +857,23 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+static inline bool perf_paranoid_tracepoint_raw(void)
+{
+	return sysctl_perf_event_paranoid > -1;
+}
+
+static inline bool perf_paranoid_cpu(void)
+{
+	return sysctl_perf_event_paranoid > 0;
+}
+
+static inline bool perf_paranoid_kernel(void)
+{
+	return sysctl_perf_event_paranoid > 1;
+}
+
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
-				 void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -873,12 +894,12 @@ extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task)			{ }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			  struct task_struct *next, int cpu)		{ }
+			  struct task_struct *next)			{ }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+perf_event_task_tick(struct task_struct *task)				{ }
 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
@@ -893,13 +914,13 @@ static inline void
 perf_sw_event(u32 event_id, u64 nr, int nmi,
 	      struct pt_regs *regs, u64 addr)			{ }
 static inline void
 perf_bp_event(struct perf_event *event, void *data)		{ }
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)	{ }
 static inline void perf_event_comm(struct task_struct *tsk)	{ }
 static inline void perf_event_fork(struct task_struct *tsk)	{ }
 static inline void perf_event_init(void)			{ }
 static inline int  perf_swevent_get_recursion_context(void)	{ return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)	{ }
 static inline void perf_event_enable(struct perf_event *event)	{ }
 static inline void perf_event_disable(struct perf_event *event)	{ }
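For orientation, the pinned/flexible split keyed off attr.pinned also has a user-visible side; a hedged userspace sketch of creating an event that would land on ctx->pinned_groups (error handling elided):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_pinned_cycles(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.size	= sizeof(attr),
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.pinned	= 1,	/* stays on the PMU; never rotated with flexible groups */
	};

	/* current thread, any cpu */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}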
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abdfacc58653..78efe7c485ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 207466a49f3d..feee739afe21 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -99,7 +99,7 @@ struct perf_event_attr;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #define TRACE_SYS_ENTER_PROFILE_INIT(sname)				\
 	.profile_enable = prof_sysenter_enable,				\
@@ -113,7 +113,7 @@ struct perf_event_attr;
 #define TRACE_SYS_ENTER_PROFILE_INIT(sname)
 #define TRACE_SYS_EXIT_PROFILE(sname)
 #define TRACE_SYS_EXIT_PROFILE_INIT(sname)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define __SC_STR_ADECL1(t, a)	#a
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index ba1ba0c5efd1..63d449807d9b 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -11,6 +11,8 @@ struct nf_conntrack_ecache;
 struct netns_ct {
 	atomic_t		count;
 	unsigned int		expect_count;
+	unsigned int		htable_size;
+	struct kmem_cache	*nf_conntrack_cachep;
 	struct hlist_nulls_head	*hash;
 	struct hlist_head	*expect_hash;
 	struct hlist_nulls_head	unconfirmed;
@@ -28,5 +30,6 @@ struct netns_ct {
 #endif
 	int			hash_vmalloc;
 	int			expect_vmalloc;
+	char			*slabname;
 };
 #endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2eb3814d6258..9a4b8b714079 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,7 @@ struct netns_ipv4 {
 	struct xt_table		*iptable_security;
 	struct xt_table		*nat_table;
 	struct hlist_head	*nat_bysource;
+	unsigned int		nat_htable_size;
 	int			nat_vmalloced;
 #endif
 
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index a870ba125aa8..5c1dcfc16c60 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
 	TP_STRUCT__entry(
 		__field(unsigned int, flags)
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+	TP_printk("%p %s%s%s", __entry->lockdep_addr,
+		  (__entry->flags & 1) ? "try " : "",
 		  (__entry->flags & 2) ? "read " : "",
 		  __get_str(name))
 );
@@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s", __get_str(name))
+	TP_printk("%p %s",
+		  __entry->lockdep_addr, __get_str(name))
 );
 
 #ifdef CONFIG_LOCK_STAT
@@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
+		__field(void *, lockdep_addr)
 	),
 
 	TP_fast_assign(
 		__assign_str(name, lock->name);
+		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%s", __get_str(name))
+	TP_printk("%p %s",
+		  __entry->lockdep_addr, __get_str(name))
 );
 
 TRACE_EVENT(lock_acquired,
@@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired,
 
 	TP_STRUCT__entry(
 		__string(name, lock->name)
-		__field(unsigned long, wait_usec)
-		__field(unsigned long, wait_nsec_rem)
+		__field(s64, wait_nsec)
+		__field(void *, lockdep_addr)
 	),
+
 	TP_fast_assign(
 		__assign_str(name, lock->name);
-		__entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
-		__entry->wait_usec = (unsigned long) waittime;
+		__entry->wait_nsec = waittime;
+		__entry->lockdep_addr = lock;
 	),
-	TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
-		  __entry->wait_nsec_rem)
+	TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
+		  __get_str(name),
+		  __entry->wait_nsec)
 );
 
 #endif
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6fe03e902ca..f2c09e4d656c 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -499,7 +499,7 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 /*
  * Generate the functions needed for tracepoint perf_event support.
@@ -542,7 +542,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * Stage 4 of the trace events.
@@ -627,7 +627,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
  *
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_enable = ftrace_profile_enable_##call,			\
@@ -635,7 +635,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
 
 #else
 #define _TRACE_PROFILE_INIT(call)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
 #define __entry entry
@@ -835,7 +835,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 * }
 */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 #undef __perf_addr
 #define __perf_addr(a) __addr = (a)
@@ -850,22 +850,12 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
	      proto)							\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	extern int perf_swevent_get_recursion_context(void);		\
-	extern void perf_swevent_put_recursion_context(int rctx);	\
-	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
-	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
-	char *trace_buf;						\
-	char *raw_data;							\
-	int __cpu;							\
 	int rctx;							\
-	int pc;								\
-									\
-	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
@@ -875,42 +865,16 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
 	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-									\
-	local_irq_save(irq_flags);					\
-									\
-	rctx = perf_swevent_get_recursion_context();			\
-	if (rctx < 0)							\
-		goto end_recursion;					\
-									\
-	__cpu = smp_processor_id();					\
-									\
-	if (in_nmi())							\
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
-	else								\
-		trace_buf = rcu_dereference(perf_trace_buf);		\
-									\
-	if (!trace_buf)							\
-		goto end;						\
-									\
-	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
-									\
-	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
-	entry = (struct ftrace_raw_##call *)raw_data;			\
-	ent = &entry->ent;						\
-	tracing_generic_entry_update(ent, irq_flags, pc);		\
-	ent->type = event_call->id;					\
-									\
+	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+		__entry_size, event_call->id, &rctx, &irq_flags);	\
+	if (!entry)							\
+		return;							\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
-	perf_tp_event(event_call->id, __addr, __count, entry,		\
-		       __entry_size);					\
-									\
-end:									\
-	perf_swevent_put_recursion_context(rctx);			\
-end_recursion:								\
-	local_irq_restore(irq_flags);					\
+	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
+			       __count, irq_flags);			\
 }
 
 #undef DEFINE_EVENT
@@ -927,7 +891,7 @@ static void ftrace_profile_##call(proto) \
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef _TRACE_PROFILE_INIT
 
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 961fda3556bb..3d463dcef298 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -49,12 +49,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
 enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
 enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
-#ifdef CONFIG_EVENT_PROFILE
+
+#ifdef CONFIG_PERF_EVENTS
 int prof_sysenter_enable(struct ftrace_event_call *call);
 void prof_sysenter_disable(struct ftrace_event_call *call);
 int prof_sysexit_enable(struct ftrace_event_call *call);
 void prof_sysexit_disable(struct ftrace_event_call *call);
-
 #endif
 
 #endif /* _TRACE_SYSCALL_H */
diff --git a/init/Kconfig b/init/Kconfig
index d95ca7cd5d45..cf2dc3cd4f04 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -976,19 +976,6 @@ config PERF_EVENTS
 
 	  Say Y if unsure.
 
-config EVENT_PROFILE
-	bool "Tracepoint profiling sources"
-	depends on PERF_EVENTS && EVENT_TRACING
-	default y
-	help
-	  Allow the use of tracepoints as software performance events.
-
-	  When this is enabled, you can create perf events based on
-	  tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
-	  found in debugfs://tracing/events/*/*/id. (The -e/--events
-	  option to the perf tool can parse and interpret symbolic
-	  tracepoints, in the subsystem:tracepoint_name format.)
-
 config PERF_COUNTERS
 	bool "Kernel performance counters (old config option)"
 	depends on HAVE_PERF_EVENTS
@@ -1112,7 +1099,7 @@ config MMAP_ALLOW_UNINITIALIZED
 	  See Documentation/nommu-mmap.txt for more information.
 
 config PROFILING
-	bool "Profiling support (EXPERIMENTAL)"
+	bool "Profiling support"
 	help
 	  Say Y here to enable the extended profiling support mechanisms used
 	  by profilers such as OProfile.
diff --git a/init/main.c b/init/main.c
index dac44a9356a5..4cb47a159f02 100644
--- a/init/main.c
+++ b/init/main.c
@@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void)
 	proc_caches_init();
 	buffer_init();
 	key_init();
+	radix_tree_init();
 	security_init();
 	vfs_caches_init(totalram_pages);
-	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
diff --git a/kernel/futex.c b/kernel/futex.c
index d9b3a2228f9d..e7a35f1039e7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -530,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 			return -EINVAL;
 
 		WARN_ON(!atomic_read(&pi_state->refcount));
-		WARN_ON(pid && pi_state->owner &&
-			pi_state->owner->pid != pid);
+
+		/*
+		 * When pi_state->owner is NULL then the owner died
+		 * and another waiter is on the fly. pi_state->owner
+		 * is fixed up by the task which acquires
+		 * pi_state->rt_mutex.
+		 *
+		 * We do not check for pid == 0 which can happen when
+		 * the owner died and robust_list_exit() cleared the
+		 * TID.
+		 */
+		if (pid && pi_state->owner) {
+			/*
+			 * Bail out if user space manipulated the
+			 * futex value.
+			 */
+			if (pid != task_pid_vnr(pi_state->owner))
+				return -EINVAL;
+		}
 
 		atomic_inc(&pi_state->refcount);
 		*ps = pi_state;
@@ -758,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!pi_state)
 		return -EINVAL;
 
+	/*
+	 * If current does not own the pi_state then the futex is
+	 * inconsistent and user space fiddled with the futex value.
+	 */
+	if (pi_state->owner != current)
+		return -EINVAL;
+
 	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
@@ -1971,7 +1995,7 @@ retry_private:
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
 
-	goto out;
+	goto out_put_key;
 
 out_unlock_put_key:
 	queue_unlock(&q, hb);
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 4d99512ee149..03808ed342a6 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -413,17 +413,17 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
  *
  * @return a set of per_cpu pointers to perf events
  */
-struct perf_event **
+struct perf_event * __percpu *
 register_wide_hw_breakpoint(struct perf_event_attr *attr,
 			    perf_overflow_handler_t triggered)
 {
-	struct perf_event **cpu_events, **pevent, *bp;
+	struct perf_event * __percpu *cpu_events, **pevent, *bp;
 	long err;
 	int cpu;
 
 	cpu_events = alloc_percpu(typeof(*cpu_events));
 	if (!cpu_events)
-		return ERR_PTR(-ENOMEM);
+		return (void __percpu __force *)ERR_PTR(-ENOMEM);
 
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
@@ -451,7 +451,7 @@ fail:
 	put_online_cpus();
 
 	free_percpu(cpu_events);
-	return ERR_PTR(err);
+	return (void __percpu __force *)ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
 
@@ -459,7 +459,7 @@ EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
-void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
+void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
 {
 	int cpu;
 	struct perf_event **pevent;
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 498cabba225e..35edbe22e9a9 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -80,7 +80,7 @@ int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
 
 	buffer = kmalloc(size, gfp_mask);
 	if (!buffer) {
-		_kfifo_init(fifo, 0, 0);
+		_kfifo_init(fifo, NULL, 0);
 		return -ENOMEM;
 	}
 
@@ -97,6 +97,7 @@ EXPORT_SYMBOL(kfifo_alloc);
 void kfifo_free(struct kfifo *fifo)
 {
 	kfree(fifo->buffer);
+	_kfifo_init(fifo, NULL, 0);
 }
 EXPORT_SYMBOL(kfifo_free);
 
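Taken together with the kfifo.h hunk above, freeing a fifo now leaves it in a state that kfifo_initialized() reports as uninitialized; a sketch of the intended behaviour (assuming kernel context where GFP_KERNEL is usable):

	struct kfifo fifo;

	if (kfifo_alloc(&fifo, PAGE_SIZE, GFP_KERNEL))
		return -ENOMEM;
	WARN_ON(!kfifo_initialized(&fifo));	/* buffer is set */

	kfifo_free(&fifo);
	WARN_ON(kfifo_initialized(&fifo));	/* buffer was reset to NULL */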
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index c7ade62e4ef0..761fdd2b3034 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -599,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
@@ -1453,7 +1453,7 @@ acquirelock:
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sync();
 		clocksource_touch_watchdog();
 		local_irq_restore(flags);
 
@@ -1553,7 +1553,7 @@ kgdb_restore:
 	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b7df302a0204..ccec774c716d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,6 +44,7 @@
 #include <linux/debugfs.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
+#include <linux/ftrace.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -93,6 +94,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
 	{"native_get_debugreg",},
 	{"irq_entries_start",},
 	{"common_interrupt",},
+	{"mcount",},	/* mcount can be called from everywhere */
 	{NULL}    /* Terminator */
 };
 
@@ -124,30 +126,6 @@ static LIST_HEAD(kprobe_insn_pages);
 static int kprobe_garbage_slots;
 static int collect_garbage_slots(void);
 
-static int __kprobes check_safety(void)
-{
-	int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
-	ret = freeze_processes();
-	if (ret == 0) {
-		struct task_struct *p, *q;
-		do_each_thread(p, q) {
-			if (p != current && p->state == TASK_RUNNING &&
-			    p->pid != 0) {
-				printk("Check failed: %s is running\n",p->comm);
-				ret = -1;
-				goto loop_end;
-			}
-		} while_each_thread(p, q);
-	}
-loop_end:
-	thaw_processes();
-#else
-	synchronize_sched();
-#endif
-	return ret;
-}
-
 /**
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
@@ -235,9 +213,8 @@ static int __kprobes collect_garbage_slots(void)
 {
 	struct kprobe_insn_page *kip, *next;
 
-	/* Ensure no-one is preepmted on the garbages */
-	if (check_safety())
-		return -EAGAIN;
+	/* Ensure no-one is interrupted on the garbages */
+	synchronize_sched();
 
 	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
 		int i;
@@ -728,7 +705,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
-	    in_kprobes_functions((unsigned long) p->addr)) {
+	    in_kprobes_functions((unsigned long) p->addr) ||
+	    ftrace_text_reserved(p->addr, p->addr)) {
 		preempt_enable();
 		return -EINVAL;
 	}
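Illustrative only: with the new ftrace_text_reserved() check, a probe that lands on ftrace-managed text is refused at registration time instead of corrupting the call site. A hypothetical caller would see the usual -EINVAL path:

static int dummy_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "schedule",	/* made-up probe target */
	.pre_handler	= dummy_pre,
};

static int __init probe_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret)	/* -EINVAL if the address sits in ftrace-reserved text */
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}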
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2ae7409bf38f..482d5e1d3764 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -56,21 +56,6 @@ static atomic_t nr_task_events __read_mostly;
56 */ 56 */
57int sysctl_perf_event_paranoid __read_mostly = 1; 57int sysctl_perf_event_paranoid __read_mostly = 1;
58 58
59static inline bool perf_paranoid_tracepoint_raw(void)
60{
61 return sysctl_perf_event_paranoid > -1;
62}
63
64static inline bool perf_paranoid_cpu(void)
65{
66 return sysctl_perf_event_paranoid > 0;
67}
68
69static inline bool perf_paranoid_kernel(void)
70{
71 return sysctl_perf_event_paranoid > 1;
72}
73
74int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ 59int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
75 60
76/* 61/*
@@ -98,11 +83,12 @@ void __weak hw_perf_enable(void) { barrier(); }
98 83
99void __weak hw_perf_event_setup(int cpu) { barrier(); } 84void __weak hw_perf_event_setup(int cpu) { barrier(); }
100void __weak hw_perf_event_setup_online(int cpu) { barrier(); } 85void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
86void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
101 87
102int __weak 88int __weak
103hw_perf_group_sched_in(struct perf_event *group_leader, 89hw_perf_group_sched_in(struct perf_event *group_leader,
104 struct perf_cpu_context *cpuctx, 90 struct perf_cpu_context *cpuctx,
105 struct perf_event_context *ctx, int cpu) 91 struct perf_event_context *ctx)
106{ 92{
107 return 0; 93 return 0;
108} 94}
@@ -248,7 +234,7 @@ static void perf_unpin_context(struct perf_event_context *ctx)
248 234
249static inline u64 perf_clock(void) 235static inline u64 perf_clock(void)
250{ 236{
251 return cpu_clock(smp_processor_id()); 237 return cpu_clock(raw_smp_processor_id());
252} 238}
253 239
254/* 240/*
@@ -289,6 +275,15 @@ static void update_event_times(struct perf_event *event)
289 event->total_time_running = run_end - event->tstamp_running; 275 event->total_time_running = run_end - event->tstamp_running;
290} 276}
291 277
278static struct list_head *
279ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
280{
281 if (event->attr.pinned)
282 return &ctx->pinned_groups;
283 else
284 return &ctx->flexible_groups;
285}
286
292/* 287/*
293 * Add a event from the lists for its context. 288 * Add a event from the lists for its context.
294 * Must be called with ctx->mutex and ctx->lock held. 289 * Must be called with ctx->mutex and ctx->lock held.
@@ -303,9 +298,19 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
303 * add it straight to the context's event list, or to the group 298 * add it straight to the context's event list, or to the group
304 * leader's sibling list: 299 * leader's sibling list:
305 */ 300 */
306 if (group_leader == event) 301 if (group_leader == event) {
307 list_add_tail(&event->group_entry, &ctx->group_list); 302 struct list_head *list;
308 else { 303
304 if (is_software_event(event))
305 event->group_flags |= PERF_GROUP_SOFTWARE;
306
307 list = ctx_group_list(event, ctx);
308 list_add_tail(&event->group_entry, list);
309 } else {
310 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
311 !is_software_event(event))
312 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
313
309 list_add_tail(&event->group_entry, &group_leader->sibling_list); 314 list_add_tail(&event->group_entry, &group_leader->sibling_list);
310 group_leader->nr_siblings++; 315 group_leader->nr_siblings++;
311 } 316 }
@@ -355,9 +360,14 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
355 * to the context list directly: 360 * to the context list directly:
356 */ 361 */
357 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { 362 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
363 struct list_head *list;
358 364
359 list_move_tail(&sibling->group_entry, &ctx->group_list); 365 list = ctx_group_list(event, ctx);
366 list_move_tail(&sibling->group_entry, list);
360 sibling->group_leader = sibling; 367 sibling->group_leader = sibling;
368
369 /* Inherit group flags from the previous leader */
370 sibling->group_flags = event->group_flags;
361 } 371 }
362} 372}
363 373
@@ -608,14 +618,13 @@ void perf_event_disable(struct perf_event *event)
608static int 618static int
609event_sched_in(struct perf_event *event, 619event_sched_in(struct perf_event *event,
610 struct perf_cpu_context *cpuctx, 620 struct perf_cpu_context *cpuctx,
611 struct perf_event_context *ctx, 621 struct perf_event_context *ctx)
612 int cpu)
613{ 622{
614 if (event->state <= PERF_EVENT_STATE_OFF) 623 if (event->state <= PERF_EVENT_STATE_OFF)
615 return 0; 624 return 0;
616 625
617 event->state = PERF_EVENT_STATE_ACTIVE; 626 event->state = PERF_EVENT_STATE_ACTIVE;
618 event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ 627 event->oncpu = smp_processor_id();
619 /* 628 /*
620 * The new state must be visible before we turn it on in the hardware: 629 * The new state must be visible before we turn it on in the hardware:
621 */ 630 */
@@ -642,8 +651,7 @@ event_sched_in(struct perf_event *event,
642static int 651static int
643group_sched_in(struct perf_event *group_event, 652group_sched_in(struct perf_event *group_event,
644 struct perf_cpu_context *cpuctx, 653 struct perf_cpu_context *cpuctx,
645 struct perf_event_context *ctx, 654 struct perf_event_context *ctx)
646 int cpu)
647{ 655{
648 struct perf_event *event, *partial_group; 656 struct perf_event *event, *partial_group;
649 int ret; 657 int ret;
@@ -651,18 +659,18 @@ group_sched_in(struct perf_event *group_event,
651 if (group_event->state == PERF_EVENT_STATE_OFF) 659 if (group_event->state == PERF_EVENT_STATE_OFF)
652 return 0; 660 return 0;
653 661
654 ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu); 662 ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
655 if (ret) 663 if (ret)
656 return ret < 0 ? ret : 0; 664 return ret < 0 ? ret : 0;
657 665
658 if (event_sched_in(group_event, cpuctx, ctx, cpu)) 666 if (event_sched_in(group_event, cpuctx, ctx))
659 return -EAGAIN; 667 return -EAGAIN;
660 668
661 /* 669 /*
662 * Schedule in siblings as one group (if any): 670 * Schedule in siblings as one group (if any):
663 */ 671 */
664 list_for_each_entry(event, &group_event->sibling_list, group_entry) { 672 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
665 if (event_sched_in(event, cpuctx, ctx, cpu)) { 673 if (event_sched_in(event, cpuctx, ctx)) {
666 partial_group = event; 674 partial_group = event;
667 goto group_error; 675 goto group_error;
668 } 676 }
@@ -686,24 +694,6 @@ group_error:
686} 694}
687 695
688/* 696/*
689 * Return 1 for a group consisting entirely of software events,
690 * 0 if the group contains any hardware events.
691 */
692static int is_software_only_group(struct perf_event *leader)
693{
694 struct perf_event *event;
695
696 if (!is_software_event(leader))
697 return 0;
698
699 list_for_each_entry(event, &leader->sibling_list, group_entry)
700 if (!is_software_event(event))
701 return 0;
702
703 return 1;
704}
705
706/*
707 * Work out whether we can put this event group on the CPU now. 697 * Work out whether we can put this event group on the CPU now.
708 */ 698 */
709static int group_can_go_on(struct perf_event *event, 699static int group_can_go_on(struct perf_event *event,
@@ -713,7 +703,7 @@ static int group_can_go_on(struct perf_event *event,
713 /* 703 /*
714 * Groups consisting entirely of software events can always go on. 704 * Groups consisting entirely of software events can always go on.
715 */ 705 */
716 if (is_software_only_group(event)) 706 if (event->group_flags & PERF_GROUP_SOFTWARE)
717 return 1; 707 return 1;
718 /* 708 /*
719 * If an exclusive group is already on, no other hardware 709 * If an exclusive group is already on, no other hardware
@@ -754,7 +744,6 @@ static void __perf_install_in_context(void *info)
754 struct perf_event *event = info; 744 struct perf_event *event = info;
755 struct perf_event_context *ctx = event->ctx; 745 struct perf_event_context *ctx = event->ctx;
756 struct perf_event *leader = event->group_leader; 746 struct perf_event *leader = event->group_leader;
757 int cpu = smp_processor_id();
758 int err; 747 int err;
759 748
760 /* 749 /*
@@ -801,7 +790,7 @@ static void __perf_install_in_context(void *info)
801 if (!group_can_go_on(event, cpuctx, 1)) 790 if (!group_can_go_on(event, cpuctx, 1))
802 err = -EEXIST; 791 err = -EEXIST;
803 else 792 else
804 err = event_sched_in(event, cpuctx, ctx, cpu); 793 err = event_sched_in(event, cpuctx, ctx);
805 794
806 if (err) { 795 if (err) {
807 /* 796 /*
@@ -943,11 +932,9 @@ static void __perf_event_enable(void *info)
943 } else { 932 } else {
944 perf_disable(); 933 perf_disable();
945 if (event == leader) 934 if (event == leader)
946 err = group_sched_in(event, cpuctx, ctx, 935 err = group_sched_in(event, cpuctx, ctx);
947 smp_processor_id());
948 else 936 else
949 err = event_sched_in(event, cpuctx, ctx, 937 err = event_sched_in(event, cpuctx, ctx);
950 smp_processor_id());
951 perf_enable(); 938 perf_enable();
952 } 939 }
953 940
@@ -1043,8 +1030,15 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
1043 return 0; 1030 return 0;
1044} 1031}
1045 1032
1046void __perf_event_sched_out(struct perf_event_context *ctx, 1033enum event_type_t {
1047 struct perf_cpu_context *cpuctx) 1034 EVENT_FLEXIBLE = 0x1,
1035 EVENT_PINNED = 0x2,
1036 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1037};
1038
1039static void ctx_sched_out(struct perf_event_context *ctx,
1040 struct perf_cpu_context *cpuctx,
1041 enum event_type_t event_type)
1048{ 1042{
1049 struct perf_event *event; 1043 struct perf_event *event;
1050 1044
@@ -1055,10 +1049,18 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
1055 update_context_time(ctx); 1049 update_context_time(ctx);
1056 1050
1057 perf_disable(); 1051 perf_disable();
1058 if (ctx->nr_active) { 1052 if (!ctx->nr_active)
1059 list_for_each_entry(event, &ctx->group_list, group_entry) 1053 goto out_enable;
1054
1055 if (event_type & EVENT_PINNED)
1056 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1060 group_sched_out(event, cpuctx, ctx); 1057 group_sched_out(event, cpuctx, ctx);
1061 } 1058
1059 if (event_type & EVENT_FLEXIBLE)
1060 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1061 group_sched_out(event, cpuctx, ctx);
1062
1063 out_enable:
1062 perf_enable(); 1064 perf_enable();
1063 out: 1065 out:
1064 raw_spin_unlock(&ctx->lock); 1066 raw_spin_unlock(&ctx->lock);
@@ -1170,9 +1172,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
1170 * not restart the event. 1172 * not restart the event.
1171 */ 1173 */
1172void perf_event_task_sched_out(struct task_struct *task, 1174void perf_event_task_sched_out(struct task_struct *task,
1173 struct task_struct *next, int cpu) 1175 struct task_struct *next)
1174{ 1176{
1175 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1177 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1176 struct perf_event_context *ctx = task->perf_event_ctxp; 1178 struct perf_event_context *ctx = task->perf_event_ctxp;
1177 struct perf_event_context *next_ctx; 1179 struct perf_event_context *next_ctx;
1178 struct perf_event_context *parent; 1180 struct perf_event_context *parent;
@@ -1220,15 +1222,13 @@ void perf_event_task_sched_out(struct task_struct *task,
1220 rcu_read_unlock(); 1222 rcu_read_unlock();
1221 1223
1222 if (do_switch) { 1224 if (do_switch) {
1223 __perf_event_sched_out(ctx, cpuctx); 1225 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
1224 cpuctx->task_ctx = NULL; 1226 cpuctx->task_ctx = NULL;
1225 } 1227 }
1226} 1228}
1227 1229
1228/* 1230static void task_ctx_sched_out(struct perf_event_context *ctx,
1229 * Called with IRQs disabled 1231 enum event_type_t event_type)
1230 */
1231static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1232{ 1232{
1233 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1233 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1234 1234
@@ -1238,47 +1238,41 @@ static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1238 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 1238 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1239 return; 1239 return;
1240 1240
1241 __perf_event_sched_out(ctx, cpuctx); 1241 ctx_sched_out(ctx, cpuctx, event_type);
1242 cpuctx->task_ctx = NULL; 1242 cpuctx->task_ctx = NULL;
1243} 1243}
1244 1244
1245/* 1245/*
1246 * Called with IRQs disabled 1246 * Called with IRQs disabled
1247 */ 1247 */
1248static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx) 1248static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1249{
1250 task_ctx_sched_out(ctx, EVENT_ALL);
1251}
1252
1253/*
1254 * Called with IRQs disabled
1255 */
1256static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1257 enum event_type_t event_type)
1249{ 1258{
1250 __perf_event_sched_out(&cpuctx->ctx, cpuctx); 1259 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
1251} 1260}
1252 1261
1253static void 1262static void
1254__perf_event_sched_in(struct perf_event_context *ctx, 1263ctx_pinned_sched_in(struct perf_event_context *ctx,
1255 struct perf_cpu_context *cpuctx, int cpu) 1264 struct perf_cpu_context *cpuctx)
1256{ 1265{
1257 struct perf_event *event; 1266 struct perf_event *event;
1258 int can_add_hw = 1;
1259
1260 raw_spin_lock(&ctx->lock);
1261 ctx->is_active = 1;
1262 if (likely(!ctx->nr_events))
1263 goto out;
1264
1265 ctx->timestamp = perf_clock();
1266
1267 perf_disable();
1268 1267
1269 /* 1268 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1270 * First go through the list and put on any pinned groups 1269 if (event->state <= PERF_EVENT_STATE_OFF)
1271 * in order to give them the best chance of going on.
1272 */
1273 list_for_each_entry(event, &ctx->group_list, group_entry) {
1274 if (event->state <= PERF_EVENT_STATE_OFF ||
1275 !event->attr.pinned)
1276 continue; 1270 continue;
1277 if (event->cpu != -1 && event->cpu != cpu) 1271 if (event->cpu != -1 && event->cpu != smp_processor_id())
1278 continue; 1272 continue;
1279 1273
1280 if (group_can_go_on(event, cpuctx, 1)) 1274 if (group_can_go_on(event, cpuctx, 1))
1281 group_sched_in(event, cpuctx, ctx, cpu); 1275 group_sched_in(event, cpuctx, ctx);
1282 1276
1283 /* 1277 /*
1284 * If this pinned group hasn't been scheduled, 1278 * If this pinned group hasn't been scheduled,
@@ -1289,32 +1283,83 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1289 event->state = PERF_EVENT_STATE_ERROR; 1283 event->state = PERF_EVENT_STATE_ERROR;
1290 } 1284 }
1291 } 1285 }
1286}
1292 1287
1293 list_for_each_entry(event, &ctx->group_list, group_entry) { 1288static void
1294 /* 1289ctx_flexible_sched_in(struct perf_event_context *ctx,
1295 * Ignore events in OFF or ERROR state, and 1290 struct perf_cpu_context *cpuctx)
1296 * ignore pinned events since we did them already. 1291{
1297 */ 1292 struct perf_event *event;
1298 if (event->state <= PERF_EVENT_STATE_OFF || 1293 int can_add_hw = 1;
1299 event->attr.pinned)
1300 continue;
1301 1294
1295 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1296 /* Ignore events in OFF or ERROR state */
1297 if (event->state <= PERF_EVENT_STATE_OFF)
1298 continue;
1302 /* 1299 /*
1303 * Listen to the 'cpu' scheduling filter constraint 1300 * Listen to the 'cpu' scheduling filter constraint
1304 * of events: 1301 * of events:
1305 */ 1302 */
1306 if (event->cpu != -1 && event->cpu != cpu) 1303 if (event->cpu != -1 && event->cpu != smp_processor_id())
1307 continue; 1304 continue;
1308 1305
1309 if (group_can_go_on(event, cpuctx, can_add_hw)) 1306 if (group_can_go_on(event, cpuctx, can_add_hw))
1310 if (group_sched_in(event, cpuctx, ctx, cpu)) 1307 if (group_sched_in(event, cpuctx, ctx))
1311 can_add_hw = 0; 1308 can_add_hw = 0;
1312 } 1309 }
1310}
1311
1312static void
1313ctx_sched_in(struct perf_event_context *ctx,
1314 struct perf_cpu_context *cpuctx,
1315 enum event_type_t event_type)
1316{
1317 raw_spin_lock(&ctx->lock);
1318 ctx->is_active = 1;
1319 if (likely(!ctx->nr_events))
1320 goto out;
1321
1322 ctx->timestamp = perf_clock();
1323
1324 perf_disable();
1325
1326 /*
1327 * First go through the list and put on any pinned groups
1328 * in order to give them the best chance of going on.
1329 */
1330 if (event_type & EVENT_PINNED)
1331 ctx_pinned_sched_in(ctx, cpuctx);
1332
1333 /* Then walk through the lower prio flexible groups */
1334 if (event_type & EVENT_FLEXIBLE)
1335 ctx_flexible_sched_in(ctx, cpuctx);
1336
1313 perf_enable(); 1337 perf_enable();
1314 out: 1338 out:
1315 raw_spin_unlock(&ctx->lock); 1339 raw_spin_unlock(&ctx->lock);
1316} 1340}
1317 1341
1342static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1343 enum event_type_t event_type)
1344{
1345 struct perf_event_context *ctx = &cpuctx->ctx;
1346
1347 ctx_sched_in(ctx, cpuctx, event_type);
1348}
1349
1350static void task_ctx_sched_in(struct task_struct *task,
1351 enum event_type_t event_type)
1352{
1353 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1354 struct perf_event_context *ctx = task->perf_event_ctxp;
1355
1356 if (likely(!ctx))
1357 return;
1358 if (cpuctx->task_ctx == ctx)
1359 return;
1360 ctx_sched_in(ctx, cpuctx, event_type);
1361 cpuctx->task_ctx = ctx;
1362}
1318/* 1363/*
1319 * Called from scheduler to add the events of the current task 1364 * Called from scheduler to add the events of the current task
1320 * with interrupts disabled. 1365 * with interrupts disabled.
@@ -1326,38 +1371,128 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1326 * accessing the event control register. If a NMI hits, then it will 1371 * accessing the event control register. If a NMI hits, then it will
1327 * keep the event running. 1372 * keep the event running.
1328 */ 1373 */
1329void perf_event_task_sched_in(struct task_struct *task, int cpu) 1374void perf_event_task_sched_in(struct task_struct *task)
1330{ 1375{
1331 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1376 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1332 struct perf_event_context *ctx = task->perf_event_ctxp; 1377 struct perf_event_context *ctx = task->perf_event_ctxp;
1333 1378
1334 if (likely(!ctx)) 1379 if (likely(!ctx))
1335 return; 1380 return;
1381
1336 if (cpuctx->task_ctx == ctx) 1382 if (cpuctx->task_ctx == ctx)
1337 return; 1383 return;
1338 __perf_event_sched_in(ctx, cpuctx, cpu); 1384
1385 /*
1386 * We want to keep the following priority order:
1387 * cpu pinned (that don't need to move), task pinned,
1388 * cpu flexible, task flexible.
1389 */
1390 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1391
1392 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1393 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1394 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1395
1339 cpuctx->task_ctx = ctx; 1396 cpuctx->task_ctx = ctx;
1340} 1397}
1341 1398
1342static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) 1399#define MAX_INTERRUPTS (~0ULL)
1400
1401static void perf_log_throttle(struct perf_event *event, int enable);
1402
1403static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1343{ 1404{
1344 struct perf_event_context *ctx = &cpuctx->ctx; 1405 u64 frequency = event->attr.sample_freq;
1406 u64 sec = NSEC_PER_SEC;
1407 u64 divisor, dividend;
1408
1409 int count_fls, nsec_fls, frequency_fls, sec_fls;
1410
1411 count_fls = fls64(count);
1412 nsec_fls = fls64(nsec);
1413 frequency_fls = fls64(frequency);
1414 sec_fls = 30;
1415
1416 /*
1417 * We got @count in @nsec, with a target of sample_freq HZ
1418 * the target period becomes:
1419 *
1420 * @count * 10^9
1421 * period = -------------------
1422 * @nsec * sample_freq
1423 *
1424 */
1425
1426 /*
1427 * Reduce accuracy by one bit such that @a and @b converge
1428 * to a similar magnitude.
1429 */
1430#define REDUCE_FLS(a, b) \
1431do { \
1432 if (a##_fls > b##_fls) { \
1433 a >>= 1; \
1434 a##_fls--; \
1435 } else { \
1436 b >>= 1; \
1437 b##_fls--; \
1438 } \
1439} while (0)
1440
1441 /*
1442 * Reduce accuracy until either term fits in a u64, then proceed with
1443 * the other, so that finally we can do a u64/u64 division.
1444 */
1445 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1446 REDUCE_FLS(nsec, frequency);
1447 REDUCE_FLS(sec, count);
1448 }
1449
1450 if (count_fls + sec_fls > 64) {
1451 divisor = nsec * frequency;
1452
1453 while (count_fls + sec_fls > 64) {
1454 REDUCE_FLS(count, sec);
1455 divisor >>= 1;
1456 }
1345 1457
1346 __perf_event_sched_in(ctx, cpuctx, cpu); 1458 dividend = count * sec;
1459 } else {
1460 dividend = count * sec;
1461
1462 while (nsec_fls + frequency_fls > 64) {
1463 REDUCE_FLS(nsec, frequency);
1464 dividend >>= 1;
1465 }
1466
1467 divisor = nsec * frequency;
1468 }
1469
1470 return div64_u64(dividend, divisor);
1347} 1471}
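perf_calculate_period() is plain fixed-point arithmetic and can be exercised in userspace. Below is a minimal standalone sketch of the same algorithm, with fls64() emulated via __builtin_clzll() and div64_u64() replaced by a native 64-bit divide; the hardcoded sec_fls = 30 mirrors the kernel's shortcut, since fls64(NSEC_PER_SEC) == 30:

#include <stdio.h>
#include <stdint.h>

static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

#define REDUCE_FLS(a, b) \
do { \
	if (a##_fls > b##_fls) { \
		a >>= 1; \
		a##_fls--; \
	} else { \
		b >>= 1; \
		b##_fls--; \
	} \
} while (0)

static uint64_t calc_period(uint64_t frequency, uint64_t nsec, uint64_t count)
{
	uint64_t sec = 1000000000ULL;	/* NSEC_PER_SEC */
	uint64_t divisor, dividend;
	int count_fls = fls64(count), nsec_fls = fls64(nsec);
	int frequency_fls = fls64(frequency), sec_fls = 30;

	/* shave bits off both terms until one side fits in 64 bits */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;
		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}
		dividend = count * sec;
	} else {
		dividend = count * sec;
		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}
		divisor = nsec * frequency;
	}

	return divisor ? dividend / divisor : 0;
}

int main(void)
{
	/* 10^6 events in 10 ms with a 4000 Hz target */
	printf("%llu\n", (unsigned long long)
	       calc_period(4000, 10000000ULL, 1000000ULL));	/* 25000 */
	return 0;
}

With 10^6 events counted in 10 ms against a 4000 Hz target this prints 25000, matching count * NSEC_PER_SEC / (nsec * sample_freq) = 10^6 * 10^9 / (10^7 * 4000).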
1348 1472
1349#define MAX_INTERRUPTS (~0ULL) 1473static void perf_event_stop(struct perf_event *event)
1474{
1475 if (!event->pmu->stop)
1476 return event->pmu->disable(event);
1350 1477
1351static void perf_log_throttle(struct perf_event *event, int enable); 1478 return event->pmu->stop(event);
1479}
1480
1481static int perf_event_start(struct perf_event *event)
1482{
1483 if (!event->pmu->start)
1484 return event->pmu->enable(event);
1352 1485
1353static void perf_adjust_period(struct perf_event *event, u64 events) 1486 return event->pmu->start(event);
1487}
1488
1489static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
1354{ 1490{
1355 struct hw_perf_event *hwc = &event->hw; 1491 struct hw_perf_event *hwc = &event->hw;
1356 u64 period, sample_period; 1492 u64 period, sample_period;
1357 s64 delta; 1493 s64 delta;
1358 1494
1359 events *= hwc->sample_period; 1495 period = perf_calculate_period(event, nsec, count);
1360 period = div64_u64(events, event->attr.sample_freq);
1361 1496
1362 delta = (s64)(period - hwc->sample_period); 1497 delta = (s64)(period - hwc->sample_period);
1363 delta = (delta + 7) / 8; /* low pass filter */ 1498 delta = (delta + 7) / 8; /* low pass filter */
@@ -1368,13 +1503,22 @@ static void perf_adjust_period(struct perf_event *event, u64 events)
1368 sample_period = 1; 1503 sample_period = 1;
1369 1504
1370 hwc->sample_period = sample_period; 1505 hwc->sample_period = sample_period;
1506
1507 if (atomic64_read(&hwc->period_left) > 8*sample_period) {
1508 perf_disable();
1509 perf_event_stop(event);
1510 atomic64_set(&hwc->period_left, 0);
1511 perf_event_start(event);
1512 perf_enable();
1513 }
1371} 1514}
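The (delta + 7) / 8 step is an exponential low-pass filter: each tick moves the period one eighth of the remaining distance toward the freshly computed target, so one noisy measurement cannot yank the sample period around. A userspace toy run:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t sample_period = 1000, target = 25000;
	int tick;

	for (tick = 1; tick <= 5; tick++) {
		int64_t delta = target - sample_period;

		delta = (delta + 7) / 8;	/* low pass filter */
		sample_period += delta;
		printf("tick %d: period %lld\n", tick,
		       (long long)sample_period);
	}
	return 0;	/* 4000, 6625, 8922, 10932, 12691, ... -> 25000 */
}

The new period_left check then force-reloads the event whenever more than eight old periods are still pending, replacing the old '!interrupts' reset heuristic removed from perf_ctx_adjust_freq() below.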
1372 1515
1373static void perf_ctx_adjust_freq(struct perf_event_context *ctx) 1516static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1374{ 1517{
1375 struct perf_event *event; 1518 struct perf_event *event;
1376 struct hw_perf_event *hwc; 1519 struct hw_perf_event *hwc;
1377 u64 interrupts, freq; 1520 u64 interrupts, now;
1521 s64 delta;
1378 1522
1379 raw_spin_lock(&ctx->lock); 1523 raw_spin_lock(&ctx->lock);
1380 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 1524 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
@@ -1395,44 +1539,18 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1395 if (interrupts == MAX_INTERRUPTS) { 1539 if (interrupts == MAX_INTERRUPTS) {
1396 perf_log_throttle(event, 1); 1540 perf_log_throttle(event, 1);
1397 event->pmu->unthrottle(event); 1541 event->pmu->unthrottle(event);
1398 interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1399 } 1542 }
1400 1543
1401 if (!event->attr.freq || !event->attr.sample_freq) 1544 if (!event->attr.freq || !event->attr.sample_freq)
1402 continue; 1545 continue;
1403 1546
1404 /* 1547 event->pmu->read(event);
1405 * if the specified freq < HZ then we need to skip ticks 1548 now = atomic64_read(&event->count);
1406 */ 1549 delta = now - hwc->freq_count_stamp;
1407 if (event->attr.sample_freq < HZ) { 1550 hwc->freq_count_stamp = now;
1408 freq = event->attr.sample_freq;
1409
1410 hwc->freq_count += freq;
1411 hwc->freq_interrupts += interrupts;
1412
1413 if (hwc->freq_count < HZ)
1414 continue;
1415
1416 interrupts = hwc->freq_interrupts;
1417 hwc->freq_interrupts = 0;
1418 hwc->freq_count -= HZ;
1419 } else
1420 freq = HZ;
1421
1422 perf_adjust_period(event, freq * interrupts);
1423 1551
1424 /* 1552 if (delta > 0)
1425 * In order to avoid being stalled by an (accidental) huge 1553 perf_adjust_period(event, TICK_NSEC, delta);
1426 * sample period, force reset the sample period if we didn't
1427 * get any events in this freq period.
1428 */
1429 if (!interrupts) {
1430 perf_disable();
1431 event->pmu->disable(event);
1432 atomic64_set(&hwc->period_left, 0);
1433 event->pmu->enable(event);
1434 perf_enable();
1435 }
1436 } 1554 }
1437 raw_spin_unlock(&ctx->lock); 1555 raw_spin_unlock(&ctx->lock);
1438} 1556}
@@ -1442,26 +1560,18 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1442 */ 1560 */
1443static void rotate_ctx(struct perf_event_context *ctx) 1561static void rotate_ctx(struct perf_event_context *ctx)
1444{ 1562{
1445 struct perf_event *event;
1446
1447 if (!ctx->nr_events) 1563 if (!ctx->nr_events)
1448 return; 1564 return;
1449 1565
1450 raw_spin_lock(&ctx->lock); 1566 raw_spin_lock(&ctx->lock);
1451 /* 1567
1452 * Rotate the first entry last (works just fine for group events too): 1568 /* Rotate the first entry last of non-pinned groups */
1453 */ 1569 list_rotate_left(&ctx->flexible_groups);
1454 perf_disable();
1455 list_for_each_entry(event, &ctx->group_list, group_entry) {
1456 list_move_tail(&event->group_entry, &ctx->group_list);
1457 break;
1458 }
1459 perf_enable();
1460 1570
1461 raw_spin_unlock(&ctx->lock); 1571 raw_spin_unlock(&ctx->lock);
1462} 1572}
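list_rotate_left() itself is not in this diff; assuming the <linux/list.h> helper added for this use, it is equivalent to the open-coded list_move_tail() loop it replaces:

static inline void list_rotate_left_sketch(struct list_head *head)
{
	struct list_head *first;

	if (!list_empty(head)) {
		first = head->next;
		list_move_tail(first, head);	/* first entry becomes last */
	}
}

Note that only flexible_groups is rotated: pinned groups must stay on the PMU permanently, so rotating them would be meaningless.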
1463 1573
1464void perf_event_task_tick(struct task_struct *curr, int cpu) 1574void perf_event_task_tick(struct task_struct *curr)
1465{ 1575{
1466 struct perf_cpu_context *cpuctx; 1576 struct perf_cpu_context *cpuctx;
1467 struct perf_event_context *ctx; 1577 struct perf_event_context *ctx;
@@ -1469,24 +1579,43 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
1469 if (!atomic_read(&nr_events)) 1579 if (!atomic_read(&nr_events))
1470 return; 1580 return;
1471 1581
1472 cpuctx = &per_cpu(perf_cpu_context, cpu); 1582 cpuctx = &__get_cpu_var(perf_cpu_context);
1473 ctx = curr->perf_event_ctxp; 1583 ctx = curr->perf_event_ctxp;
1474 1584
1585 perf_disable();
1586
1475 perf_ctx_adjust_freq(&cpuctx->ctx); 1587 perf_ctx_adjust_freq(&cpuctx->ctx);
1476 if (ctx) 1588 if (ctx)
1477 perf_ctx_adjust_freq(ctx); 1589 perf_ctx_adjust_freq(ctx);
1478 1590
1479 perf_event_cpu_sched_out(cpuctx); 1591 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1480 if (ctx) 1592 if (ctx)
1481 __perf_event_task_sched_out(ctx); 1593 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
1482 1594
1483 rotate_ctx(&cpuctx->ctx); 1595 rotate_ctx(&cpuctx->ctx);
1484 if (ctx) 1596 if (ctx)
1485 rotate_ctx(ctx); 1597 rotate_ctx(ctx);
1486 1598
1487 perf_event_cpu_sched_in(cpuctx, cpu); 1599 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1488 if (ctx) 1600 if (ctx)
1489 perf_event_task_sched_in(curr, cpu); 1601 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
1602
1603 perf_enable();
1604}
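This sched-out / rotate / sched-in sequence is what multiplexes more flexible events than the PMU has counters. A userspace toy model of the resulting round-robin, assuming 2 hardware counters and 3 flexible events:

#include <stdio.h>

int main(void)
{
	const char *ev[] = { "A", "B", "C" };
	int nr_events = 3, nr_counters = 2, first = 0, tick, i;

	for (tick = 0; tick < 4; tick++) {
		printf("tick %d:", tick);
		for (i = 0; i < nr_counters; i++)
			printf(" %s", ev[(first + i) % nr_events]);
		printf("\n");
		first = (first + 1) % nr_events;	/* list_rotate_left() */
	}
	return 0;	/* A B / B C / C A / A B */
}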
1605
1606static int event_enable_on_exec(struct perf_event *event,
1607 struct perf_event_context *ctx)
1608{
1609 if (!event->attr.enable_on_exec)
1610 return 0;
1611
1612 event->attr.enable_on_exec = 0;
1613 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1614 return 0;
1615
1616 __perf_event_mark_enabled(event, ctx);
1617
1618 return 1;
1490} 1619}
1491 1620
1492/* 1621/*
@@ -1499,6 +1628,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
1499 struct perf_event *event; 1628 struct perf_event *event;
1500 unsigned long flags; 1629 unsigned long flags;
1501 int enabled = 0; 1630 int enabled = 0;
1631 int ret;
1502 1632
1503 local_irq_save(flags); 1633 local_irq_save(flags);
1504 ctx = task->perf_event_ctxp; 1634 ctx = task->perf_event_ctxp;
@@ -1509,14 +1639,16 @@ static void perf_event_enable_on_exec(struct task_struct *task)
1509 1639
1510 raw_spin_lock(&ctx->lock); 1640 raw_spin_lock(&ctx->lock);
1511 1641
1512 list_for_each_entry(event, &ctx->group_list, group_entry) { 1642 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1513 if (!event->attr.enable_on_exec) 1643 ret = event_enable_on_exec(event, ctx);
1514 continue; 1644 if (ret)
1515 event->attr.enable_on_exec = 0; 1645 enabled = 1;
1516 if (event->state >= PERF_EVENT_STATE_INACTIVE) 1646 }
1517 continue; 1647
1518 __perf_event_mark_enabled(event, ctx); 1648 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1519 enabled = 1; 1649 ret = event_enable_on_exec(event, ctx);
1650 if (ret)
1651 enabled = 1;
1520 } 1652 }
1521 1653
1522 /* 1654 /*
@@ -1527,7 +1659,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
1527 1659
1528 raw_spin_unlock(&ctx->lock); 1660 raw_spin_unlock(&ctx->lock);
1529 1661
1530 perf_event_task_sched_in(task, smp_processor_id()); 1662 perf_event_task_sched_in(task);
1531 out: 1663 out:
1532 local_irq_restore(flags); 1664 local_irq_restore(flags);
1533} 1665}
@@ -1590,7 +1722,8 @@ __perf_event_init_context(struct perf_event_context *ctx,
1590{ 1722{
1591 raw_spin_lock_init(&ctx->lock); 1723 raw_spin_lock_init(&ctx->lock);
1592 mutex_init(&ctx->mutex); 1724 mutex_init(&ctx->mutex);
1593 INIT_LIST_HEAD(&ctx->group_list); 1725 INIT_LIST_HEAD(&ctx->pinned_groups);
1726 INIT_LIST_HEAD(&ctx->flexible_groups);
1594 INIT_LIST_HEAD(&ctx->event_list); 1727 INIT_LIST_HEAD(&ctx->event_list);
1595 atomic_set(&ctx->refcount, 1); 1728 atomic_set(&ctx->refcount, 1);
1596 ctx->task = task; 1729 ctx->task = task;
@@ -3608,7 +3741,7 @@ void __perf_event_mmap(struct vm_area_struct *vma)
3608 /* .tid */ 3741 /* .tid */
3609 .start = vma->vm_start, 3742 .start = vma->vm_start,
3610 .len = vma->vm_end - vma->vm_start, 3743 .len = vma->vm_end - vma->vm_start,
3611 .pgoff = vma->vm_pgoff, 3744 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
3612 }, 3745 },
3613 }; 3746 };
3614 3747
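The mmap record now reports the file offset in bytes rather than in pages, and the u64 cast before the shift matters on 32-bit, where unsigned long arithmetic would wrap first. A minimal demonstration (PAGE_SHIFT assumed to be 12):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pgoff = 0x200000;	/* page index 2^21: 8GB into the file */

	/* shifting in 32 bits wraps (mod 2^32) before the widening: */
	uint64_t truncated = (uint64_t)(pgoff << 12);
	/* widening first keeps all the bits: */
	uint64_t bytes = (uint64_t)pgoff << 12;

	printf("%#llx %#llx\n", (unsigned long long)truncated,
	       (unsigned long long)bytes);	/* 0 0x200000000 */
	return 0;
}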
@@ -3688,12 +3821,12 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
3688 3821
3689 if (event->attr.freq) { 3822 if (event->attr.freq) {
3690 u64 now = perf_clock(); 3823 u64 now = perf_clock();
3691 s64 delta = now - hwc->freq_stamp; 3824 s64 delta = now - hwc->freq_time_stamp;
3692 3825
3693 hwc->freq_stamp = now; 3826 hwc->freq_time_stamp = now;
3694 3827
3695 if (delta > 0 && delta < TICK_NSEC) 3828 if (delta > 0 && delta < 2*TICK_NSEC)
3696 perf_adjust_period(event, NSEC_PER_SEC / (int)delta); 3829 perf_adjust_period(event, delta, hwc->last_period);
3697 } 3830 }
3698 3831
3699 /* 3832 /*
@@ -4184,7 +4317,7 @@ static const struct pmu perf_ops_task_clock = {
4184 .read = task_clock_perf_event_read, 4317 .read = task_clock_perf_event_read,
4185}; 4318};
4186 4319
4187#ifdef CONFIG_EVENT_PROFILE 4320#ifdef CONFIG_EVENT_TRACING
4188 4321
4189void perf_tp_event(int event_id, u64 addr, u64 count, void *record, 4322void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4190 int entry_size) 4323 int entry_size)
@@ -4289,7 +4422,7 @@ static void perf_event_free_filter(struct perf_event *event)
4289{ 4422{
4290} 4423}
4291 4424
4292#endif /* CONFIG_EVENT_PROFILE */ 4425#endif /* CONFIG_EVENT_TRACING */
4293 4426
4294#ifdef CONFIG_HAVE_HW_BREAKPOINT 4427#ifdef CONFIG_HAVE_HW_BREAKPOINT
4295static void bp_perf_event_destroy(struct perf_event *event) 4428static void bp_perf_event_destroy(struct perf_event *event)
@@ -4870,8 +5003,15 @@ inherit_event(struct perf_event *parent_event,
4870 else 5003 else
4871 child_event->state = PERF_EVENT_STATE_OFF; 5004 child_event->state = PERF_EVENT_STATE_OFF;
4872 5005
4873 if (parent_event->attr.freq) 5006 if (parent_event->attr.freq) {
4874 child_event->hw.sample_period = parent_event->hw.sample_period; 5007 u64 sample_period = parent_event->hw.sample_period;
5008 struct hw_perf_event *hwc = &child_event->hw;
5009
5010 hwc->sample_period = sample_period;
5011 hwc->last_period = sample_period;
5012
5013 atomic64_set(&hwc->period_left, sample_period);
5014 }
4875 5015
4876 child_event->overflow_handler = parent_event->overflow_handler; 5016 child_event->overflow_handler = parent_event->overflow_handler;
4877 5017
@@ -5039,7 +5179,11 @@ void perf_event_exit_task(struct task_struct *child)
5039 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); 5179 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
5040 5180
5041again: 5181again:
5042 list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list, 5182 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5183 group_entry)
5184 __perf_event_exit_task(child_event, child_ctx, child);
5185
5186 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5043 group_entry) 5187 group_entry)
5044 __perf_event_exit_task(child_event, child_ctx, child); 5188 __perf_event_exit_task(child_event, child_ctx, child);
5045 5189
@@ -5048,7 +5192,8 @@ again:
5048 * its siblings to the list, but we obtained 'tmp' before that which 5192 * its siblings to the list, but we obtained 'tmp' before that which
5049 * will still point to the list head terminating the iteration. 5193 * will still point to the list head terminating the iteration.
5050 */ 5194 */
5051 if (!list_empty(&child_ctx->group_list)) 5195 if (!list_empty(&child_ctx->pinned_groups) ||
5196 !list_empty(&child_ctx->flexible_groups))
5052 goto again; 5197 goto again;
5053 5198
5054 mutex_unlock(&child_ctx->mutex); 5199 mutex_unlock(&child_ctx->mutex);
@@ -5056,6 +5201,24 @@ again:
5056 put_ctx(child_ctx); 5201 put_ctx(child_ctx);
5057} 5202}
5058 5203
5204static void perf_free_event(struct perf_event *event,
5205 struct perf_event_context *ctx)
5206{
5207 struct perf_event *parent = event->parent;
5208
5209 if (WARN_ON_ONCE(!parent))
5210 return;
5211
5212 mutex_lock(&parent->child_mutex);
5213 list_del_init(&event->child_list);
5214 mutex_unlock(&parent->child_mutex);
5215
5216 fput(parent->filp);
5217
5218 list_del_event(event, ctx);
5219 free_event(event);
5220}
5221
5059/* 5222/*
5060 * free an unexposed, unused context as created by inheritance by 5223 * free an unexposed, unused context as created by inheritance by
5061 * init_task below, used by fork() in case of fail. 5224 * init_task below, used by fork() in case of fail.
@@ -5070,36 +5233,70 @@ void perf_event_free_task(struct task_struct *task)
5070 5233
5071 mutex_lock(&ctx->mutex); 5234 mutex_lock(&ctx->mutex);
5072again: 5235again:
5073 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) { 5236 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5074 struct perf_event *parent = event->parent; 5237 perf_free_event(event, ctx);
5075 5238
5076 if (WARN_ON_ONCE(!parent)) 5239 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5077 continue; 5240 group_entry)
5241 perf_free_event(event, ctx);
5242
5243 if (!list_empty(&ctx->pinned_groups) ||
5244 !list_empty(&ctx->flexible_groups))
5245 goto again;
5078 5246
5079 mutex_lock(&parent->child_mutex); 5247 mutex_unlock(&ctx->mutex);
5080 list_del_init(&event->child_list);
5081 mutex_unlock(&parent->child_mutex);
5082 5248
5083 fput(parent->filp); 5249 put_ctx(ctx);
5250}
5084 5251
5085 list_del_event(event, ctx); 5252static int
5086 free_event(event); 5253inherit_task_group(struct perf_event *event, struct task_struct *parent,
5254 struct perf_event_context *parent_ctx,
5255 struct task_struct *child,
5256 int *inherited_all)
5257{
5258 int ret;
5259 struct perf_event_context *child_ctx = child->perf_event_ctxp;
5260
5261 if (!event->attr.inherit) {
5262 *inherited_all = 0;
5263 return 0;
5087 } 5264 }
5088 5265
5089 if (!list_empty(&ctx->group_list)) 5266 if (!child_ctx) {
5090 goto again; 5267 /*
5268 * This is executed from the parent task context, so
5269 * inherit events that have been marked for cloning.
5270 * First allocate and initialize a context for the
5271 * child.
5272 */
5091 5273
5092 mutex_unlock(&ctx->mutex); 5274 child_ctx = kzalloc(sizeof(struct perf_event_context),
5275 GFP_KERNEL);
5276 if (!child_ctx)
5277 return -ENOMEM;
5093 5278
5094 put_ctx(ctx); 5279 __perf_event_init_context(child_ctx, child);
5280 child->perf_event_ctxp = child_ctx;
5281 get_task_struct(child);
5282 }
5283
5284 ret = inherit_group(event, parent, parent_ctx,
5285 child, child_ctx);
5286
5287 if (ret)
5288 *inherited_all = 0;
5289
5290 return ret;
5095} 5291}
5096 5292
5293
5097/* 5294/*
5098 * Initialize the perf_event context in task_struct 5295 * Initialize the perf_event context in task_struct
5099 */ 5296 */
5100int perf_event_init_task(struct task_struct *child) 5297int perf_event_init_task(struct task_struct *child)
5101{ 5298{
5102 struct perf_event_context *child_ctx = NULL, *parent_ctx; 5299 struct perf_event_context *child_ctx, *parent_ctx;
5103 struct perf_event_context *cloned_ctx; 5300 struct perf_event_context *cloned_ctx;
5104 struct perf_event *event; 5301 struct perf_event *event;
5105 struct task_struct *parent = current; 5302 struct task_struct *parent = current;
@@ -5137,41 +5334,22 @@ int perf_event_init_task(struct task_struct *child)
5137 * We don't have to disable NMIs - we are only looking at 5334 * We don't have to disable NMIs - we are only looking at
5138 * the list, not manipulating it: 5335 * the list, not manipulating it:
5139 */ 5336 */
5140 list_for_each_entry(event, &parent_ctx->group_list, group_entry) { 5337 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
5141 5338 ret = inherit_task_group(event, parent, parent_ctx, child,
5142 if (!event->attr.inherit) { 5339 &inherited_all);
5143 inherited_all = 0; 5340 if (ret)
5144 continue; 5341 break;
5145 } 5342 }
5146
5147 if (!child->perf_event_ctxp) {
5148 /*
5149 * This is executed from the parent task context, so
5150 * inherit events that have been marked for cloning.
5151 * First allocate and initialize a context for the
5152 * child.
5153 */
5154
5155 child_ctx = kzalloc(sizeof(struct perf_event_context),
5156 GFP_KERNEL);
5157 if (!child_ctx) {
5158 ret = -ENOMEM;
5159 break;
5160 }
5161
5162 __perf_event_init_context(child_ctx, child);
5163 child->perf_event_ctxp = child_ctx;
5164 get_task_struct(child);
5165 }
5166 5343
5167 ret = inherit_group(event, parent, parent_ctx, 5344 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
5168 child, child_ctx); 5345 ret = inherit_task_group(event, parent, parent_ctx, child,
5169 if (ret) { 5346 &inherited_all);
5170 inherited_all = 0; 5347 if (ret)
5171 break; 5348 break;
5172 }
5173 } 5349 }
5174 5350
5351 child_ctx = child->perf_event_ctxp;
5352
5175 if (child_ctx && inherited_all) { 5353 if (child_ctx && inherited_all) {
5176 /* 5354 /*
5177 * Mark the child context as a clone of the parent 5355 * Mark the child context as a clone of the parent
@@ -5220,7 +5398,9 @@ static void __perf_event_exit_cpu(void *info)
5220 struct perf_event_context *ctx = &cpuctx->ctx; 5398 struct perf_event_context *ctx = &cpuctx->ctx;
5221 struct perf_event *event, *tmp; 5399 struct perf_event *event, *tmp;
5222 5400
5223 list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) 5401 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5402 __perf_event_remove_from_context(event);
5403 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
5224 __perf_event_remove_from_context(event); 5404 __perf_event_remove_from_context(event);
5225} 5405}
5226static void perf_event_exit_cpu(int cpu) 5406static void perf_event_exit_cpu(int cpu)
@@ -5258,6 +5438,10 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5258 perf_event_exit_cpu(cpu); 5438 perf_event_exit_cpu(cpu);
5259 break; 5439 break;
5260 5440
5441 case CPU_DEAD:
5442 hw_perf_event_setup_offline(cpu);
5443 break;
5444
5261 default: 5445 default:
5262 break; 5446 break;
5263 } 5447 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a8fb30a91b1..3e71ebb101c2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2794,7 +2794,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2794 */ 2794 */
2795 prev_state = prev->state; 2795 prev_state = prev->state;
2796 finish_arch_switch(prev); 2796 finish_arch_switch(prev);
2797 perf_event_task_sched_in(current, cpu_of(rq)); 2797#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2798 local_irq_disable();
2799#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2800 perf_event_task_sched_in(current);
2801#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2802 local_irq_enable();
2803#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2798 finish_lock_switch(rq, prev); 2804 finish_lock_switch(rq, prev);
2799 2805
2800 fire_sched_in_preempt_notifiers(current); 2806 fire_sched_in_preempt_notifiers(current);
@@ -5309,7 +5315,7 @@ void scheduler_tick(void)
5309 curr->sched_class->task_tick(rq, curr, 0); 5315 curr->sched_class->task_tick(rq, curr, 0);
5310 raw_spin_unlock(&rq->lock); 5316 raw_spin_unlock(&rq->lock);
5311 5317
5312 perf_event_task_tick(curr, cpu); 5318 perf_event_task_tick(curr);
5313 5319
5314#ifdef CONFIG_SMP 5320#ifdef CONFIG_SMP
5315 rq->idle_at_tick = idle_cpu(cpu); 5321 rq->idle_at_tick = idle_cpu(cpu);
@@ -5523,7 +5529,7 @@ need_resched_nonpreemptible:
5523 5529
5524 if (likely(prev != next)) { 5530 if (likely(prev != next)) {
5525 sched_info_switch(prev, next); 5531 sched_info_switch(prev, next);
5526 perf_event_task_sched_out(prev, next, cpu); 5532 perf_event_task_sched_out(prev, next);
5527 5533
5528 rq->nr_switches++; 5534 rq->nr_switches++;
5529 rq->curr = next; 5535 rq->curr = next;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index a09502e2ef75..7c1a67ef0274 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -500,22 +500,17 @@ EXPORT_SYMBOL(tasklet_kill);
500 */ 500 */
501 501
502/* 502/*
503 * The trampoline is called when the hrtimer expires. If this is 503 * The trampoline is called when the hrtimer expires. It schedules a tasklet
504 * called from the hrtimer interrupt then we schedule the tasklet as 504 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
505 * the timer callback function expects to run in softirq context. If 505 * hrtimer callback, but from softirq context.
506 * it's called in softirq context anyway (i.e. high resolution timers
507 * disabled) then the hrtimer callback is called right away.
508 */ 506 */
509static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer) 507static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
510{ 508{
511 struct tasklet_hrtimer *ttimer = 509 struct tasklet_hrtimer *ttimer =
512 container_of(timer, struct tasklet_hrtimer, timer); 510 container_of(timer, struct tasklet_hrtimer, timer);
513 511
514 if (hrtimer_is_hres_active(timer)) { 512 tasklet_hi_schedule(&ttimer->tasklet);
515 tasklet_hi_schedule(&ttimer->tasklet); 513 return HRTIMER_NORESTART;
516 return HRTIMER_NORESTART;
517 }
518 return ttimer->function(timer);
519} 514}
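Previously, when high-resolution timers were inactive, the callback was invoked directly because the expiry already ran in softirq context; now it always goes through the tasklet. A minimal usage sketch, assuming the tasklet_hrtimer API from <linux/interrupt.h> of this era (the callback and timeout names are illustrative):

static enum hrtimer_restart my_cb(struct hrtimer *t)
{
	/* always invoked from tasklet (softirq) context now */
	return HRTIMER_NORESTART;
}

static struct tasklet_hrtimer my_th;

static void my_setup(void)
{
	tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&my_th.timer, ns_to_ktime(NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}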
520 515
521/* 516/*
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..0d4c7898ab80 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
25static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */ 25static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
26static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */ 26static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
27static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); 27static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
28static DEFINE_PER_CPU(bool, softlock_touch_sync);
28 29
29static int __read_mostly did_panic; 30static int __read_mostly did_panic;
30int __read_mostly softlockup_thresh = 60; 31int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
79} 80}
80EXPORT_SYMBOL(touch_softlockup_watchdog); 81EXPORT_SYMBOL(touch_softlockup_watchdog);
81 82
83void touch_softlockup_watchdog_sync(void)
84{
85 __raw_get_cpu_var(softlock_touch_sync) = true;
86 __raw_get_cpu_var(softlockup_touch_ts) = 0;
87}
88
82void touch_all_softlockup_watchdogs(void) 89void touch_all_softlockup_watchdogs(void)
83{ 90{
84 int cpu; 91 int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
118 } 125 }
119 126
120 if (touch_ts == 0) { 127 if (touch_ts == 0) {
128 if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
129 /*
130 * If the time stamp was touched atomically
131 * make sure the scheduler tick is up to date.
132 */
133 per_cpu(softlock_touch_sync, this_cpu) = false;
134 sched_clock_tick();
135 }
121 __touch_softlockup_watchdog(); 136 __touch_softlockup_watchdog();
122 return; 137 return;
123 } 138 }
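touch_softlockup_watchdog_sync() is meant for callers that stopped the machine long enough for sched_clock() to go stale, e.g. resuming from a debugger: the per-cpu flag makes softlockup_tick() call sched_clock_tick() before trusting the next timestamp. A sketch of the intended call site (the caller name is illustrative):

static void resume_from_debugger(void)	/* e.g. the kgdb resume path */
{
	/* clocks were frozen while stopped; resync before the
	 * watchdog compares timestamps again */
	touch_softlockup_watchdog_sync();
}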
diff --git a/kernel/sys.c b/kernel/sys.c
index 26a6b73a6b85..18bde979f346 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
222 if (which > PRIO_USER || which < PRIO_PROCESS) 222 if (which > PRIO_USER || which < PRIO_PROCESS)
223 return -EINVAL; 223 return -EINVAL;
224 224
225 rcu_read_lock();
225 read_lock(&tasklist_lock); 226 read_lock(&tasklist_lock);
226 switch (which) { 227 switch (which) {
227 case PRIO_PROCESS: 228 case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
267 } 268 }
268out_unlock: 269out_unlock:
269 read_unlock(&tasklist_lock); 270 read_unlock(&tasklist_lock);
271 rcu_read_unlock();
270 272
271 return retval; 273 return retval;
272} 274}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7faaa32fbf4f..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
880 880
881 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); 881 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
882} 882}
883EXPORT_SYMBOL_GPL(getboottime);
883 884
884/** 885/**
885 * monotonic_to_bootbased - Convert the monotonic time to boot based. 886 * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
889{ 890{
890 *ts = timespec_add_safe(*ts, total_sleep_time); 891 *ts = timespec_add_safe(*ts, total_sleep_time);
891} 892}
893EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
892 894
893unsigned long get_seconds(void) 895unsigned long get_seconds(void)
894{ 896{
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cd9ecd89ec77..d00c6fe23f54 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -51,7 +51,9 @@ endif
51obj-$(CONFIG_EVENT_TRACING) += trace_events.o 51obj-$(CONFIG_EVENT_TRACING) += trace_events.o
52obj-$(CONFIG_EVENT_TRACING) += trace_export.o 52obj-$(CONFIG_EVENT_TRACING) += trace_export.o
53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o 53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
54obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o 54ifeq ($(CONFIG_PERF_EVENTS),y)
55obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
56endif
55obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o 57obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
56obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o 58obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
57obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o 59obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1e6640f80454..1904797f4a8a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -22,7 +22,6 @@
22#include <linux/hardirq.h> 22#include <linux/hardirq.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <linux/kprobes.h>
26#include <linux/ftrace.h> 25#include <linux/ftrace.h>
27#include <linux/sysctl.h> 26#include <linux/sysctl.h>
28#include <linux/ctype.h> 27#include <linux/ctype.h>
@@ -898,36 +897,6 @@ static struct dyn_ftrace *ftrace_free_records;
898 } \ 897 } \
899 } 898 }
900 899
901#ifdef CONFIG_KPROBES
902
903static int frozen_record_count;
904
905static inline void freeze_record(struct dyn_ftrace *rec)
906{
907 if (!(rec->flags & FTRACE_FL_FROZEN)) {
908 rec->flags |= FTRACE_FL_FROZEN;
909 frozen_record_count++;
910 }
911}
912
913static inline void unfreeze_record(struct dyn_ftrace *rec)
914{
915 if (rec->flags & FTRACE_FL_FROZEN) {
916 rec->flags &= ~FTRACE_FL_FROZEN;
917 frozen_record_count--;
918 }
919}
920
921static inline int record_frozen(struct dyn_ftrace *rec)
922{
923 return rec->flags & FTRACE_FL_FROZEN;
924}
925#else
926# define freeze_record(rec) ({ 0; })
927# define unfreeze_record(rec) ({ 0; })
928# define record_frozen(rec) ({ 0; })
929#endif /* CONFIG_KPROBES */
930
931static void ftrace_free_rec(struct dyn_ftrace *rec) 900static void ftrace_free_rec(struct dyn_ftrace *rec)
932{ 901{
933 rec->freelist = ftrace_free_records; 902 rec->freelist = ftrace_free_records;
@@ -1025,6 +994,21 @@ static void ftrace_bug(int failed, unsigned long ip)
1025} 994}
1026 995
1027 996
997/* Return 1 if the address range is reserved for ftrace */
998int ftrace_text_reserved(void *start, void *end)
999{
1000 struct dyn_ftrace *rec;
1001 struct ftrace_page *pg;
1002
1003 do_for_each_ftrace_rec(pg, rec) {
1004 if (rec->ip <= (unsigned long)end &&
1005 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1006 return 1;
1007 } while_for_each_ftrace_rec();
1008 return 0;
1009}
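The condition is the usual interval-overlap test between the closed range [start, end] and the half-open mcount site [ip, ip + MCOUNT_INSN_SIZE). In isolation, as runnable C:

#include <stdio.h>

/* same test as above: [start, end] vs [ip, ip + len) */
static int overlaps(unsigned long start, unsigned long end,
		    unsigned long ip, unsigned long len)
{
	return ip <= end && ip + len > start;
}

int main(void)
{
	printf("%d\n", overlaps(0x1000, 0x1004, 0x1003, 5));	/* 1 */
	printf("%d\n", overlaps(0x1000, 0x1004, 0x1005, 5));	/* 0 */
	return 0;
}

The consumer (not shown in this hunk) is the kprobes registration path, which uses it to refuse probes on ftrace's mcount call sites, replacing the freeze_record() machinery removed above.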
1010
1011
1028static int 1012static int
1029__ftrace_replace_code(struct dyn_ftrace *rec, int enable) 1013__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1030{ 1014{
@@ -1076,14 +1060,6 @@ static void ftrace_replace_code(int enable)
1076 !(rec->flags & FTRACE_FL_CONVERTED)) 1060 !(rec->flags & FTRACE_FL_CONVERTED))
1077 continue; 1061 continue;
1078 1062
1079 /* ignore updates to this record's mcount site */
1080 if (get_kprobe((void *)rec->ip)) {
1081 freeze_record(rec);
1082 continue;
1083 } else {
1084 unfreeze_record(rec);
1085 }
1086
1087 failed = __ftrace_replace_code(rec, enable); 1063 failed = __ftrace_replace_code(rec, enable);
1088 if (failed) { 1064 if (failed) {
1089 rec->flags |= FTRACE_FL_FAILED; 1065 rec->flags |= FTRACE_FL_FAILED;
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 9e25573242cf..f0d693005075 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -6,14 +6,12 @@
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/kprobes.h>
9#include "trace.h" 10#include "trace.h"
10 11
11 12
12char *perf_trace_buf; 13static char *perf_trace_buf;
13EXPORT_SYMBOL_GPL(perf_trace_buf); 14static char *perf_trace_buf_nmi;
14
15char *perf_trace_buf_nmi;
16EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
17 15
18typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t; 16typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;
19 17
@@ -120,3 +118,47 @@ void ftrace_profile_disable(int event_id)
120 } 118 }
121 mutex_unlock(&event_mutex); 119 mutex_unlock(&event_mutex);
122} 120}
121
122__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
123 int *rctxp, unsigned long *irq_flags)
124{
125 struct trace_entry *entry;
126 char *trace_buf, *raw_data;
127 int pc, cpu;
128
129 pc = preempt_count();
130
131 /* Protect the per cpu buffer, begin the rcu read side */
132 local_irq_save(*irq_flags);
133
134 *rctxp = perf_swevent_get_recursion_context();
135 if (*rctxp < 0)
136 goto err_recursion;
137
138 cpu = smp_processor_id();
139
140 if (in_nmi())
141 trace_buf = rcu_dereference(perf_trace_buf_nmi);
142 else
143 trace_buf = rcu_dereference(perf_trace_buf);
144
145 if (!trace_buf)
146 goto err;
147
148 raw_data = per_cpu_ptr(trace_buf, cpu);
149
150 /* zero the dead bytes from alignment so we don't leak stack to user */
151 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
152
153 entry = (struct trace_entry *)raw_data;
154 tracing_generic_entry_update(entry, *irq_flags, pc);
155 entry->type = type;
156
157 return raw_data;
158err:
159 perf_swevent_put_recursion_context(*rctxp);
160err_recursion:
161 local_irq_restore(*irq_flags);
162 return NULL;
163}
164EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
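Each converted profile handler now follows the same shape: prepare a per-cpu record, fill the event-specific fields, submit. The skeleton below shows the pattern, with ftrace_perf_buf_submit() being the companion helper used by the kprobe and syscall handlers later in this diff (example_profile_handler is illustrative, not part of the patch):

static void example_profile_handler(struct ftrace_event_call *call,
				    unsigned long ip, int size)
{
	unsigned long irq_flags;
	void *entry;
	int rctx;

	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
	if (!entry)
		return;		/* recursion hit, or buffers not allocated */

	/* ... fill the event-specific fields of the record ... */

	ftrace_perf_buf_submit(entry, size, rctx, ip, 1, irq_flags);
}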
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e42af9aad69f..4615f62a04f1 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1371,7 +1371,7 @@ out_unlock:
1371 return err; 1371 return err;
1372} 1372}
1373 1373
1374#ifdef CONFIG_EVENT_PROFILE 1374#ifdef CONFIG_PERF_EVENTS
1375 1375
1376void ftrace_profile_free_filter(struct perf_event *event) 1376void ftrace_profile_free_filter(struct perf_event *event)
1377{ 1377{
@@ -1439,5 +1439,5 @@ out_unlock:
1439 return err; 1439 return err;
1440} 1440}
1441 1441
1442#endif /* CONFIG_EVENT_PROFILE */ 1442#endif /* CONFIG_PERF_EVENTS */
1443 1443
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6ea90c0e2c96..356c10227c98 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -91,11 +91,6 @@ static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
91 return retval; 91 return retval;
92} 92}
93 93
94static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
95{
96 return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
97}
98
99static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, 94static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
100 void *dummy) 95 void *dummy)
101{ 96{
@@ -231,9 +226,7 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
231{ 226{
232 int ret = -EINVAL; 227 int ret = -EINVAL;
233 228
234 if (ff->func == fetch_argument) 229 if (ff->func == fetch_register) {
235 ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
236 else if (ff->func == fetch_register) {
237 const char *name; 230 const char *name;
238 name = regs_query_register_name((unsigned int)((long)ff->data)); 231 name = regs_query_register_name((unsigned int)((long)ff->data));
239 ret = snprintf(buf, n, "%%%s", name); 232 ret = snprintf(buf, n, "%%%s", name);
@@ -489,14 +482,6 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
489 } 482 }
490 } else 483 } else
491 ret = -EINVAL; 484 ret = -EINVAL;
492 } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
493 ret = strict_strtoul(arg + 3, 10, &param);
494 if (ret || param > PARAM_MAX_ARGS)
495 ret = -EINVAL;
496 else {
497 ff->func = fetch_argument;
498 ff->data = (void *)param;
499 }
500 } else 485 } else
501 ret = -EINVAL; 486 ret = -EINVAL;
502 return ret; 487 return ret;
@@ -611,7 +596,6 @@ static int create_trace_probe(int argc, char **argv)
611 * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] 596 * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
612 * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] 597 * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
613 * Fetch args: 598 * Fetch args:
614 * $argN : fetch Nth of function argument. (N:0-)
615 * $retval : fetch return value 599 * $retval : fetch return value
616 * $stack : fetch stack address 600 * $stack : fetch stack address
617 * $stackN : fetch Nth of stack (N:0-) 601 * $stackN : fetch Nth of stack (N:0-)
@@ -689,7 +673,7 @@ static int create_trace_probe(int argc, char **argv)
689 return -EINVAL; 673 return -EINVAL;
690 } 674 }
691 /* an address specified */ 675 /* an address specified */
692 ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr); 676 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
693 if (ret) { 677 if (ret) {
694 pr_info("Failed to parse address.\n"); 678 pr_info("Failed to parse address.\n");
695 return ret; 679 return ret;
@@ -958,7 +942,7 @@ static const struct file_operations kprobe_profile_ops = {
958}; 942};
959 943
960/* Kprobe handler */ 944/* Kprobe handler */
961static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) 945static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
962{ 946{
963 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 947 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
964 struct kprobe_trace_entry *entry; 948 struct kprobe_trace_entry *entry;
@@ -978,7 +962,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
978 event = trace_current_buffer_lock_reserve(&buffer, call->id, size, 962 event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
979 irq_flags, pc); 963 irq_flags, pc);
980 if (!event) 964 if (!event)
981 return 0; 965 return;
982 966
983 entry = ring_buffer_event_data(event); 967 entry = ring_buffer_event_data(event);
984 entry->nargs = tp->nr_args; 968 entry->nargs = tp->nr_args;
@@ -988,11 +972,10 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
988 972
989 if (!filter_current_check_discard(buffer, call, entry, event)) 973 if (!filter_current_check_discard(buffer, call, entry, event))
990 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); 974 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
991 return 0;
992} 975}
993 976
994/* Kretprobe handler */ 977/* Kretprobe handler */
995static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, 978static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
996 struct pt_regs *regs) 979 struct pt_regs *regs)
997{ 980{
998 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 981 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1011,7 +994,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
1011 event = trace_current_buffer_lock_reserve(&buffer, call->id, size, 994 event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
1012 irq_flags, pc); 995 irq_flags, pc);
1013 if (!event) 996 if (!event)
1014 return 0; 997 return;
1015 998
1016 entry = ring_buffer_event_data(event); 999 entry = ring_buffer_event_data(event);
1017 entry->nargs = tp->nr_args; 1000 entry->nargs = tp->nr_args;
@@ -1022,8 +1005,6 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
1022 1005
1023 if (!filter_current_check_discard(buffer, call, entry, event)) 1006 if (!filter_current_check_discard(buffer, call, entry, event))
1024 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); 1007 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
1025
1026 return 0;
1027} 1008}
1028 1009
1029/* Event entry printers */ 1010/* Event entry printers */
@@ -1250,137 +1231,67 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call,
1250 ", REC->" FIELD_STRING_RETIP); 1231 ", REC->" FIELD_STRING_RETIP);
1251} 1232}
1252 1233
1253#ifdef CONFIG_EVENT_PROFILE 1234#ifdef CONFIG_PERF_EVENTS
1254 1235
1255/* Kprobe profile handler */ 1236/* Kprobe profile handler */
1256static __kprobes int kprobe_profile_func(struct kprobe *kp, 1237static __kprobes void kprobe_profile_func(struct kprobe *kp,
1257 struct pt_regs *regs) 1238 struct pt_regs *regs)
1258{ 1239{
1259 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1240 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1260 struct ftrace_event_call *call = &tp->call; 1241 struct ftrace_event_call *call = &tp->call;
1261 struct kprobe_trace_entry *entry; 1242 struct kprobe_trace_entry *entry;
1262 struct trace_entry *ent; 1243 int size, __size, i;
1263 int size, __size, i, pc, __cpu;
1264 unsigned long irq_flags; 1244 unsigned long irq_flags;
1265 char *trace_buf;
1266 char *raw_data;
1267 int rctx; 1245 int rctx;
1268 1246
1269 pc = preempt_count();
1270 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); 1247 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1271 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1248 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1272 size -= sizeof(u32); 1249 size -= sizeof(u32);
1273 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1250 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
1274 "profile buffer not large enough")) 1251 "profile buffer not large enough"))
1275 return 0; 1252 return;
1276
1277 /*
1278 * Protect the non nmi buffer
1279 * This also protects the rcu read side
1280 */
1281 local_irq_save(irq_flags);
1282 1253
1283 rctx = perf_swevent_get_recursion_context(); 1254 entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
1284 if (rctx < 0) 1255 if (!entry)
1285 goto end_recursion; 1256 return;
1286
1287 __cpu = smp_processor_id();
1288
1289 if (in_nmi())
1290 trace_buf = rcu_dereference(perf_trace_buf_nmi);
1291 else
1292 trace_buf = rcu_dereference(perf_trace_buf);
1293
1294 if (!trace_buf)
1295 goto end;
1296
1297 raw_data = per_cpu_ptr(trace_buf, __cpu);
1298
1299 /* Zero dead bytes from alignment to avoid buffer leak to userspace */
1300 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
1301 entry = (struct kprobe_trace_entry *)raw_data;
1302 ent = &entry->ent;
1303 1257
1304 tracing_generic_entry_update(ent, irq_flags, pc);
1305 ent->type = call->id;
1306 entry->nargs = tp->nr_args; 1258 entry->nargs = tp->nr_args;
1307 entry->ip = (unsigned long)kp->addr; 1259 entry->ip = (unsigned long)kp->addr;
1308 for (i = 0; i < tp->nr_args; i++) 1260 for (i = 0; i < tp->nr_args; i++)
1309 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1261 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1310 perf_tp_event(call->id, entry->ip, 1, entry, size);
1311 1262
1312end: 1263 ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
1313 perf_swevent_put_recursion_context(rctx);
1314end_recursion:
1315 local_irq_restore(irq_flags);
1316
1317 return 0;
1318} 1264}
1319 1265
1320/* Kretprobe profile handler */ 1266/* Kretprobe profile handler */
1321static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, 1267static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
1322 struct pt_regs *regs) 1268 struct pt_regs *regs)
1323{ 1269{
1324 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1270 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1325 struct ftrace_event_call *call = &tp->call; 1271 struct ftrace_event_call *call = &tp->call;
1326 struct kretprobe_trace_entry *entry; 1272 struct kretprobe_trace_entry *entry;
1327 struct trace_entry *ent; 1273 int size, __size, i;
1328 int size, __size, i, pc, __cpu;
1329 unsigned long irq_flags; 1274 unsigned long irq_flags;
1330 char *trace_buf;
1331 char *raw_data;
1332 int rctx; 1275 int rctx;
1333 1276
1334 pc = preempt_count();
1335 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); 1277 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1336 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1278 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1337 size -= sizeof(u32); 1279 size -= sizeof(u32);
1338 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1280 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
1339 "profile buffer not large enough")) 1281 "profile buffer not large enough"))
1340 return 0; 1282 return;
1341
1342 /*
1343 * Protect the non nmi buffer
1344 * This also protects the rcu read side
1345 */
1346 local_irq_save(irq_flags);
1347
1348 rctx = perf_swevent_get_recursion_context();
1349 if (rctx < 0)
1350 goto end_recursion;
1351
1352 __cpu = smp_processor_id();
1353
1354 if (in_nmi())
1355 trace_buf = rcu_dereference(perf_trace_buf_nmi);
1356 else
1357 trace_buf = rcu_dereference(perf_trace_buf);
1358
1359 if (!trace_buf)
1360 goto end;
1361
1362 raw_data = per_cpu_ptr(trace_buf, __cpu);
1363 1283
1364 /* Zero dead bytes from alignment to avoid buffer leak to userspace */ 1284 entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
1365 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 1285 if (!entry)
1366 entry = (struct kretprobe_trace_entry *)raw_data; 1286 return;
1367 ent = &entry->ent;
1368 1287
1369 tracing_generic_entry_update(ent, irq_flags, pc);
1370 ent->type = call->id;
1371 entry->nargs = tp->nr_args; 1288 entry->nargs = tp->nr_args;
1372 entry->func = (unsigned long)tp->rp.kp.addr; 1289 entry->func = (unsigned long)tp->rp.kp.addr;
1373 entry->ret_ip = (unsigned long)ri->ret_addr; 1290 entry->ret_ip = (unsigned long)ri->ret_addr;
1374 for (i = 0; i < tp->nr_args; i++) 1291 for (i = 0; i < tp->nr_args; i++)
1375 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1292 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1376 perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
1377
1378end:
1379 perf_swevent_put_recursion_context(rctx);
1380end_recursion:
1381 local_irq_restore(irq_flags);
1382 1293
1383 return 0; 1294 ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
1384} 1295}
1385 1296
1386static int probe_profile_enable(struct ftrace_event_call *call) 1297static int probe_profile_enable(struct ftrace_event_call *call)
@@ -1408,7 +1319,7 @@ static void probe_profile_disable(struct ftrace_event_call *call)
1408 disable_kprobe(&tp->rp.kp); 1319 disable_kprobe(&tp->rp.kp);
1409 } 1320 }
1410} 1321}
1411#endif /* CONFIG_EVENT_PROFILE */ 1322#endif /* CONFIG_PERF_EVENTS */
1412 1323
1413 1324
1414static __kprobes 1325static __kprobes
@@ -1418,10 +1329,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1418 1329
1419 if (tp->flags & TP_FLAG_TRACE) 1330 if (tp->flags & TP_FLAG_TRACE)
1420 kprobe_trace_func(kp, regs); 1331 kprobe_trace_func(kp, regs);
1421#ifdef CONFIG_EVENT_PROFILE 1332#ifdef CONFIG_PERF_EVENTS
1422 if (tp->flags & TP_FLAG_PROFILE) 1333 if (tp->flags & TP_FLAG_PROFILE)
1423 kprobe_profile_func(kp, regs); 1334 kprobe_profile_func(kp, regs);
1424#endif /* CONFIG_EVENT_PROFILE */ 1335#endif
1425 return 0; /* We don't tweak the kernel, so just return 0 */ 1336 return 0; /* We don't tweak the kernel, so just return 0 */
1426} 1337}
1427 1338
@@ -1432,10 +1343,10 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1432 1343
1433 if (tp->flags & TP_FLAG_TRACE) 1344 if (tp->flags & TP_FLAG_TRACE)
1434 kretprobe_trace_func(ri, regs); 1345 kretprobe_trace_func(ri, regs);
1435#ifdef CONFIG_EVENT_PROFILE 1346#ifdef CONFIG_PERF_EVENTS
1436 if (tp->flags & TP_FLAG_PROFILE) 1347 if (tp->flags & TP_FLAG_PROFILE)
1437 kretprobe_profile_func(ri, regs); 1348 kretprobe_profile_func(ri, regs);
1438#endif /* CONFIG_EVENT_PROFILE */ 1349#endif
1439 return 0; /* We don't tweak the kernel, so just return 0 */ 1350 return 0; /* We don't tweak the kernel, so just return 0 */
1440} 1351}
1441 1352
@@ -1464,7 +1375,7 @@ static int register_probe_event(struct trace_probe *tp)
1464 call->regfunc = probe_event_enable; 1375 call->regfunc = probe_event_enable;
1465 call->unregfunc = probe_event_disable; 1376 call->unregfunc = probe_event_disable;
1466 1377
1467#ifdef CONFIG_EVENT_PROFILE 1378#ifdef CONFIG_PERF_EVENTS
1468 call->profile_enable = probe_profile_enable; 1379 call->profile_enable = probe_profile_enable;
1469 call->profile_disable = probe_profile_disable; 1380 call->profile_disable = probe_profile_disable;
1470#endif 1381#endif
@@ -1523,28 +1434,67 @@ static int kprobe_trace_selftest_target(int a1, int a2, int a3,
1523 1434
1524static __init int kprobe_trace_self_tests_init(void) 1435static __init int kprobe_trace_self_tests_init(void)
1525{ 1436{
1526 int ret; 1437 int ret, warn = 0;
1527 int (*target)(int, int, int, int, int, int); 1438 int (*target)(int, int, int, int, int, int);
1439 struct trace_probe *tp;
1528 1440
1529 target = kprobe_trace_selftest_target; 1441 target = kprobe_trace_selftest_target;
1530 1442
1531 pr_info("Testing kprobe tracing: "); 1443 pr_info("Testing kprobe tracing: ");
1532 1444
1533 ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " 1445 ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
1534 "$arg1 $arg2 $arg3 $arg4 $stack $stack0"); 1446 "$stack $stack0 +0($stack)");
1535 if (WARN_ON_ONCE(ret)) 1447 if (WARN_ON_ONCE(ret)) {
1536 pr_warning("error enabling function entry\n"); 1448 pr_warning("error probing function entry.\n");
1449 warn++;
1450 } else {
1451 /* Enable trace point */
1452 tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
1453 if (WARN_ON_ONCE(tp == NULL)) {
1454 pr_warning("error getting new probe.\n");
1455 warn++;
1456 } else
1457 probe_event_enable(&tp->call);
1458 }
1537 1459
1538 ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " 1460 ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
1539 "$retval"); 1461 "$retval");
1540 if (WARN_ON_ONCE(ret)) 1462 if (WARN_ON_ONCE(ret)) {
1541 pr_warning("error enabling function return\n"); 1463 pr_warning("error probing function return.\n");
1464 warn++;
1465 } else {
1466 /* Enable trace point */
1467 tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
1468 if (WARN_ON_ONCE(tp == NULL)) {
1469 pr_warning("error getting new probe.\n");
1470 warn++;
1471 } else
1472 probe_event_enable(&tp->call);
1473 }
1474
1475 if (warn)
1476 goto end;
1542 1477
1543 ret = target(1, 2, 3, 4, 5, 6); 1478 ret = target(1, 2, 3, 4, 5, 6);
1544 1479
1545 cleanup_all_probes(); 1480 ret = command_trace_probe("-:testprobe");
1481 if (WARN_ON_ONCE(ret)) {
1482 pr_warning("error deleting a probe.\n");
1483 warn++;
1484 }
1485
1486 ret = command_trace_probe("-:testprobe2");
1487 if (WARN_ON_ONCE(ret)) {
1488 pr_warning("error deleting a probe.\n");
1489 warn++;
1490 }
1546 1491
1547 pr_cont("OK\n"); 1492end:
1493 cleanup_all_probes();
1494 if (warn)
1495 pr_cont("NG: Some tests failed. Please check them.\n");
1496 else
1497 pr_cont("OK\n");
1548 return 0; 1498 return 0;
1549} 1499}
1550 1500
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee30..f4bc9b27de5f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
157 unsigned long val, flags; 157 unsigned long val, flags;
158 char buf[64]; 158 char buf[64];
159 int ret; 159 int ret;
160 int cpu;
160 161
161 if (count >= sizeof(buf)) 162 if (count >= sizeof(buf))
162 return -EINVAL; 163 return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
171 return ret; 172 return ret;
172 173
173 local_irq_save(flags); 174 local_irq_save(flags);
175
176 /*
177 * In case we trace inside arch_spin_lock() or after it (NMI),
178 * we would take max_stack_lock recursively, so we also need
179 * to increase the percpu trace_active here.
180 */
181 cpu = smp_processor_id();
182 per_cpu(trace_active, cpu)++;
183
174 arch_spin_lock(&max_stack_lock); 184 arch_spin_lock(&max_stack_lock);
175 *ptr = val; 185 *ptr = val;
176 arch_spin_unlock(&max_stack_lock); 186 arch_spin_unlock(&max_stack_lock);
187
188 per_cpu(trace_active, cpu)--;
177 local_irq_restore(flags); 189 local_irq_restore(flags);
178 190
179 return count; 191 return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
206 218
207static void *t_start(struct seq_file *m, loff_t *pos) 219static void *t_start(struct seq_file *m, loff_t *pos)
208{ 220{
221 int cpu;
222
209 local_irq_disable(); 223 local_irq_disable();
224
225 cpu = smp_processor_id();
226 per_cpu(trace_active, cpu)++;
227
210 arch_spin_lock(&max_stack_lock); 228 arch_spin_lock(&max_stack_lock);
211 229
212 if (*pos == 0) 230 if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
217 235
218static void t_stop(struct seq_file *m, void *p) 236static void t_stop(struct seq_file *m, void *p)
219{ 237{
238 int cpu;
239
220 arch_spin_unlock(&max_stack_lock); 240 arch_spin_unlock(&max_stack_lock);
241
242 cpu = smp_processor_id();
243 per_cpu(trace_active, cpu)--;
244
221 local_irq_enable(); 245 local_irq_enable();
222} 246}
223 247
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 75289f372dd2..4e332b9e449c 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -421,7 +421,7 @@ int __init init_ftrace_syscalls(void)
421} 421}
422core_initcall(init_ftrace_syscalls); 422core_initcall(init_ftrace_syscalls);
423 423
424#ifdef CONFIG_EVENT_PROFILE 424#ifdef CONFIG_PERF_EVENTS
425 425
426static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); 426static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
427static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); 427static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
@@ -433,12 +433,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
433 struct syscall_metadata *sys_data; 433 struct syscall_metadata *sys_data;
434 struct syscall_trace_enter *rec; 434 struct syscall_trace_enter *rec;
435 unsigned long flags; 435 unsigned long flags;
436 char *trace_buf;
437 char *raw_data;
438 int syscall_nr; 436 int syscall_nr;
439 int rctx; 437 int rctx;
440 int size; 438 int size;
441 int cpu;
442 439
443 syscall_nr = syscall_get_nr(current, regs); 440 syscall_nr = syscall_get_nr(current, regs);
444 if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) 441 if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -457,37 +454,15 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
457 "profile buffer not large enough")) 454 "profile buffer not large enough"))
458 return; 455 return;
459 456
460 /* Protect the per cpu buffer, begin the rcu read side */ 457 rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
461 local_irq_save(flags); 458 sys_data->enter_event->id, &rctx, &flags);
462 459 if (!rec)
463 rctx = perf_swevent_get_recursion_context(); 460 return;
464 if (rctx < 0)
465 goto end_recursion;
466
467 cpu = smp_processor_id();
468
469 trace_buf = rcu_dereference(perf_trace_buf);
470
471 if (!trace_buf)
472 goto end;
473
474 raw_data = per_cpu_ptr(trace_buf, cpu);
475
476 /* zero the dead bytes from align to not leak stack to user */
477 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
478 461
479 rec = (struct syscall_trace_enter *) raw_data;
480 tracing_generic_entry_update(&rec->ent, 0, 0);
481 rec->ent.type = sys_data->enter_event->id;
482 rec->nr = syscall_nr; 462 rec->nr = syscall_nr;
483 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 463 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
484 (unsigned long *)&rec->args); 464 (unsigned long *)&rec->args);
485 perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size); 465 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
486
487end:
488 perf_swevent_put_recursion_context(rctx);
489end_recursion:
490 local_irq_restore(flags);
491} 466}
492 467
493int prof_sysenter_enable(struct ftrace_event_call *call) 468int prof_sysenter_enable(struct ftrace_event_call *call)
@@ -531,11 +506,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
531 struct syscall_trace_exit *rec; 506 struct syscall_trace_exit *rec;
532 unsigned long flags; 507 unsigned long flags;
533 int syscall_nr; 508 int syscall_nr;
534 char *trace_buf;
535 char *raw_data;
536 int rctx; 509 int rctx;
537 int size; 510 int size;
538 int cpu;
539 511
540 syscall_nr = syscall_get_nr(current, regs); 512 syscall_nr = syscall_get_nr(current, regs);
541 if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) 513 if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -557,38 +529,15 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
557 "exit event has grown above profile buffer size")) 529 "exit event has grown above profile buffer size"))
558 return; 530 return;
559 531
560 /* Protect the per cpu buffer, begin the rcu read side */ 532 rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
561 local_irq_save(flags); 533 sys_data->exit_event->id, &rctx, &flags);
562 534 if (!rec)
563 rctx = perf_swevent_get_recursion_context(); 535 return;
564 if (rctx < 0)
565 goto end_recursion;
566
567 cpu = smp_processor_id();
568
569 trace_buf = rcu_dereference(perf_trace_buf);
570
571 if (!trace_buf)
572 goto end;
573
574 raw_data = per_cpu_ptr(trace_buf, cpu);
575
576 /* zero the dead bytes from align to not leak stack to user */
577 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
578
579 rec = (struct syscall_trace_exit *)raw_data;
580 536
581 tracing_generic_entry_update(&rec->ent, 0, 0);
582 rec->ent.type = sys_data->exit_event->id;
583 rec->nr = syscall_nr; 537 rec->nr = syscall_nr;
584 rec->ret = syscall_get_return_value(current, regs); 538 rec->ret = syscall_get_return_value(current, regs);
585 539
586 perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size); 540 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
587
588end:
589 perf_swevent_put_recursion_context(rctx);
590end_recursion:
591 local_irq_restore(flags);
592} 541}
593 542
594int prof_sysexit_enable(struct ftrace_event_call *call) 543int prof_sysexit_enable(struct ftrace_event_call *call)
@@ -626,6 +575,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call)
626 mutex_unlock(&syscall_trace_lock); 575 mutex_unlock(&syscall_trace_lock);
627} 576}
628 577
629#endif 578#endif /* CONFIG_PERF_EVENTS */
630
631 579
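Both trace_syscalls.c hunks above replace the open-coded sequence (disable interrupts, take a software-event recursion context, look up the per-CPU buffer, zero the alignment padding, fill the generic header) with one ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() pair, leaving only the event-specific fields in each handler. A minimal user-space sketch of that prepare/submit shape, with illustrative names and a per-thread buffer standing in for the per-CPU one:

```c
#include <stddef.h>
#include <string.h>

static __thread char trace_buf[256];
static __thread int buf_busy;            /* stands in for the recursion context */

/* claim the buffer, zero the alignment padding, stamp the record type:
 * the boilerplate both syscall handlers used to open-code */
static void *buf_prepare(size_t size, int type, int *rctx)
{
    if (buf_busy || size < sizeof(long) || size > sizeof(trace_buf))
        return NULL;                     /* recursing, or record too large */
    buf_busy = 1;
    *rctx = 0;                           /* opaque token handed back at submit */
    memset(trace_buf + size - sizeof(long), 0, sizeof(long)); /* no stack leak */
    memcpy(trace_buf, &type, sizeof(type));                   /* record header */
    return trace_buf;
}

static void buf_submit(void *rec, size_t size, int rctx)
{
    (void)rec; (void)size; (void)rctx;   /* a real version emits rec here */
    buf_busy = 0;                        /* release the recursion context */
}
```

The payoff is that a handler can only obtain a buffer when it is not recursing, and the claim/zero/stamp logic lives in exactly one place.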
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 25c3ed594c54..4dc24cc13f5c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -520,6 +520,14 @@ config LOCK_STAT
520 520
521 For more details, see Documentation/lockstat.txt 521 For more details, see Documentation/lockstat.txt
522 522
523 This also enables lock events required by "perf lock", a
524 subcommand of perf.
525 If you want to use "perf lock", you also need to turn on
526 CONFIG_EVENT_TRACING.
527
528 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
529 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
530
523config DEBUG_LOCKDEP 531config DEBUG_LOCKDEP
524 bool "Lock dependency engine debugging" 532 bool "Lock dependency engine debugging"
525 depends on DEBUG_KERNEL && LOCKDEP 533 depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/idr.c b/lib/idr.c
index ba7d37cf7847..0dc782216d4b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -140,7 +140,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
140 id = *starting_id; 140 id = *starting_id;
141 restart: 141 restart:
142 p = idp->top; 142 p = idp->top;
143 l = p->layer; 143 l = idp->layers;
144 pa[l--] = NULL;
144 while (1) { 145 while (1) {
145 /* 146 /*
146 * We run around this while until we reach the leaf node... 147 * We run around this while until we reach the leaf node...
@@ -154,11 +155,13 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
154 oid = id; 155 oid = id;
155 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
156 157
157 /* did id go over the limit? */ 158 /* if already at the top layer, we need to grow */
158 if (id >= (1 << (idp->layers * IDR_BITS))) { 159 if (id >= 1 << (idp->layers * IDR_BITS)) {
159 *starting_id = id; 160 *starting_id = id;
160 return IDR_NEED_TO_GROW; 161 return IDR_NEED_TO_GROW;
161 } 162 }
163 p = pa[l];
164 BUG_ON(!p);
162 165
163 /* If we need to go up one layer, continue the 166 /* If we need to go up one layer, continue the
164 * loop; otherwise, restart from the top. 167 * loop; otherwise, restart from the top.
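The interesting arithmetic in this idr fix is the line that skips past an exhausted subtree: `id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1`. Saturating every low bit a layer-`l` subtree covers and adding one rounds the id up to the next subtree boundary; the new `pa[]` bookkeeping then lets the walk resume from the recorded parent instead of a stale pointer. A small stand-alone illustration (IDR_BITS taken as 6, the 64-bit kernel's value; 32-bit builds use 5):

```c
#include <stdio.h>

#define IDR_BITS 6   /* assumption: 64-bit build; 32-bit kernels use 5 */

/* round id up past the subtree rooted at layer l: saturate the low
 * bits that subtree spans, then add one */
static int next_subtree(int id, int l)
{
    return (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
}

int main(void)
{
    /* a layer-1 subtree spans 2^6 = 64 ids, so 70 rounds up to 128,
     * the first id of the following subtree */
    printf("%d -> %d\n", 70, next_subtree(70, 1));
    return 0;
}
```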
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b2..880bd592d38e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
912 goto out_pm; 912 goto out_pm;
913 913
914 err = -ENODEV; 914 err = -ENODEV;
915 if (node < 0 || node >= MAX_NUMNODES)
916 goto out_pm;
917
915 if (!node_state(node, N_HIGH_MEMORY)) 918 if (!node_state(node, N_HIGH_MEMORY))
916 goto out_pm; 919 goto out_pm;
917 920
@@ -999,33 +1002,27 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
999#define DO_PAGES_STAT_CHUNK_NR 16 1002#define DO_PAGES_STAT_CHUNK_NR 16
1000 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 1003 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1001 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 1004 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1002 unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1003 int err;
1004 1005
1005 for (i = 0; i < nr_pages; i += chunk_nr) { 1006 while (nr_pages) {
1006 if (chunk_nr > nr_pages - i) 1007 unsigned long chunk_nr;
1007 chunk_nr = nr_pages - i;
1008 1008
1009 err = copy_from_user(chunk_pages, &pages[i], 1009 chunk_nr = nr_pages;
1010 chunk_nr * sizeof(*chunk_pages)); 1010 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1011 if (err) { 1011 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1012 err = -EFAULT; 1012
1013 goto out; 1013 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1014 } 1014 break;
1015 1015
1016 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 1016 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1017 1017
1018 err = copy_to_user(&status[i], chunk_status, 1018 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1019 chunk_nr * sizeof(*chunk_status)); 1019 break;
1020 if (err) {
1021 err = -EFAULT;
1022 goto out;
1023 }
1024 }
1025 err = 0;
1026 1020
1027out: 1021 pages += chunk_nr;
1028 return err; 1022 status += chunk_nr;
1023 nr_pages -= chunk_nr;
1024 }
1025 return nr_pages ? -EFAULT : 0;
1029} 1026}
1030 1027
1031/* 1028/*
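The rewritten do_pages_stat() drops the index-based for loop and its separate error label in favour of a fixed-size chunk loop that advances all three cursors together and reports success only when the whole request was consumed. A user-space sketch of the same shape, with memcpy standing in for copy_from_user()/copy_to_user() (a real fault would make the copy helper return false):

```c
#include <errno.h>
#include <stdbool.h>
#include <string.h>

enum { CHUNK = 16 };                     /* DO_PAGES_STAT_CHUNK_NR stand-in */

/* stand-in for copy_from_user/copy_to_user: false would mean a fault */
static bool copy_ok(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return true;
}

static int pages_stat(const void **pages, int *status, unsigned long nr)
{
    const void *chunk_pages[CHUNK];
    int chunk_status[CHUNK];

    while (nr) {
        unsigned long n = nr < CHUNK ? nr : CHUNK;

        if (!copy_ok(chunk_pages, pages, n * sizeof(*chunk_pages)))
            break;                       /* fault: leave nr nonzero */

        memset(chunk_status, 0, n * sizeof(*chunk_status)); /* "do the work" */

        if (!copy_ok(status, chunk_status, n * sizeof(*status)))
            break;

        pages  += n;                     /* all three cursors move in lockstep */
        status += n;
        nr     -= n;
    }
    return nr ? -EFAULT : 0;             /* nr drains only on full success */
}
```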
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f52481b1c1e5..237050478f28 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -459,6 +459,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
459 list_for_each_entry(c, &p->children, sibling) { 459 list_for_each_entry(c, &p->children, sibling) {
460 if (c->mm == p->mm) 460 if (c->mm == p->mm)
461 continue; 461 continue;
462 if (mem && !task_in_mem_cgroup(c, mem))
463 continue;
462 if (!oom_kill_task(c)) 464 if (!oom_kill_task(c))
463 return 0; 465 return 0;
464 } 466 }
diff --git a/net/9p/client.c b/net/9p/client.c
index 8af95b2dddd6..09d4f1e2e4a8 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
69 69
70static int parse_opts(char *opts, struct p9_client *clnt) 70static int parse_opts(char *opts, struct p9_client *clnt)
71{ 71{
72 char *options; 72 char *options, *tmp_options;
73 char *p; 73 char *p;
74 substring_t args[MAX_OPT_ARGS]; 74 substring_t args[MAX_OPT_ARGS];
75 int option; 75 int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
81 if (!opts) 81 if (!opts)
82 return 0; 82 return 0;
83 83
84 options = kstrdup(opts, GFP_KERNEL); 84 tmp_options = kstrdup(opts, GFP_KERNEL);
85 if (!options) { 85 if (!tmp_options) {
86 P9_DPRINTK(P9_DEBUG_ERROR, 86 P9_DPRINTK(P9_DEBUG_ERROR,
87 "failed to allocate copy of option string\n"); 87 "failed to allocate copy of option string\n");
88 return -ENOMEM; 88 return -ENOMEM;
89 } 89 }
90 options = tmp_options;
90 91
91 while ((p = strsep(&options, ",")) != NULL) { 92 while ((p = strsep(&options, ",")) != NULL) {
92 int token; 93 int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
108 break; 109 break;
109 case Opt_trans: 110 case Opt_trans:
110 clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); 111 clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
112 if(clnt->trans_mod == NULL) {
113 P9_DPRINTK(P9_DEBUG_ERROR,
114 "Could not find request transport: %s\n",
115 (char *) &args[0]);
116 ret = -EINVAL;
117 goto free_and_return;
118 }
111 break; 119 break;
112 case Opt_legacy: 120 case Opt_legacy:
113 clnt->dotu = 0; 121 clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
117 } 125 }
118 } 126 }
119 127
120 kfree(options); 128free_and_return:
129 kfree(tmp_options);
121 return ret; 130 return ret;
122} 131}
123 132
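The tmp_options change in this hunk (repeated below for trans_fd.c and trans_rdma.c) fixes a leak-prone pattern: strsep() advances the pointer it is given and leaves it NULL at the end, so the kstrdup() result must be kept in a second variable for kfree(). The same behaviour in plain user-space C:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *options, *tmp_options, *p;

    tmp_options = strdup("port=564,trans=tcp,noextend");
    if (!tmp_options)
        return 1;
    options = tmp_options;               /* cursor for strsep() to consume */

    /* strsep() walks `options` forward and finally sets it to NULL:
     * free(options) at the end would free NULL and leak the copy, and
     * freeing it mid-parse would hand free() an interior pointer */
    while ((p = strsep(&options, ",")) != NULL)
        printf("token: %s\n", p);

    free(tmp_options);                   /* the pointer strdup() returned */
    return 0;
}
```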
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
667 clnt->trans = NULL; 676 clnt->trans = NULL;
668 spin_lock_init(&clnt->lock); 677 spin_lock_init(&clnt->lock);
669 INIT_LIST_HEAD(&clnt->fidlist); 678 INIT_LIST_HEAD(&clnt->fidlist);
670 clnt->fidpool = p9_idpool_create();
671 if (IS_ERR(clnt->fidpool)) {
672 err = PTR_ERR(clnt->fidpool);
673 clnt->fidpool = NULL;
674 goto error;
675 }
676 679
677 p9_tag_init(clnt); 680 p9_tag_init(clnt);
678 681
679 err = parse_opts(options, clnt); 682 err = parse_opts(options, clnt);
680 if (err < 0) 683 if (err < 0)
681 goto error; 684 goto free_client;
682 685
683 if (!clnt->trans_mod) 686 if (!clnt->trans_mod)
684 clnt->trans_mod = v9fs_get_default_trans(); 687 clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
687 err = -EPROTONOSUPPORT; 690 err = -EPROTONOSUPPORT;
688 P9_DPRINTK(P9_DEBUG_ERROR, 691 P9_DPRINTK(P9_DEBUG_ERROR,
689 "No transport defined or default transport\n"); 692 "No transport defined or default transport\n");
690 goto error; 693 goto free_client;
694 }
695
696 clnt->fidpool = p9_idpool_create();
697 if (IS_ERR(clnt->fidpool)) {
698 err = PTR_ERR(clnt->fidpool);
699 clnt->fidpool = NULL;
700 goto put_trans;
691 } 701 }
692 702
693 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", 703 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
695 705
696 err = clnt->trans_mod->create(clnt, dev_name, options); 706 err = clnt->trans_mod->create(clnt, dev_name, options);
697 if (err) 707 if (err)
698 goto error; 708 goto destroy_fidpool;
699 709
700 if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize) 710 if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
701 clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; 711 clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
702 712
703 err = p9_client_version(clnt); 713 err = p9_client_version(clnt);
704 if (err) 714 if (err)
705 goto error; 715 goto close_trans;
706 716
707 return clnt; 717 return clnt;
708 718
709error: 719close_trans:
710 p9_client_destroy(clnt); 720 clnt->trans_mod->close(clnt);
721destroy_fidpool:
722 p9_idpool_destroy(clnt->fidpool);
723put_trans:
724 v9fs_put_trans(clnt->trans_mod);
725free_client:
726 kfree(clnt);
711 return ERR_PTR(err); 727 return ERR_PTR(err);
712} 728}
713EXPORT_SYMBOL(p9_client_create); 729EXPORT_SYMBOL(p9_client_create);
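p9_client_create() previously funnelled every failure through p9_client_destroy(), which assumes a fully constructed client; the rework gives each acquisition its own unwind label, so a failure releases exactly what was set up before it, in reverse order. A compact sketch of that ladder (types and helpers here are illustrative, not the 9p API):

```c
#include <stdlib.h>

struct res { int dummy; };
static struct res *acquire(void)   { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

struct client { struct res *trans, *fidpool; };

static struct client *client_create(void)
{
    struct client *c = malloc(sizeof(*c));
    if (!c)
        return NULL;

    c->trans = acquire();
    if (!c->trans)
        goto free_client;                /* nothing else to undo yet */

    c->fidpool = acquire();
    if (!c->fidpool)
        goto put_trans;                  /* undo the transport, then the rest */

    return c;

put_trans:                               /* labels unwind in reverse order */
    release(c->trans);
free_client:
    free(c);
    return NULL;
}

int main(void)
{
    struct client *c = client_create();
    if (c) {
        release(c->fidpool);
        release(c->trans);
        free(c);
    }
    return 0;
}
```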
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
1214{ 1230{
1215 int ret; 1231 int ret;
1216 1232
1233 /* NOTE: size shouldn't include its own length */
1217 /* size[2] type[2] dev[4] qid[13] */ 1234 /* size[2] type[2] dev[4] qid[13] */
1218 /* mode[4] atime[4] mtime[4] length[8]*/ 1235 /* mode[4] atime[4] mtime[4] length[8]*/
1219 /* name[s] uid[s] gid[s] muid[s] */ 1236 /* name[s] uid[s] gid[s] muid[s] */
1220 ret = 2+2+4+13+4+4+4+8+2+2+2+2; 1237 ret = 2+4+13+4+4+4+8+2+2+2+2;
1221 1238
1222 if (wst->name) 1239 if (wst->name)
1223 ret += strlen(wst->name); 1240 ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
1258 wst->name, wst->uid, wst->gid, wst->muid, wst->extension, 1275 wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
1259 wst->n_uid, wst->n_gid, wst->n_muid); 1276 wst->n_uid, wst->n_gid, wst->n_muid);
1260 1277
1261 req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst); 1278 req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
1262 if (IS_ERR(req)) { 1279 if (IS_ERR(req)) {
1263 err = PTR_ERR(req); 1280 err = PTR_ERR(req);
1264 goto error; 1281 goto error;
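The two final client.c hunks encode the same wire-format rule from both sides: a 9P stat's leading size[2] field does not count itself, so p9_client_statsize() drops the first 2 from its sum, while TWSTAT, which nests the stat inside another length-prefixed field, sends wst->size + 2. Totalling the fixed part under that convention:

```c
#include <stdio.h>

int main(void)
{
    /* fixed 9P stat body, excluding its own leading size[2]:
     * type[2] dev[4] qid[13] mode[4] atime[4] mtime[4] length[8]
     * plus the four 2-byte length prefixes of name/uid/gid/muid */
    int fixed = 2 + 4 + 13 + 4 + 4 + 4 + 8 + 2 + 2 + 2 + 2;

    printf("fixed part: %d bytes\n", fixed);   /* 47 */
    /* on the wire, TWSTAT wraps the stat in one more length field,
     * hence wst->size + 2 in p9_client_wstat() */
    return 0;
}
```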
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index be1cb909d8c0..31d0b05582a9 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
714 char *p; 714 char *p;
715 substring_t args[MAX_OPT_ARGS]; 715 substring_t args[MAX_OPT_ARGS];
716 int option; 716 int option;
717 char *options; 717 char *options, *tmp_options;
718 int ret; 718 int ret;
719 719
720 opts->port = P9_PORT; 720 opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
724 if (!params) 724 if (!params)
725 return 0; 725 return 0;
726 726
727 options = kstrdup(params, GFP_KERNEL); 727 tmp_options = kstrdup(params, GFP_KERNEL);
728 if (!options) { 728 if (!tmp_options) {
729 P9_DPRINTK(P9_DEBUG_ERROR, 729 P9_DPRINTK(P9_DEBUG_ERROR,
730 "failed to allocate copy of option string\n"); 730 "failed to allocate copy of option string\n");
731 return -ENOMEM; 731 return -ENOMEM;
732 } 732 }
733 options = tmp_options;
733 734
734 while ((p = strsep(&options, ",")) != NULL) { 735 while ((p = strsep(&options, ",")) != NULL) {
735 int token; 736 int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
760 continue; 761 continue;
761 } 762 }
762 } 763 }
763 kfree(options); 764
765 kfree(tmp_options);
764 return 0; 766 return 0;
765} 767}
766 768
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 65cb29db03f8..2c95a89c0f46 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
166 char *p; 166 char *p;
167 substring_t args[MAX_OPT_ARGS]; 167 substring_t args[MAX_OPT_ARGS];
168 int option; 168 int option;
169 char *options; 169 char *options, *tmp_options;
170 int ret; 170 int ret;
171 171
172 opts->port = P9_PORT; 172 opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
177 if (!params) 177 if (!params)
178 return 0; 178 return 0;
179 179
180 options = kstrdup(params, GFP_KERNEL); 180 tmp_options = kstrdup(params, GFP_KERNEL);
181 if (!options) { 181 if (!tmp_options) {
182 P9_DPRINTK(P9_DEBUG_ERROR, 182 P9_DPRINTK(P9_DEBUG_ERROR,
183 "failed to allocate copy of option string\n"); 183 "failed to allocate copy of option string\n");
184 return -ENOMEM; 184 return -ENOMEM;
185 } 185 }
186 options = tmp_options;
186 187
187 while ((p = strsep(&options, ",")) != NULL) { 188 while ((p = strsep(&options, ",")) != NULL) {
188 int token; 189 int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
216 } 217 }
217 /* RQ must be at least as large as the SQ */ 218 /* RQ must be at least as large as the SQ */
218 opts->rq_depth = max(opts->rq_depth, opts->sq_depth); 219 opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
219 kfree(options); 220 kfree(tmp_options);
220 return 0; 221 return 0;
221} 222}
222 223
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ea1e3daabefe..cb50f4ae5eef 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
102 struct virtio_chan *chan = client->trans; 102 struct virtio_chan *chan = client->trans;
103 103
104 mutex_lock(&virtio_9p_lock); 104 mutex_lock(&virtio_9p_lock);
105 chan->inuse = false; 105 if (chan)
106 chan->inuse = false;
106 mutex_unlock(&virtio_9p_lock); 107 mutex_unlock(&virtio_9p_lock);
107} 108}
108 109
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
311 } 312 }
312 313
313 client->trans = (void *)chan; 314 client->trans = (void *)chan;
315 client->status = Connected;
314 chan->client = client; 316 chan->client = client;
315 317
316 return 0; 318 return 0;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7d..b10e3cdb08f8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
377 377
378 if (acl->state == BT_CONNECTED && 378 if (acl->state == BT_CONNECTED &&
379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
380 acl->power_save = 1;
381 hci_conn_enter_active_mode(acl);
382
380 if (lmp_esco_capable(hdev)) 383 if (lmp_esco_capable(hdev))
381 hci_setup_sync(sco, acl->handle); 384 hci_setup_sync(sco, acl->handle);
382 else 385 else
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796c..592da5c909c1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
1699 break; 1699 break;
1700 1700
1701 case 0x1c: /* SCO interval rejected */ 1701 case 0x1c: /* SCO interval rejected */
1702 case 0x1a: /* Unsupported Remote Feature */
1702 case 0x1f: /* Unspecified error */ 1703 case 0x1f: /* Unspecified error */
1703 if (conn->out && conn->attempt < 2) { 1704 if (conn->out && conn->attempt < 2) {
1704 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 1705 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 6cf526d06e21..fc6ec1e72652 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -703,29 +703,9 @@ static void hidp_close(struct hid_device *hid)
703static int hidp_parse(struct hid_device *hid) 703static int hidp_parse(struct hid_device *hid)
704{ 704{
705 struct hidp_session *session = hid->driver_data; 705 struct hidp_session *session = hid->driver_data;
706 struct hidp_connadd_req *req = session->req;
707 unsigned char *buf;
708 int ret;
709
710 buf = kmalloc(req->rd_size, GFP_KERNEL);
711 if (!buf)
712 return -ENOMEM;
713
714 if (copy_from_user(buf, req->rd_data, req->rd_size)) {
715 kfree(buf);
716 return -EFAULT;
717 }
718
719 ret = hid_parse_report(session->hid, buf, req->rd_size);
720
721 kfree(buf);
722
723 if (ret)
724 return ret;
725
726 session->req = NULL;
727 706
728 return 0; 707 return hid_parse_report(session->hid, session->rd_data,
708 session->rd_size);
729} 709}
730 710
731static int hidp_start(struct hid_device *hid) 711static int hidp_start(struct hid_device *hid)
@@ -770,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session,
770 bdaddr_t src, dst; 750 bdaddr_t src, dst;
771 int err; 751 int err;
772 752
753 session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
754 if (!session->rd_data)
755 return -ENOMEM;
756
757 if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
758 err = -EFAULT;
759 goto fault;
760 }
761 session->rd_size = req->rd_size;
762
773 hid = hid_allocate_device(); 763 hid = hid_allocate_device();
774 if (IS_ERR(hid)) 764 if (IS_ERR(hid)) {
775 return PTR_ERR(hid); 765 err = PTR_ERR(hid);
766 goto fault;
767 }
776 768
777 session->hid = hid; 769 session->hid = hid;
778 session->req = req; 770
779 hid->driver_data = session; 771 hid->driver_data = session;
780 772
781 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); 773 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -806,6 +798,10 @@ failed:
806 hid_destroy_device(hid); 798 hid_destroy_device(hid);
807 session->hid = NULL; 799 session->hid = NULL;
808 800
801fault:
802 kfree(session->rd_data);
803 session->rd_data = NULL;
804
809 return err; 805 return err;
810} 806}
811 807
@@ -900,6 +896,9 @@ unlink:
900 session->hid = NULL; 896 session->hid = NULL;
901 } 897 }
902 898
899 kfree(session->rd_data);
900 session->rd_data = NULL;
901
903purge: 902purge:
904 skb_queue_purge(&session->ctrl_transmit); 903 skb_queue_purge(&session->ctrl_transmit);
905 skb_queue_purge(&session->intr_transmit); 904 skb_queue_purge(&session->intr_transmit);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c3586..a4e215d50c10 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
154 struct sk_buff_head ctrl_transmit; 154 struct sk_buff_head ctrl_transmit;
155 struct sk_buff_head intr_transmit; 155 struct sk_buff_head intr_transmit;
156 156
157 struct hidp_connadd_req *req; 157 /* Report descriptor */
158 __u8 *rd_data;
159 uint rd_size;
158}; 160};
159 161
160static inline void hidp_schedule(struct hidp_session *session) 162static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e22..89f4a59eb82b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
252 BT_DBG("session %p state %ld", s, s->state); 252 BT_DBG("session %p state %ld", s, s->state);
253 253
254 set_bit(RFCOMM_TIMED_OUT, &s->flags); 254 set_bit(RFCOMM_TIMED_OUT, &s->flags);
255 rfcomm_session_put(s);
256 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 255 rfcomm_schedule(RFCOMM_SCHED_TIMEO);
257} 256}
258 257
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1151 break; 1150 break;
1152 1151
1153 case BT_DISCONN: 1152 case BT_DISCONN:
1154 rfcomm_session_put(s); 1153 /* When socket is closed and we are not RFCOMM
1154 * initiator rfcomm_process_rx already calls
1155 * rfcomm_session_put() */
1156 if (s->sock->sk->sk_state != BT_CLOSED)
1157 rfcomm_session_put(s);
1155 break; 1158 break;
1156 } 1159 }
1157 } 1160 }
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
1920 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1923 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
1921 s->state = BT_DISCONN; 1924 s->state = BT_DISCONN;
1922 rfcomm_send_disc(s, 0); 1925 rfcomm_send_disc(s, 0);
1926 rfcomm_session_put(s);
1923 continue; 1927 continue;
1924 } 1928 }
1925 1929
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec3..ec874218b206 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2761,7 +2761,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2761 switch (ret) { 2761 switch (ret) {
2762 case GRO_NORMAL: 2762 case GRO_NORMAL:
2763 case GRO_HELD: 2763 case GRO_HELD:
2764 skb->protocol = eth_type_trans(skb, napi->dev); 2764 skb->protocol = eth_type_trans(skb, skb->dev);
2765 2765
2766 if (ret == GRO_HELD) 2766 if (ret == GRO_HELD)
2767 skb_gro_pull(skb, -ETH_HLEN); 2767 skb_gro_pull(skb, -ETH_HLEN);
diff --git a/net/core/dst.c b/net/core/dst.c
index 57bc4d5b8d08..cb1b3488b739 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <net/net_namespace.h> 19#include <net/net_namespace.h>
20#include <linux/sched.h>
20 21
21#include <net/dst.h> 22#include <net/dst.h>
22 23
@@ -79,6 +80,7 @@ loop:
79 while ((dst = next) != NULL) { 80 while ((dst = next) != NULL) {
80 next = dst->next; 81 next = dst->next;
81 prefetch(&next->next); 82 prefetch(&next->next);
83 cond_resched();
82 if (likely(atomic_read(&dst->__refcnt))) { 84 if (likely(atomic_read(&dst->__refcnt))) {
83 last->next = dst; 85 last->next = dst;
84 last = dst; 86 last = dst;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d8aee584e8d1..236a9988ea91 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -927,6 +927,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
927 case ETHTOOL_GPERMADDR: 927 case ETHTOOL_GPERMADDR:
928 case ETHTOOL_GUFO: 928 case ETHTOOL_GUFO:
929 case ETHTOOL_GGSO: 929 case ETHTOOL_GGSO:
930 case ETHTOOL_GGRO:
930 case ETHTOOL_GFLAGS: 931 case ETHTOOL_GFLAGS:
931 case ETHTOOL_GPFLAGS: 932 case ETHTOOL_GPFLAGS:
932 case ETHTOOL_GRXFH: 933 case ETHTOOL_GRXFH:
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fbc1c7472c5e..099c753c4213 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -410,7 +410,8 @@ static ssize_t wireless_show(struct device *d, char *buf,
410 const struct iw_statistics *iw; 410 const struct iw_statistics *iw;
411 ssize_t ret = -EINVAL; 411 ssize_t ret = -EINVAL;
412 412
413 rtnl_lock(); 413 if (!rtnl_trylock())
414 return restart_syscall();
414 if (dev_isalive(dev)) { 415 if (dev_isalive(dev)) {
415 iw = get_wireless_stats(dev); 416 iw = get_wireless_stats(dev);
416 if (iw) 417 if (iw)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de0c2c726420..2e692afdc55d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg)
3524 wait_event_interruptible_timeout(t->queue, 3524 wait_event_interruptible_timeout(t->queue,
3525 t->control != 0, 3525 t->control != 0,
3526 HZ/10); 3526 HZ/10);
3527 try_to_freeze();
3527 continue; 3528 continue;
3528 } 3529 }
3529 3530
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 57dfb9c8c4f2..ff16e9df1969 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
83 va_list args; 83 va_list args;
84 84
85 va_start(args, fmt); 85 va_start(args, fmt);
86 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); 86 vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
87 va_end(args); 87 va_end(args);
88 88
89 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, 89 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 269958bf7fe9..6df6f8ac9636 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
19#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21
22#define CCID_MAX 255 22/* maximum value for a CCID (RFC 4340, 19.5) */
23#define CCID_MAX 255
24#define CCID_SLAB_NAME_LENGTH 32
23 25
24struct tcp_info; 26struct tcp_info;
25 27
@@ -49,8 +51,8 @@ struct ccid_operations {
49 const char *ccid_name; 51 const char *ccid_name;
50 struct kmem_cache *ccid_hc_rx_slab, 52 struct kmem_cache *ccid_hc_rx_slab,
51 *ccid_hc_tx_slab; 53 *ccid_hc_tx_slab;
52 char ccid_hc_rx_slab_name[32]; 54 char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
53 char ccid_hc_tx_slab_name[32]; 55 char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
54 __u32 ccid_hc_rx_obj_size, 56 __u32 ccid_hc_rx_obj_size,
55 ccid_hc_tx_obj_size; 57 ccid_hc_tx_obj_size;
56 /* Interface Routines */ 58 /* Interface Routines */
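The ccid.c one-liner is the classic sizeof-on-a-parameter bug: inside ccid_kmem_cache_create() the array argument has decayed to char *, so sizeof(slab_name_fmt) is the size of a pointer (8 on 64-bit), silently truncating slab names. Hence the shared CCID_SLAB_NAME_LENGTH constant introduced in ccid.h. A runnable demonstration of the decay:

```c
#include <stdarg.h>
#include <stdio.h>

#define NAME_LEN 32                      /* plays the CCID_SLAB_NAME_LENGTH role */

static void fmt_name(char *buf, const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    /* sizeof(buf) here would be sizeof(char *), not 32: the caller's
     * array decayed to a pointer, so the capacity must travel as a
     * shared constant (or an explicit length parameter) */
    vsnprintf(buf, NAME_LEN, fmt, args);
    va_end(args);
}

int main(void)
{
    char name[NAME_LEN];

    fmt_name(name, "ccid%u_rx", 3u);
    printf("%s (sizeof(char *) = %zu)\n", name, sizeof(char *));
    return 0;
}
```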
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index bace1d8cbcfd..f5b3464f1242 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
162 goto err0; 162 goto err0;
163 163
164 ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), 164 try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
165 "dccp"); 165 "dccp");
166 if (ret) 166 if (ret)
167 goto err1; 167 goto err1;
168 168
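The probe.c change matters because try_then_request_module() yields the value of its condition, not the registration result: with the assignment outside, ret became 1 on success (the truth of `== 0`) and tripped the error path. Moving the assignment inside the retried expression keeps ret as the real jprobe return code. A loose user-space model of the macro (simplified; the kernel version uses `?:` and request_module()):

```c
#include <stdio.h>

static int load_module(const char *m) { printf("loading %s\n", m); return 0; }
static int register_probe(void)       { return 0; /* pretend success */ }

/* evaluate cond; if it is false, load the module and evaluate it again */
#define try_then_load(cond, mod) ((cond) ? 1 : (load_module(mod), (cond)))

int main(void)
{
    int ret;

    /* broken shape: ret captured the condition's truth value, so a
     * successful registration set ret to 1 and looked like an error:
     *     ret = try_then_load(register_probe() == 0, "dccp");
     * fixed shape: assign inside the expression the macro retries */
    (void)try_then_load((ret = register_probe()) == 0, "dccp");
    printf("ret = %d\n", ret);           /* 0 on success, else the error code */
    return 0;
}
```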
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f05b653..26dec2be9615 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1317,14 +1317,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1317{ 1317{
1318 int *valp = ctl->data; 1318 int *valp = ctl->data;
1319 int val = *valp; 1319 int val = *valp;
1320 loff_t pos = *ppos;
1320 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 1321 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1321 1322
1322 if (write && *valp != val) { 1323 if (write && *valp != val) {
1323 struct net *net = ctl->extra2; 1324 struct net *net = ctl->extra2;
1324 1325
1325 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { 1326 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
1326 if (!rtnl_trylock()) 1327 if (!rtnl_trylock()) {
1328 /* Restore the original values before restarting */
1329 *valp = val;
1330 *ppos = pos;
1327 return restart_syscall(); 1331 return restart_syscall();
1332 }
1328 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { 1333 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1329 inet_forward_change(net); 1334 inet_forward_change(net);
1330 } else if (*valp) { 1335 } else if (*valp) {
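This devinet.c hunk (and the matching addrconf.c ones further down) completes the rtnl_trylock()/restart_syscall() pattern: proc_dointvec() has already updated the value and the file offset by the time the lock attempt fails, so both must be restored or the restarted write would see half-applied state at the wrong offset. A sketch of the handler shape, with stand-in names throughout:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static bool try_lock(void)      { return true; }    /* rtnl_trylock() stand-in */
static void unlock(void)        { }
static void apply_change(int v) { (void)v; }

static int forwarding_write(int *valp, long *ppos, const char *buf)
{
    int old = *valp;
    long pos = *ppos;

    *valp = atoi(buf);                   /* proc_dointvec(): parses the input */
    *ppos += 1;                          /* ... and advances the file offset */

    if (*valp != old) {
        if (!try_lock()) {
            *valp = old;                 /* undo the parse ... */
            *ppos = pos;                 /* ... rewind, then ask for a restart */
            return -ERESTART;
        }
        apply_change(*valp);
        unlock();
    }
    return 0;
}

int main(void)
{
    int val = 0;
    long pos = 0;
    return forwarding_write(&val, &pos, "1");
}
```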
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c933..a42f658e756a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb)
946 break; 946 break;
947 case IGMP_HOST_MEMBERSHIP_REPORT: 947 case IGMP_HOST_MEMBERSHIP_REPORT:
948 case IGMPV2_HOST_MEMBERSHIP_REPORT: 948 case IGMPV2_HOST_MEMBERSHIP_REPORT:
949 case IGMPV3_HOST_MEMBERSHIP_REPORT:
950 /* Is it our report looped back? */ 949 /* Is it our report looped back? */
951 if (skb_rtable(skb)->fl.iif == 0) 950 if (skb_rtable(skb)->fl.iif == 0)
952 break; 951 break;
@@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb)
960 in_dev_put(in_dev); 959 in_dev_put(in_dev);
961 return pim_rcv_v1(skb); 960 return pim_rcv_v1(skb);
962#endif 961#endif
962 case IGMPV3_HOST_MEMBERSHIP_REPORT:
963 case IGMP_DVMRP: 963 case IGMP_DVMRP:
964 case IGMP_TRACE: 964 case IGMP_TRACE:
965 case IGMP_HOST_LEAVE_MESSAGE: 965 case IGMP_HOST_LEAVE_MESSAGE:
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04150ae..544ce0876f12 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -124,16 +124,12 @@ static int ipcomp4_init_state(struct xfrm_state *x)
124 if (x->props.mode == XFRM_MODE_TUNNEL) { 124 if (x->props.mode == XFRM_MODE_TUNNEL) {
125 err = ipcomp_tunnel_attach(x); 125 err = ipcomp_tunnel_attach(x);
126 if (err) 126 if (err)
127 goto error_tunnel; 127 goto out;
128 } 128 }
129 129
130 err = 0; 130 err = 0;
131out: 131out:
132 return err; 132 return err;
133
134error_tunnel:
135 ipcomp_destroy(x);
136 goto out;
137} 133}
138 134
139static const struct xfrm_type ipcomp_type = { 135static const struct xfrm_type ipcomp_type = {
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5f..90203e1b9187 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
925 if (t && !IS_ERR(t)) { 925 if (t && !IS_ERR(t)) {
926 struct arpt_getinfo info; 926 struct arpt_getinfo info;
927 const struct xt_table_info *private = t->private; 927 const struct xt_table_info *private = t->private;
928
929#ifdef CONFIG_COMPAT 928#ifdef CONFIG_COMPAT
929 struct xt_table_info tmp;
930
930 if (compat) { 931 if (compat) {
931 struct xt_table_info tmp;
932 ret = compat_table_info(private, &tmp); 932 ret = compat_table_info(private, &tmp);
933 xt_compat_flush_offsets(NFPROTO_ARP); 933 xt_compat_flush_offsets(NFPROTO_ARP);
934 private = &tmp; 934 private = &tmp;
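Hoisting `struct xt_table_info tmp` out of the if block (here and in the ip_tables.c and ip6_tables.c hunks below) fixes a lifetime bug: the old code stored &tmp in `private` and kept using it after the block that declared tmp had closed, which is undefined behaviour even when the stack slot happens to survive. The fixed shape, reduced to its essence:

```c
#include <stdio.h>

struct table_info { int size; };

static int get_info(int compat)
{
    const struct table_info *view;
    struct table_info real = { 128 };
    struct table_info tmp;               /* function scope: outlives the branch */

    view = &real;
    if (compat) {
        tmp.size = 64;                   /* compat-layout shadow copy */
        view = &tmp;                     /* safe only because tmp lives on */
    }
    /* had tmp been declared inside the if block, this read through
     * `view` would be a dangling-pointer dereference */
    return view->size;
}

int main(void)
{
    printf("%d %d\n", get_info(0), get_info(1));
    return 0;
}
```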
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 572330a552ef..3ce53cf13d5a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1132 if (t && !IS_ERR(t)) { 1132 if (t && !IS_ERR(t)) {
1133 struct ipt_getinfo info; 1133 struct ipt_getinfo info;
1134 const struct xt_table_info *private = t->private; 1134 const struct xt_table_info *private = t->private;
1135
1136#ifdef CONFIG_COMPAT 1135#ifdef CONFIG_COMPAT
1136 struct xt_table_info tmp;
1137
1137 if (compat) { 1138 if (compat) {
1138 struct xt_table_info tmp;
1139 ret = compat_table_info(private, &tmp); 1139 ret = compat_table_info(private, &tmp);
1140 xt_compat_flush_offsets(AF_INET); 1140 xt_compat_flush_offsets(AF_INET);
1141 private = &tmp; 1141 private = &tmp;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a656..d1ea38a7c490 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
210 }, 210 },
211 { 211 {
212 .procname = "ip_conntrack_buckets", 212 .procname = "ip_conntrack_buckets",
213 .data = &nf_conntrack_htable_size, 213 .data = &init_net.ct.htable_size,
214 .maxlen = sizeof(unsigned int), 214 .maxlen = sizeof(unsigned int),
215 .mode = 0444, 215 .mode = 0444,
216 .proc_handler = proc_dointvec, 216 .proc_handler = proc_dointvec,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda6..2fb7b76da94f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
32 struct hlist_nulls_node *n; 32 struct hlist_nulls_node *n;
33 33
34 for (st->bucket = 0; 34 for (st->bucket = 0;
35 st->bucket < nf_conntrack_htable_size; 35 st->bucket < net->ct.htable_size;
36 st->bucket++) { 36 st->bucket++) {
37 n = rcu_dereference(net->ct.hash[st->bucket].first); 37 n = rcu_dereference(net->ct.hash[st->bucket].first);
38 if (!is_a_nulls(n)) 38 if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
50 head = rcu_dereference(head->next); 50 head = rcu_dereference(head->next);
51 while (is_a_nulls(head)) { 51 while (is_a_nulls(head)) {
52 if (likely(get_nulls_value(head) == st->bucket)) { 52 if (likely(get_nulls_value(head) == st->bucket)) {
53 if (++st->bucket >= nf_conntrack_htable_size) 53 if (++st->bucket >= net->ct.htable_size)
54 return NULL; 54 return NULL;
55 } 55 }
56 head = rcu_dereference(net->ct.hash[st->bucket].first); 56 head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd0..26066a2327ad 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
35 35
36static struct nf_conntrack_l3proto *l3proto __read_mostly; 36static struct nf_conntrack_l3proto *l3proto __read_mostly;
37 37
38/* Calculated at init based on memory size */
39static unsigned int nf_nat_htable_size __read_mostly;
40
41#define MAX_IP_NAT_PROTO 256 38#define MAX_IP_NAT_PROTO 256
42static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] 39static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
43 __read_mostly; 40 __read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
72 69
73/* We keep an extra hash for each conntrack, for fast searching. */ 70/* We keep an extra hash for each conntrack, for fast searching. */
74static inline unsigned int 71static inline unsigned int
75hash_by_src(const struct nf_conntrack_tuple *tuple) 72hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
76{ 73{
77 unsigned int hash; 74 unsigned int hash;
78 75
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
80 hash = jhash_3words((__force u32)tuple->src.u3.ip, 77 hash = jhash_3words((__force u32)tuple->src.u3.ip,
81 (__force u32)tuple->src.u.all, 78 (__force u32)tuple->src.u.all,
82 tuple->dst.protonum, 0); 79 tuple->dst.protonum, 0);
83 return ((u64)hash * nf_nat_htable_size) >> 32; 80 return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
84} 81}
85 82
86/* Is this tuple already taken? (not by us) */ 83/* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
147 struct nf_conntrack_tuple *result, 144 struct nf_conntrack_tuple *result,
148 const struct nf_nat_range *range) 145 const struct nf_nat_range *range)
149{ 146{
150 unsigned int h = hash_by_src(tuple); 147 unsigned int h = hash_by_src(net, tuple);
151 const struct nf_conn_nat *nat; 148 const struct nf_conn_nat *nat;
152 const struct nf_conn *ct; 149 const struct nf_conn *ct;
153 const struct hlist_node *n; 150 const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
330 if (have_to_hash) { 327 if (have_to_hash) {
331 unsigned int srchash; 328 unsigned int srchash;
332 329
333 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 330 srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
334 spin_lock_bh(&nf_nat_lock); 331 spin_lock_bh(&nf_nat_lock);
335 /* nf_conntrack_alter_reply might re-allocate extension area */ 332 /* nf_conntrack_alter_reply might re-allocate extension area */
336 nat = nfct_nat(ct); 333 nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
679 676
680static int __net_init nf_nat_net_init(struct net *net) 677static int __net_init nf_nat_net_init(struct net *net)
681{ 678{
682 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 679 /* Leave them the same for the moment. */
683 &net->ipv4.nat_vmalloced, 0); 680 net->ipv4.nat_htable_size = net->ct.htable_size;
681 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
682 &net->ipv4.nat_vmalloced, 0);
684 if (!net->ipv4.nat_bysource) 683 if (!net->ipv4.nat_bysource)
685 return -ENOMEM; 684 return -ENOMEM;
686 return 0; 685 return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
703 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 702 nf_ct_iterate_cleanup(net, &clean_nat, NULL);
704 synchronize_rcu(); 703 synchronize_rcu();
705 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, 704 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
706 nf_nat_htable_size); 705 net->ipv4.nat_htable_size);
707} 706}
708 707
709static struct pernet_operations nf_nat_net_ops = { 708static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
724 return ret; 723 return ret;
725 } 724 }
726 725
727 /* Leave them the same for the moment. */
728 nf_nat_htable_size = nf_conntrack_htable_size;
729
730 ret = register_pernet_subsys(&nf_nat_net_ops); 726 ret = register_pernet_subsys(&nf_nat_net_ops);
731 if (ret < 0) 727 if (ret < 0)
732 goto cleanup_extend; 728 goto cleanup_extend;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28e029632493..3fddc69ccccc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5783,11 +5783,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5783 5783
5784 /* tcp_ack considers this ACK as duplicate 5784 /* tcp_ack considers this ACK as duplicate
5785 * and does not calculate rtt. 5785 * and does not calculate rtt.
5786 * Fix it at least with timestamps. 5786 * Force it here.
5787 */ 5787 */
5788 if (tp->rx_opt.saw_tstamp && 5788 tcp_ack_update_rtt(sk, 0, 0);
5789 tp->rx_opt.rcv_tsecr && !tp->srtt)
5790 tcp_ack_saw_tstamp(sk, 0);
5791 5789
5792 if (tp->rx_opt.tstamp_ok) 5790 if (tp->rx_opt.tstamp_ok)
5793 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5791 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index de7a194a64ab..143791da062c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -502,8 +502,11 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
502 if (p == &net->ipv6.devconf_dflt->forwarding) 502 if (p == &net->ipv6.devconf_dflt->forwarding)
503 return 0; 503 return 0;
504 504
505 if (!rtnl_trylock()) 505 if (!rtnl_trylock()) {
506 /* Restore the original values before restarting */
507 *p = old;
506 return restart_syscall(); 508 return restart_syscall();
509 }
507 510
508 if (p == &net->ipv6.devconf_all->forwarding) { 511 if (p == &net->ipv6.devconf_all->forwarding) {
509 __s32 newf = net->ipv6.devconf_all->forwarding; 512 __s32 newf = net->ipv6.devconf_all->forwarding;
@@ -4028,12 +4031,15 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
4028{ 4031{
4029 int *valp = ctl->data; 4032 int *valp = ctl->data;
4030 int val = *valp; 4033 int val = *valp;
4034 loff_t pos = *ppos;
4031 int ret; 4035 int ret;
4032 4036
4033 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4037 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
4034 4038
4035 if (write) 4039 if (write)
4036 ret = addrconf_fixup_forwarding(ctl, valp, val); 4040 ret = addrconf_fixup_forwarding(ctl, valp, val);
4041 if (ret)
4042 *ppos = pos;
4037 return ret; 4043 return ret;
4038} 4044}
4039 4045
@@ -4075,8 +4081,11 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
4075 if (p == &net->ipv6.devconf_dflt->disable_ipv6) 4081 if (p == &net->ipv6.devconf_dflt->disable_ipv6)
4076 return 0; 4082 return 0;
4077 4083
4078 if (!rtnl_trylock()) 4084 if (!rtnl_trylock()) {
4085 /* Restore the original values before restarting */
4086 *p = old;
4079 return restart_syscall(); 4087 return restart_syscall();
4088 }
4080 4089
4081 if (p == &net->ipv6.devconf_all->disable_ipv6) { 4090 if (p == &net->ipv6.devconf_all->disable_ipv6) {
4082 __s32 newf = net->ipv6.devconf_all->disable_ipv6; 4091 __s32 newf = net->ipv6.devconf_all->disable_ipv6;
@@ -4095,12 +4104,15 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
4095{ 4104{
4096 int *valp = ctl->data; 4105 int *valp = ctl->data;
4097 int val = *valp; 4106 int val = *valp;
4107 loff_t pos = *ppos;
4098 int ret; 4108 int ret;
4099 4109
4100 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4110 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
4101 4111
4102 if (write) 4112 if (write)
4103 ret = addrconf_disable_ipv6(ctl, valp, val); 4113 ret = addrconf_disable_ipv6(ctl, valp, val);
4114 if (ret)
4115 *ppos = pos;
4104 return ret; 4116 return ret;
4105} 4117}
4106 4118
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 2f2a5ca2c878..002e6eef9120 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -154,16 +154,12 @@ static int ipcomp6_init_state(struct xfrm_state *x)
154 if (x->props.mode == XFRM_MODE_TUNNEL) { 154 if (x->props.mode == XFRM_MODE_TUNNEL) {
155 err = ipcomp6_tunnel_attach(x); 155 err = ipcomp6_tunnel_attach(x);
156 if (err) 156 if (err)
157 goto error_tunnel; 157 goto out;
158 } 158 }
159 159
160 err = 0; 160 err = 0;
161out: 161out:
162 return err; 162 return err;
163error_tunnel:
164 ipcomp_destroy(x);
165
166 goto out;
167} 163}
168 164
169static const struct xfrm_type ipcomp6_type = 165static const struct xfrm_type ipcomp6_type =
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 480d7f8c9802..8a7e0f52e177 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1164 if (t && !IS_ERR(t)) { 1164 if (t && !IS_ERR(t)) {
1165 struct ip6t_getinfo info; 1165 struct ip6t_getinfo info;
1166 const struct xt_table_info *private = t->private; 1166 const struct xt_table_info *private = t->private;
1167
1168#ifdef CONFIG_COMPAT 1167#ifdef CONFIG_COMPAT
1168 struct xt_table_info tmp;
1169
1169 if (compat) { 1170 if (compat) {
1170 struct xt_table_info tmp;
1171 ret = compat_table_info(private, &tmp); 1171 ret = compat_table_info(private, &tmp);
1172 xt_compat_flush_offsets(AF_INET6); 1172 xt_compat_flush_offsets(AF_INET6);
1173 private = &tmp; 1173 private = &tmp;
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 156020d138b5..6b3602de359a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
698 698
699 /* Query PPP channel and unit number */ 699 /* Query PPP channel and unit number */
700 case PPPIOCGCHAN: 700 case PPPIOCGCHAN:
701 lock_kernel();
701 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), 702 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
702 (int __user *)argp)) 703 (int __user *)argp))
703 err = 0; 704 err = 0;
705 unlock_kernel();
704 break; 706 break;
705 case PPPIOCGUNIT: 707 case PPPIOCGUNIT:
706 lock_kernel(); 708 lock_kernel();
707 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), 709 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
708 (int __user *)argp)) 710 (int __user *)argp))
709 err = 0; 711 err = 0;
712 unlock_kernel();
710 break; 713 break;
711 714
712 /* All these ioctls can be passed both directly and from ppp_generic, 715 /* All these ioctls can be passed both directly and from ppp_generic,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 76fa6fef6473..539f43bc97db 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = {
3794 3794
3795static void __exit ipsec_pfkey_exit(void) 3795static void __exit ipsec_pfkey_exit(void)
3796{ 3796{
3797 unregister_pernet_subsys(&pfkey_net_ops);
3798 xfrm_unregister_km(&pfkeyv2_mgr); 3797 xfrm_unregister_km(&pfkeyv2_mgr);
3799 sock_unregister(PF_KEY); 3798 sock_unregister(PF_KEY);
3799 unregister_pernet_subsys(&pfkey_net_ops);
3800 proto_unregister(&key_proto); 3800 proto_unregister(&key_proto);
3801} 3801}
3802 3802
@@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void)
3807 if (err != 0) 3807 if (err != 0)
3808 goto out; 3808 goto out;
3809 3809
3810 err = sock_register(&pfkey_family_ops); 3810 err = register_pernet_subsys(&pfkey_net_ops);
3811 if (err != 0) 3811 if (err != 0)
3812 goto out_unregister_key_proto; 3812 goto out_unregister_key_proto;
3813 err = sock_register(&pfkey_family_ops);
3814 if (err != 0)
3815 goto out_unregister_pernet;
3813 err = xfrm_register_km(&pfkeyv2_mgr); 3816 err = xfrm_register_km(&pfkeyv2_mgr);
3814 if (err != 0) 3817 if (err != 0)
3815 goto out_sock_unregister; 3818 goto out_sock_unregister;
3816 err = register_pernet_subsys(&pfkey_net_ops);
3817 if (err != 0)
3818 goto out_xfrm_unregister_km;
3819out: 3819out:
3820 return err; 3820 return err;
3821out_xfrm_unregister_km: 3821
3822 xfrm_unregister_km(&pfkeyv2_mgr);
3823out_sock_unregister: 3822out_sock_unregister:
3824 sock_unregister(PF_KEY); 3823 sock_unregister(PF_KEY);
3824out_unregister_pernet:
3825 unregister_pernet_subsys(&pfkey_net_ops);
3825out_unregister_key_proto: 3826out_unregister_key_proto:
3826 proto_unregister(&key_proto); 3827 proto_unregister(&key_proto);
3827 goto out; 3828 goto out;
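The af_key reordering enforces the usual rule that the exit path mirrors init exactly: per-net state is now registered before the socket family becomes reachable, and torn down only after the family is gone, so no PF_KEY socket can observe a namespace without its pernet data. The skeleton, with illustrative subsystem names:

```c
static int  proto_init(void)  { return 0; }   /* all stand-in subsystems */
static void proto_exit(void)  { }
static int  pernet_init(void) { return 0; }
static void pernet_exit(void) { }
static int  family_init(void) { return 0; }
static void family_exit(void) { }

static int mod_init(void)
{
    int err = proto_init();
    if (err)
        return err;
    err = pernet_init();                 /* per-namespace state first ...  */
    if (err)
        goto out_proto;
    err = family_init();                 /* ... only then become reachable */
    if (err)
        goto out_pernet;
    return 0;

out_pernet:
    pernet_exit();
out_proto:
    proto_exit();
    return err;
}

static void mod_exit(void)
{
    family_exit();                       /* strict reverse of mod_init() */
    pernet_exit();
    proto_exit();
}

int main(void)
{
    if (mod_init() == 0)
        mod_exit();
    return 0;
}
```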
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 1f2db647bb5c..22f0c2aa7a89 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -647,7 +647,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
647 } 647 }
648 if (pos[1] != 0 && 648 if (pos[1] != 0 &&
649 (pos[1] != ifibss->ssid_len || 649 (pos[1] != ifibss->ssid_len ||
650 !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { 650 memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
651 /* Ignore ProbeReq for foreign SSID */ 651 /* Ignore ProbeReq for foreign SSID */
652 return; 652 return;
653 } 653 }
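A one-character fix with inverted semantics: memcmp() returns 0 on equality, so the old `!memcmp(...)` condition was true for a matching SSID, and the handler ignored probe requests aimed at its own network while answering foreign ones. Dropping the `!` restores the intended "ignore foreign SSIDs" test:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char ssid[] = "adhoc-net";

    /* memcmp() == 0 means "same bytes", so !memcmp() means "matches" */
    printf("ours:    memcmp != 0 -> %d\n",
           memcmp(ssid, "adhoc-net", sizeof(ssid) - 1) != 0);   /* 0: answer */
    printf("foreign: memcmp != 0 -> %d\n",
           memcmp(ssid, "other-net", sizeof(ssid) - 1) != 0);   /* 1: ignore */
    return 0;
}
```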
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b9007f80cb92..12a2bff7dcdb 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -245,6 +245,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
245 info->control.rates[i].count = 1; 245 info->control.rates[i].count = 1;
246 } 246 }
247 247
248 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
249 return;
250
248 if (sta && sdata->force_unicast_rateidx > -1) { 251 if (sta && sdata->force_unicast_rateidx > -1) {
249 info->control.rates[0].idx = sdata->force_unicast_rateidx; 252 info->control.rates[0].idx = sdata->force_unicast_rateidx;
250 } else { 253 } else {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f934c9620b73..bc17cf7d68db 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -439,6 +439,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
439 if (local->scan_req) 439 if (local->scan_req)
440 return -EBUSY; 440 return -EBUSY;
441 441
442 if (req != local->int_scan_req &&
443 sdata->vif.type == NL80211_IFTYPE_STATION &&
444 !list_empty(&ifmgd->work_list)) {
445 /* actually wait for the work it's doing to finish/time out */
446 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
447 local->scan_req = req;
448 local->scan_sdata = sdata;
449 return 0;
450 }
451
442 if (local->ops->hw_scan) { 452 if (local->ops->hw_scan) {
443 u8 *ies; 453 u8 *ies;
444 454
@@ -463,14 +473,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
463 local->scan_req = req; 473 local->scan_req = req;
464 local->scan_sdata = sdata; 474 local->scan_sdata = sdata;
465 475
466 if (req != local->int_scan_req &&
467 sdata->vif.type == NL80211_IFTYPE_STATION &&
468 !list_empty(&ifmgd->work_list)) {
469 /* actually wait for the work it's doing to finish/time out */
470 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
471 return 0;
472 }
473
474 if (local->ops->hw_scan) 476 if (local->ops->hw_scan)
475 __set_bit(SCAN_HW_SCANNING, &local->scanning); 477 __set_bit(SCAN_HW_SCANNING, &local->scanning);
476 else 478 else
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0e98c3282d42..4d79e3c1616c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/socket.h> 31#include <linux/socket.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/nsproxy.h>
33#include <linux/rculist_nulls.h> 34#include <linux/rculist_nulls.h>
34 35
35#include <net/netfilter/nf_conntrack.h> 36#include <net/netfilter/nf_conntrack.h>
@@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
63struct nf_conn nf_conntrack_untracked __read_mostly; 64struct nf_conn nf_conntrack_untracked __read_mostly;
64EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 65EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
65 66
66static struct kmem_cache *nf_conntrack_cachep __read_mostly;
67
68static int nf_conntrack_hash_rnd_initted; 67static int nf_conntrack_hash_rnd_initted;
69static unsigned int nf_conntrack_hash_rnd; 68static unsigned int nf_conntrack_hash_rnd;
70 69
@@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
86 return ((u64)h * size) >> 32; 85 return ((u64)h * size) >> 32;
87} 86}
88 87
89static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) 88static inline u_int32_t hash_conntrack(const struct net *net,
89 const struct nf_conntrack_tuple *tuple)
90{ 90{
91 return __hash_conntrack(tuple, nf_conntrack_htable_size, 91 return __hash_conntrack(tuple, net->ct.htable_size,
92 nf_conntrack_hash_rnd); 92 nf_conntrack_hash_rnd);
93} 93}
94 94
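With the conntrack table now per namespace, hash_conntrack() takes the net and reads net->ct.htable_size, but the range reduction in __hash_conntrack() is unchanged and worth noting: `((u64)h * size) >> 32` maps a full 32-bit hash onto [0, size) with one multiply instead of a modulo, and works for any table size, not just powers of two. Stand-alone:

```c
#include <stdint.h>
#include <stdio.h>

/* multiply-shift range reduction: floor(h / 2^32 * size) */
static uint32_t bucket_of(uint32_t h, uint32_t size)
{
    return (uint32_t)(((uint64_t)h * size) >> 32);
}

int main(void)
{
    uint32_t size = 16384;               /* e.g. a conntrack table size */

    printf("%u %u %u\n",
           bucket_of(0x00000000u, size), /* 0          */
           bucket_of(0x80000000u, size), /* size / 2   */
           bucket_of(0xffffffffu, size));/* size - 1   */
    return 0;
}
```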
@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
296{ 296{
297 struct nf_conntrack_tuple_hash *h; 297 struct nf_conntrack_tuple_hash *h;
298 struct hlist_nulls_node *n; 298 struct hlist_nulls_node *n;
299 unsigned int hash = hash_conntrack(tuple); 299 unsigned int hash = hash_conntrack(net, tuple);
300 300
301 /* Disable BHs the entire time since we normally need to disable them 301 /* Disable BHs the entire time since we normally need to disable them
302 * at least once for the stats anyway. 302 * at least once for the stats anyway.
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
366 366
367void nf_conntrack_hash_insert(struct nf_conn *ct) 367void nf_conntrack_hash_insert(struct nf_conn *ct)
368{ 368{
369 struct net *net = nf_ct_net(ct);
369 unsigned int hash, repl_hash; 370 unsigned int hash, repl_hash;
370 371
371 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 372 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
372 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 373 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
373 374
374 __nf_conntrack_hash_insert(ct, hash, repl_hash); 375 __nf_conntrack_hash_insert(ct, hash, repl_hash);
375} 376}
@@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
397 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 398 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
398 return NF_ACCEPT; 399 return NF_ACCEPT;
399 400
400 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 401 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
401 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 402 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
402 403
403 /* We're not in hash table, and we refuse to set up related 404 /* We're not in hash table, and we refuse to set up related
404 connections for unconfirmed conns. But packet copies and 405 connections for unconfirmed conns. But packet copies and
@@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
468 struct net *net = nf_ct_net(ignored_conntrack); 469 struct net *net = nf_ct_net(ignored_conntrack);
469 struct nf_conntrack_tuple_hash *h; 470 struct nf_conntrack_tuple_hash *h;
470 struct hlist_nulls_node *n; 471 struct hlist_nulls_node *n;
471 unsigned int hash = hash_conntrack(tuple); 472 unsigned int hash = hash_conntrack(net, tuple);
472 473
473 /* Disable BHs the entire time since we need to disable them at 474 /* Disable BHs the entire time since we need to disable them at
474 * least once for the stats anyway. 475 * least once for the stats anyway.
@@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
503 int dropped = 0; 504 int dropped = 0;
504 505
505 rcu_read_lock(); 506 rcu_read_lock();
506 for (i = 0; i < nf_conntrack_htable_size; i++) { 507 for (i = 0; i < net->ct.htable_size; i++) {
507 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], 508 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
508 hnnode) { 509 hnnode) {
509 tmp = nf_ct_tuplehash_to_ctrack(h); 510 tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
523 if (cnt >= NF_CT_EVICTION_RANGE) 524 if (cnt >= NF_CT_EVICTION_RANGE)
524 break; 525 break;
525 526
526 hash = (hash + 1) % nf_conntrack_htable_size; 527 hash = (hash + 1) % net->ct.htable_size;
527 } 528 }
528 rcu_read_unlock(); 529 rcu_read_unlock();
529 530
@@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
557 558
558 if (nf_conntrack_max && 559 if (nf_conntrack_max &&
559 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 560 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
560 unsigned int hash = hash_conntrack(orig); 561 unsigned int hash = hash_conntrack(net, orig);
561 if (!early_drop(net, hash)) { 562 if (!early_drop(net, hash)) {
562 atomic_dec(&net->ct.count); 563 atomic_dec(&net->ct.count);
563 if (net_ratelimit()) 564 if (net_ratelimit())
@@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
572 * Do not use kmem_cache_zalloc(), as this cache uses 573 * Do not use kmem_cache_zalloc(), as this cache uses
573 * SLAB_DESTROY_BY_RCU. 574 * SLAB_DESTROY_BY_RCU.
574 */ 575 */
575 ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); 576 ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
576 if (ct == NULL) { 577 if (ct == NULL) {
577 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 578 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
578 atomic_dec(&net->ct.count); 579 atomic_dec(&net->ct.count);
@@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct)
611 nf_ct_ext_destroy(ct); 612 nf_ct_ext_destroy(ct);
612 atomic_dec(&net->ct.count); 613 atomic_dec(&net->ct.count);
613 nf_ct_ext_free(ct); 614 nf_ct_ext_free(ct);
614 kmem_cache_free(nf_conntrack_cachep, ct); 615 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
615} 616}
616EXPORT_SYMBOL_GPL(nf_conntrack_free); 617EXPORT_SYMBOL_GPL(nf_conntrack_free);
617 618
@@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1014 struct hlist_nulls_node *n; 1015 struct hlist_nulls_node *n;
1015 1016
1016 spin_lock_bh(&nf_conntrack_lock); 1017 spin_lock_bh(&nf_conntrack_lock);
1017 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 1018 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1018 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { 1019 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1019 ct = nf_ct_tuplehash_to_ctrack(h); 1020 ct = nf_ct_tuplehash_to_ctrack(h);
1020 if (iter(ct, data)) 1021 if (iter(ct, data))
@@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net)
1113 1114
1114static void nf_conntrack_cleanup_init_net(void) 1115static void nf_conntrack_cleanup_init_net(void)
1115{ 1116{
1117 /* wait until all references to nf_conntrack_untracked are dropped */
1118 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1119 schedule();
1120
1116 nf_conntrack_helper_fini(); 1121 nf_conntrack_helper_fini();
1117 nf_conntrack_proto_fini(); 1122 nf_conntrack_proto_fini();
1118 kmem_cache_destroy(nf_conntrack_cachep);
1119} 1123}
1120 1124
1121static void nf_conntrack_cleanup_net(struct net *net) 1125static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
1127 schedule(); 1131 schedule();
1128 goto i_see_dead_people; 1132 goto i_see_dead_people;
1129 } 1133 }
1130 /* wait until all references to nf_conntrack_untracked are dropped */
1131 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1132 schedule();
1133 1134
1134 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1135 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1135 nf_conntrack_htable_size); 1136 net->ct.htable_size);
1136 nf_conntrack_ecache_fini(net); 1137 nf_conntrack_ecache_fini(net);
1137 nf_conntrack_acct_fini(net); 1138 nf_conntrack_acct_fini(net);
1138 nf_conntrack_expect_fini(net); 1139 nf_conntrack_expect_fini(net);
1140 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1141 kfree(net->ct.slabname);
1139 free_percpu(net->ct.stat); 1142 free_percpu(net->ct.stat);
1140} 1143}
1141 1144
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1190{ 1193{
1191 int i, bucket, vmalloced, old_vmalloced; 1194 int i, bucket, vmalloced, old_vmalloced;
1192 unsigned int hashsize, old_size; 1195 unsigned int hashsize, old_size;
1193 int rnd;
1194 struct hlist_nulls_head *hash, *old_hash; 1196 struct hlist_nulls_head *hash, *old_hash;
1195 struct nf_conntrack_tuple_hash *h; 1197 struct nf_conntrack_tuple_hash *h;
1196 1198
1199 if (current->nsproxy->net_ns != &init_net)
1200 return -EOPNOTSUPP;
1201
1197 /* On boot, we can set this without any fancy locking. */ 1202 /* On boot, we can set this without any fancy locking. */
1198 if (!nf_conntrack_htable_size) 1203 if (!nf_conntrack_htable_size)
1199 return param_set_uint(val, kp); 1204 return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1206 if (!hash) 1211 if (!hash)
1207 return -ENOMEM; 1212 return -ENOMEM;
1208 1213
1209 /* We have to rehahs for the new table anyway, so we also can
1210 * use a newrandom seed */
1211 get_random_bytes(&rnd, sizeof(rnd));
1212
1213 /* Lookups in the old hash might happen in parallel, which means we 1214 /* Lookups in the old hash might happen in parallel, which means we
1214 * might get false negatives during connection lookup. New connections 1215 * might get false negatives during connection lookup. New connections
1215 * created because of a false negative won't make it into the hash 1216 * created because of a false negative won't make it into the hash
1216 * though since that required taking the lock. 1217 * though since that required taking the lock.
1217 */ 1218 */
1218 spin_lock_bh(&nf_conntrack_lock); 1219 spin_lock_bh(&nf_conntrack_lock);
1219 for (i = 0; i < nf_conntrack_htable_size; i++) { 1220 for (i = 0; i < init_net.ct.htable_size; i++) {
1220 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1221 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1221 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1222 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1222 struct nf_conntrack_tuple_hash, hnnode); 1223 struct nf_conntrack_tuple_hash, hnnode);
1223 hlist_nulls_del_rcu(&h->hnnode); 1224 hlist_nulls_del_rcu(&h->hnnode);
1224 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1225 bucket = __hash_conntrack(&h->tuple, hashsize,
1226 nf_conntrack_hash_rnd);
1225 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1227 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1226 } 1228 }
1227 } 1229 }
1228 old_size = nf_conntrack_htable_size; 1230 old_size = init_net.ct.htable_size;
1229 old_vmalloced = init_net.ct.hash_vmalloc; 1231 old_vmalloced = init_net.ct.hash_vmalloc;
1230 old_hash = init_net.ct.hash; 1232 old_hash = init_net.ct.hash;
1231 1233
1232 nf_conntrack_htable_size = hashsize; 1234 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1233 init_net.ct.hash_vmalloc = vmalloced; 1235 init_net.ct.hash_vmalloc = vmalloced;
1234 init_net.ct.hash = hash; 1236 init_net.ct.hash = hash;
1235 nf_conntrack_hash_rnd = rnd;
1236 spin_unlock_bh(&nf_conntrack_lock); 1237 spin_unlock_bh(&nf_conntrack_lock);
1237 1238
1238 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); 1239 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void)
1271 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1272 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1272 nf_conntrack_max); 1273 nf_conntrack_max);
1273 1274
1274 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1275 sizeof(struct nf_conn),
1276 0, SLAB_DESTROY_BY_RCU, NULL);
1277 if (!nf_conntrack_cachep) {
1278 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1279 ret = -ENOMEM;
1280 goto err_cache;
1281 }
1282
1283 ret = nf_conntrack_proto_init(); 1275 ret = nf_conntrack_proto_init();
1284 if (ret < 0) 1276 if (ret < 0)
1285 goto err_proto; 1277 goto err_proto;
@@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void)
1288 if (ret < 0) 1280 if (ret < 0)
1289 goto err_helper; 1281 goto err_helper;
1290 1282
1283 /* Set up fake conntrack: to never be deleted, not in any hashes */
1284#ifdef CONFIG_NET_NS
1285 nf_conntrack_untracked.ct_net = &init_net;
1286#endif
1287 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1288 /* - and look it like as a confirmed connection */
1289 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1290
1291 return 0; 1291 return 0;
1292 1292
1293err_helper: 1293err_helper:
1294 nf_conntrack_proto_fini(); 1294 nf_conntrack_proto_fini();
1295err_proto: 1295err_proto:
1296 kmem_cache_destroy(nf_conntrack_cachep);
1297err_cache:
1298 return ret; 1296 return ret;
1299} 1297}
1300 1298
@@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net)
1316 ret = -ENOMEM; 1314 ret = -ENOMEM;
1317 goto err_stat; 1315 goto err_stat;
1318 } 1316 }
1319 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1317
1318 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1319 if (!net->ct.slabname) {
1320 ret = -ENOMEM;
1321 goto err_slabname;
1322 }
1323
1324 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1325 sizeof(struct nf_conn), 0,
1326 SLAB_DESTROY_BY_RCU, NULL);
1327 if (!net->ct.nf_conntrack_cachep) {
1328 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1329 ret = -ENOMEM;
1330 goto err_cache;
1331 }
1332
1333 net->ct.htable_size = nf_conntrack_htable_size;
1334 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
1320 &net->ct.hash_vmalloc, 1); 1335 &net->ct.hash_vmalloc, 1);
1321 if (!net->ct.hash) { 1336 if (!net->ct.hash) {
1322 ret = -ENOMEM; 1337 ret = -ENOMEM;
@@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net)
1333 if (ret < 0) 1348 if (ret < 0)
1334 goto err_ecache; 1349 goto err_ecache;
1335 1350
1336 /* Set up fake conntrack:
1337 - to never be deleted, not in any hashes */
1338#ifdef CONFIG_NET_NS
1339 nf_conntrack_untracked.ct_net = &init_net;
1340#endif
1341 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1342 /* - and look it like as a confirmed connection */
1343 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1344
1345 return 0; 1351 return 0;
1346 1352
1347err_ecache: 1353err_ecache:
@@ -1350,8 +1356,12 @@ err_acct:
1350 nf_conntrack_expect_fini(net); 1356 nf_conntrack_expect_fini(net);
1351err_expect: 1357err_expect:
1352 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1358 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1353 nf_conntrack_htable_size); 1359 net->ct.htable_size);
1354err_hash: 1360err_hash:
1361 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1362err_cache:
1363 kfree(net->ct.slabname);
1364err_slabname:
1355 free_percpu(net->ct.stat); 1365 free_percpu(net->ct.stat);
1356err_stat: 1366err_stat:
1357 return ret; 1367 return ret;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index fdf5d2a1d9b4..2f25ff610982 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net)
569#endif /* CONFIG_PROC_FS */ 569#endif /* CONFIG_PROC_FS */
570} 570}
571 571
572module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); 572module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
573 573
574int nf_conntrack_expect_init(struct net *net) 574int nf_conntrack_expect_init(struct net *net)
575{ 575{
@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)
577 577
578 if (net_eq(net, &init_net)) { 578 if (net_eq(net, &init_net)) {
579 if (!nf_ct_expect_hsize) { 579 if (!nf_ct_expect_hsize) {
580 nf_ct_expect_hsize = nf_conntrack_htable_size / 256; 580 nf_ct_expect_hsize = net->ct.htable_size / 256;
581 if (!nf_ct_expect_hsize) 581 if (!nf_ct_expect_hsize)
582 nf_ct_expect_hsize = 1; 582 nf_ct_expect_hsize = 1;
583 } 583 }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3afc..4b1a56bd074c 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
192 /* Get rid of expecteds, set helpers to NULL. */ 192 /* Get rid of expecteds, set helpers to NULL. */
193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) 193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
194 unhelp(h, me); 194 unhelp(h, me);
195 for (i = 0; i < nf_conntrack_htable_size; i++) { 195 for (i = 0; i < net->ct.htable_size; i++) {
196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
197 unhelp(h, me); 197 unhelp(h, me);
198 } 198 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 42f21c01a93e..0ffe689dfe97 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
594 594
595 rcu_read_lock(); 595 rcu_read_lock();
596 last = (struct nf_conn *)cb->args[1]; 596 last = (struct nf_conn *)cb->args[1];
597 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 597 for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
598restart: 598restart:
599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
600 hnnode) { 600 hnnode) {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef7..e310f1561bb2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
51 struct hlist_nulls_node *n; 51 struct hlist_nulls_node *n;
52 52
53 for (st->bucket = 0; 53 for (st->bucket = 0;
54 st->bucket < nf_conntrack_htable_size; 54 st->bucket < net->ct.htable_size;
55 st->bucket++) { 55 st->bucket++) {
56 n = rcu_dereference(net->ct.hash[st->bucket].first); 56 n = rcu_dereference(net->ct.hash[st->bucket].first);
57 if (!is_a_nulls(n)) 57 if (!is_a_nulls(n))
@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
69 head = rcu_dereference(head->next); 69 head = rcu_dereference(head->next);
70 while (is_a_nulls(head)) { 70 while (is_a_nulls(head)) {
71 if (likely(get_nulls_value(head) == st->bucket)) { 71 if (likely(get_nulls_value(head) == st->bucket)) {
72 if (++st->bucket >= nf_conntrack_htable_size) 72 if (++st->bucket >= net->ct.htable_size)
73 return NULL; 73 return NULL;
74 } 74 }
75 head = rcu_dereference(net->ct.hash[st->bucket].first); 75 head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = {
355 }, 355 },
356 { 356 {
357 .procname = "nf_conntrack_buckets", 357 .procname = "nf_conntrack_buckets",
358 .data = &nf_conntrack_htable_size, 358 .data = &init_net.ct.htable_size,
359 .maxlen = sizeof(unsigned int), 359 .maxlen = sizeof(unsigned int),
360 .mode = 0444, 360 .mode = 0444,
361 .proc_handler = proc_dointvec, 361 .proc_handler = proc_dointvec,
@@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
421 goto out_kmemdup; 421 goto out_kmemdup;
422 422
423 table[1].data = &net->ct.count; 423 table[1].data = &net->ct.count;
424 table[2].data = &net->ct.htable_size;
424 table[3].data = &net->ct.sysctl_checksum; 425 table[3].data = &net->ct.sysctl_checksum;
425 table[4].data = &net->ct.sysctl_log_invalid; 426 table[4].data = &net->ct.sysctl_log_invalid;
426 427
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4957bf2ca60..4c5972ba8c78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
455 if (nl_table[protocol].registered && 455 if (nl_table[protocol].registered &&
456 try_module_get(nl_table[protocol].module)) 456 try_module_get(nl_table[protocol].module))
457 module = nl_table[protocol].module; 457 module = nl_table[protocol].module;
458 else
459 err = -EPROTONOSUPPORT;
458 cb_mutex = nl_table[protocol].cb_mutex; 460 cb_mutex = nl_table[protocol].cb_mutex;
459 netlink_unlock_table(); 461 netlink_unlock_table();
460 462
463 if (err < 0)
464 goto out;
465
461 err = __netlink_create(net, sock, cb_mutex, protocol); 466 err = __netlink_create(net, sock, cb_mutex, protocol);
462 if (err < 0) 467 if (err < 0)
463 goto out_module; 468 goto out_module;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 929218a47620..21f9c7678aa3 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -433,7 +433,7 @@ config NET_ACT_POLICE
433 module. 433 module.
434 434
435 To compile this code as a module, choose M here: the 435 To compile this code as a module, choose M here: the
436 module will be called police. 436 module will be called act_police.
437 437
438config NET_ACT_GACT 438config NET_ACT_GACT
439 tristate "Generic actions" 439 tristate "Generic actions"
@@ -443,7 +443,7 @@ config NET_ACT_GACT
443 accepting packets. 443 accepting packets.
444 444
445 To compile this code as a module, choose M here: the 445 To compile this code as a module, choose M here: the
446 module will be called gact. 446 module will be called act_gact.
447 447
448config GACT_PROB 448config GACT_PROB
449 bool "Probability support" 449 bool "Probability support"
@@ -459,7 +459,7 @@ config NET_ACT_MIRRED
459 other devices. 459 other devices.
460 460
461 To compile this code as a module, choose M here: the 461 To compile this code as a module, choose M here: the
462 module will be called mirred. 462 module will be called act_mirred.
463 463
464config NET_ACT_IPT 464config NET_ACT_IPT
465 tristate "IPtables targets" 465 tristate "IPtables targets"
@@ -469,7 +469,7 @@ config NET_ACT_IPT
469 classification. 469 classification.
470 470
471 To compile this code as a module, choose M here: the 471 To compile this code as a module, choose M here: the
472 module will be called ipt. 472 module will be called act_ipt.
473 473
474config NET_ACT_NAT 474config NET_ACT_NAT
475 tristate "Stateless NAT" 475 tristate "Stateless NAT"
@@ -479,7 +479,7 @@ config NET_ACT_NAT
479 netfilter for NAT unless you know what you are doing. 479 netfilter for NAT unless you know what you are doing.
480 480
481 To compile this code as a module, choose M here: the 481 To compile this code as a module, choose M here: the
482 module will be called nat. 482 module will be called act_nat.
483 483
484config NET_ACT_PEDIT 484config NET_ACT_PEDIT
485 tristate "Packet Editing" 485 tristate "Packet Editing"
@@ -488,7 +488,7 @@ config NET_ACT_PEDIT
488 Say Y here if you want to mangle the content of packets. 488 Say Y here if you want to mangle the content of packets.
489 489
490 To compile this code as a module, choose M here: the 490 To compile this code as a module, choose M here: the
491 module will be called pedit. 491 module will be called act_pedit.
492 492
493config NET_ACT_SIMP 493config NET_ACT_SIMP
494 tristate "Simple Example (Debug)" 494 tristate "Simple Example (Debug)"
@@ -502,7 +502,7 @@ config NET_ACT_SIMP
502 If unsure, say N. 502 If unsure, say N.
503 503
504 To compile this code as a module, choose M here: the 504 To compile this code as a module, choose M here: the
505 module will be called simple. 505 module will be called act_simple.
506 506
507config NET_ACT_SKBEDIT 507config NET_ACT_SKBEDIT
508 tristate "SKB Editing" 508 tristate "SKB Editing"
@@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT
513 If unsure, say N. 513 If unsure, say N.
514 514
515 To compile this code as a module, choose M here: the 515 To compile this code as a module, choose M here: the
516 module will be called skbedit. 516 module will be called act_skbedit.
517 517
518config NET_CLS_IND 518config NET_CLS_IND
519 bool "Incoming device classification" 519 bool "Incoming device classification"
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b36cc344474b..f445ea1c5f52 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1102,7 +1102,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1102 int err = -ENOMEM; 1102 int err = -ENOMEM;
1103 struct xfrm_state *x = xfrm_state_alloc(net); 1103 struct xfrm_state *x = xfrm_state_alloc(net);
1104 if (!x) 1104 if (!x)
1105 goto error; 1105 goto out;
1106 1106
1107 memcpy(&x->id, &orig->id, sizeof(x->id)); 1107 memcpy(&x->id, &orig->id, sizeof(x->id));
1108 memcpy(&x->sel, &orig->sel, sizeof(x->sel)); 1108 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
@@ -1160,16 +1160,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1160 return x; 1160 return x;
1161 1161
1162 error: 1162 error:
1163 xfrm_state_put(x);
1164out:
1163 if (errp) 1165 if (errp)
1164 *errp = err; 1166 *errp = err;
1165 if (x) {
1166 kfree(x->aalg);
1167 kfree(x->ealg);
1168 kfree(x->calg);
1169 kfree(x->encap);
1170 kfree(x->coaddr);
1171 }
1172 kfree(x);
1173 return NULL; 1167 return NULL;
1174} 1168}
1175 1169
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
index c69cbe9b2426..bd0f337afcab 100644
--- a/samples/hw_breakpoint/data_breakpoint.c
+++ b/samples/hw_breakpoint/data_breakpoint.c
@@ -34,7 +34,7 @@
34#include <linux/perf_event.h> 34#include <linux/perf_event.h>
35#include <linux/hw_breakpoint.h> 35#include <linux/hw_breakpoint.h>
36 36
37struct perf_event **sample_hbp; 37struct perf_event * __percpu *sample_hbp;
38 38
39static char ksym_name[KSYM_NAME_LEN] = "pid_max"; 39static char ksym_name[KSYM_NAME_LEN] = "pid_max";
40module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO); 40module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
@@ -61,8 +61,8 @@ static int __init hw_break_module_init(void)
61 attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; 61 attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
62 62
63 sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler); 63 sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler);
64 if (IS_ERR(sample_hbp)) { 64 if (IS_ERR((void __force *)sample_hbp)) {
65 ret = PTR_ERR(sample_hbp); 65 ret = PTR_ERR((void __force *)sample_hbp);
66 goto fail; 66 goto fail;
67 } 67 }
68 68
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index c41afe6639a0..47fb65d1fcbd 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -65,7 +65,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
65 const char *cause, int result, int info); 65 const char *cause, int result, int info);
66 66
67/* Internal IMA function definitions */ 67/* Internal IMA function definitions */
68void ima_iintcache_init(void);
69int ima_init(void); 68int ima_init(void);
70void ima_cleanup(void); 69void ima_cleanup(void);
71int ima_fs_init(void); 70int ima_fs_init(void);
@@ -131,7 +130,7 @@ void iint_free(struct kref *kref);
131void iint_rcu_free(struct rcu_head *rcu); 130void iint_rcu_free(struct rcu_head *rcu);
132 131
133/* IMA policy related functions */ 132/* IMA policy related functions */
134enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK }; 133enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
135 134
136int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask); 135int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
137void ima_init_policy(void); 136void ima_init_policy(void);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 3cd58b60afd2..2a5e0bcf3887 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -95,12 +95,12 @@ err_out:
95 * ima_must_measure - measure decision based on policy. 95 * ima_must_measure - measure decision based on policy.
96 * @inode: pointer to inode to measure 96 * @inode: pointer to inode to measure
97 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) 97 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
98 * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP) 98 * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
99 * 99 *
100 * The policy is defined in terms of keypairs: 100 * The policy is defined in terms of keypairs:
101 * subj=, obj=, type=, func=, mask=, fsmagic= 101 * subj=, obj=, type=, func=, mask=, fsmagic=
102 * subj,obj, and type: are LSM specific. 102 * subj,obj, and type: are LSM specific.
103 * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP 103 * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
104 * mask: contains the permission mask 104 * mask: contains the permission mask
105 * fsmagic: hex value 105 * fsmagic: hex value
106 * 106 *
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index fa592ff1ac1c..0d83edcfc402 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -52,9 +52,6 @@ int ima_inode_alloc(struct inode *inode)
52 struct ima_iint_cache *iint = NULL; 52 struct ima_iint_cache *iint = NULL;
53 int rc = 0; 53 int rc = 0;
54 54
55 if (!ima_initialized)
56 return 0;
57
58 iint = kmem_cache_alloc(iint_cache, GFP_NOFS); 55 iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
59 if (!iint) 56 if (!iint)
60 return -ENOMEM; 57 return -ENOMEM;
@@ -118,8 +115,6 @@ void ima_inode_free(struct inode *inode)
118{ 115{
119 struct ima_iint_cache *iint; 116 struct ima_iint_cache *iint;
120 117
121 if (!ima_initialized)
122 return;
123 spin_lock(&ima_iint_lock); 118 spin_lock(&ima_iint_lock);
124 iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode); 119 iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
125 spin_unlock(&ima_iint_lock); 120 spin_unlock(&ima_iint_lock);
@@ -141,9 +136,11 @@ static void init_once(void *foo)
141 kref_set(&iint->refcount, 1); 136 kref_set(&iint->refcount, 1);
142} 137}
143 138
144void __init ima_iintcache_init(void) 139static int __init ima_iintcache_init(void)
145{ 140{
146 iint_cache = 141 iint_cache =
147 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, 142 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
148 SLAB_PANIC, init_once); 143 SLAB_PANIC, init_once);
144 return 0;
149} 145}
146security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index a89f44d5e030..294b005d6520 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -14,7 +14,7 @@
14 * 14 *
15 * File: ima_main.c 15 * File: ima_main.c
16 * implements the IMA hooks: ima_bprm_check, ima_file_mmap, 16 * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
17 * and ima_path_check. 17 * and ima_file_check.
18 */ 18 */
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/file.h> 20#include <linux/file.h>
@@ -84,6 +84,36 @@ out:
84 return found; 84 return found;
85} 85}
86 86
87/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
88 *
89 * When opening a file for read, if the file is already open for write,
90 * the file could change, resulting in a file measurement error.
91 *
92 * Opening a file for write, if the file is already open for read, results
93 * in a time of measure, time of use (ToMToU) error.
94 *
95 * In either case invalidate the PCR.
96 */
97enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
98static void ima_read_write_check(enum iint_pcr_error error,
99 struct ima_iint_cache *iint,
100 struct inode *inode,
101 const unsigned char *filename)
102{
103 switch (error) {
104 case TOMTOU:
105 if (iint->readcount > 0)
106 ima_add_violation(inode, filename, "invalid_pcr",
107 "ToMToU");
108 break;
109 case OPEN_WRITERS:
110 if (iint->writecount > 0)
111 ima_add_violation(inode, filename, "invalid_pcr",
112 "open_writers");
113 break;
114 }
115}
116
87/* 117/*
88 * Update the counts given an fmode_t 118 * Update the counts given an fmode_t
89 */ 119 */
@@ -99,6 +129,47 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
99} 129}
100 130
101/* 131/*
132 * ima_counts_get - increment file counts
133 *
134 * Maintain read/write counters for all files, but only
135 * invalidate the PCR for measured files:
136 * - Opening a file for write when already open for read,
137 * results in a time of measure, time of use (ToMToU) error.
138 * - Opening a file for read when already open for write,
139 * could result in a file measurement error.
140 *
141 */
142void ima_counts_get(struct file *file)
143{
144 struct dentry *dentry = file->f_path.dentry;
145 struct inode *inode = dentry->d_inode;
146 fmode_t mode = file->f_mode;
147 struct ima_iint_cache *iint;
148 int rc;
149
150 if (!ima_initialized || !S_ISREG(inode->i_mode))
151 return;
152 iint = ima_iint_find_get(inode);
153 if (!iint)
154 return;
155 mutex_lock(&iint->mutex);
156 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
157 if (rc < 0)
158 goto out;
159
160 if (mode & FMODE_WRITE) {
161 ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
162 goto out;
163 }
164 ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
165out:
166 ima_inc_counts(iint, file->f_mode);
167 mutex_unlock(&iint->mutex);
168
169 kref_put(&iint->refcount, iint_free);
170}
171
172/*
102 * Decrement ima counts 173 * Decrement ima counts
103 */ 174 */
104static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode, 175static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
@@ -153,123 +224,6 @@ void ima_file_free(struct file *file)
153 kref_put(&iint->refcount, iint_free); 224 kref_put(&iint->refcount, iint_free);
154} 225}
155 226
156/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
157 *
158 * When opening a file for read, if the file is already open for write,
159 * the file could change, resulting in a file measurement error.
160 *
161 * Opening a file for write, if the file is already open for read, results
162 * in a time of measure, time of use (ToMToU) error.
163 *
164 * In either case invalidate the PCR.
165 */
166enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
167static void ima_read_write_check(enum iint_pcr_error error,
168 struct ima_iint_cache *iint,
169 struct inode *inode,
170 const unsigned char *filename)
171{
172 switch (error) {
173 case TOMTOU:
174 if (iint->readcount > 0)
175 ima_add_violation(inode, filename, "invalid_pcr",
176 "ToMToU");
177 break;
178 case OPEN_WRITERS:
179 if (iint->writecount > 0)
180 ima_add_violation(inode, filename, "invalid_pcr",
181 "open_writers");
182 break;
183 }
184}
185
186static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
187 const unsigned char *filename)
188{
189 int rc = 0;
190
191 ima_inc_counts(iint, file->f_mode);
192
193 rc = ima_collect_measurement(iint, file);
194 if (!rc)
195 ima_store_measurement(iint, file, filename);
196 return rc;
197}
198
199/**
200 * ima_path_check - based on policy, collect/store measurement.
201 * @path: contains a pointer to the path to be measured
202 * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
203 *
204 * Measure the file being open for readonly, based on the
205 * ima_must_measure() policy decision.
206 *
207 * Keep read/write counters for all files, but only
208 * invalidate the PCR for measured files:
209 * - Opening a file for write when already open for read,
210 * results in a time of measure, time of use (ToMToU) error.
211 * - Opening a file for read when already open for write,
212 * could result in a file measurement error.
213 *
214 * Always return 0 and audit dentry_open failures.
215 * (Return code will be based upon measurement appraisal.)
216 */
217int ima_path_check(struct path *path, int mask)
218{
219 struct inode *inode = path->dentry->d_inode;
220 struct ima_iint_cache *iint;
221 struct file *file = NULL;
222 int rc;
223
224 if (!ima_initialized || !S_ISREG(inode->i_mode))
225 return 0;
226 iint = ima_iint_find_get(inode);
227 if (!iint)
228 return 0;
229
230 mutex_lock(&iint->mutex);
231
232 rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
233 if (rc < 0)
234 goto out;
235
236 if ((mask & MAY_WRITE) || (mask == 0))
237 ima_read_write_check(TOMTOU, iint, inode,
238 path->dentry->d_name.name);
239
240 if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
241 goto out;
242
243 ima_read_write_check(OPEN_WRITERS, iint, inode,
244 path->dentry->d_name.name);
245 if (!(iint->flags & IMA_MEASURED)) {
246 struct dentry *dentry = dget(path->dentry);
247 struct vfsmount *mnt = mntget(path->mnt);
248
249 file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
250 current_cred());
251 if (IS_ERR(file)) {
252 int audit_info = 0;
253
254 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
255 dentry->d_name.name,
256 "add_measurement",
257 "dentry_open failed",
258 1, audit_info);
259 file = NULL;
260 goto out;
261 }
262 rc = get_path_measurement(iint, file, dentry->d_name.name);
263 }
264out:
265 mutex_unlock(&iint->mutex);
266 if (file)
267 fput(file);
268 kref_put(&iint->refcount, iint_free);
269 return 0;
270}
271EXPORT_SYMBOL_GPL(ima_path_check);
272
273static int process_measurement(struct file *file, const unsigned char *filename, 227static int process_measurement(struct file *file, const unsigned char *filename,
274 int mask, int function) 228 int mask, int function)
275{ 229{
@@ -297,33 +251,6 @@ out:
297 return rc; 251 return rc;
298} 252}
299 253
300/*
301 * ima_counts_get - increment file counts
302 *
303 * - for IPC shm and shmat file.
304 * - for nfsd exported files.
305 *
306 * Increment the counts for these files to prevent unnecessary
307 * imbalance messages.
308 */
309void ima_counts_get(struct file *file)
310{
311 struct inode *inode = file->f_dentry->d_inode;
312 struct ima_iint_cache *iint;
313
314 if (!ima_initialized || !S_ISREG(inode->i_mode))
315 return;
316 iint = ima_iint_find_get(inode);
317 if (!iint)
318 return;
319 mutex_lock(&iint->mutex);
320 ima_inc_counts(iint, file->f_mode);
321 mutex_unlock(&iint->mutex);
322
323 kref_put(&iint->refcount, iint_free);
324}
325EXPORT_SYMBOL_GPL(ima_counts_get);
326
327/** 254/**
328 * ima_file_mmap - based on policy, collect/store measurement. 255 * ima_file_mmap - based on policy, collect/store measurement.
329 * @file: pointer to the file to be measured (May be NULL) 256 * @file: pointer to the file to be measured (May be NULL)
@@ -369,11 +296,31 @@ int ima_bprm_check(struct linux_binprm *bprm)
369 return 0; 296 return 0;
370} 297}
371 298
299/**
300 * ima_path_check - based on policy, collect/store measurement.
301 * @file: pointer to the file to be measured
302 * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
303 *
304 * Measure files based on the ima_must_measure() policy decision.
305 *
306 * Always return 0 and audit dentry_open failures.
307 * (Return code will be based upon measurement appraisal.)
308 */
309int ima_file_check(struct file *file, int mask)
310{
311 int rc;
312
313 rc = process_measurement(file, file->f_dentry->d_name.name,
314 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
315 FILE_CHECK);
316 return 0;
317}
318EXPORT_SYMBOL_GPL(ima_file_check);
319
372static int __init init_ima(void) 320static int __init init_ima(void)
373{ 321{
374 int error; 322 int error;
375 323
376 ima_iintcache_init();
377 error = ima_init(); 324 error = ima_init();
378 ima_initialized = 1; 325 ima_initialized = 1;
379 return error; 326 return error;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index e1278399b345..4759d0f99335 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = {
67 .flags = IMA_FUNC | IMA_MASK}, 67 .flags = IMA_FUNC | IMA_MASK},
68 {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, 68 {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
69 .flags = IMA_FUNC | IMA_MASK}, 69 .flags = IMA_FUNC | IMA_MASK},
70 {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0, 70 {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
71 .flags = IMA_FUNC | IMA_MASK | IMA_UID}, 71 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
72}; 72};
73 73
@@ -282,8 +282,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
282 break; 282 break;
283 case Opt_func: 283 case Opt_func:
284 audit_log_format(ab, "func=%s ", args[0].from); 284 audit_log_format(ab, "func=%s ", args[0].from);
285 if (strcmp(args[0].from, "PATH_CHECK") == 0) 285 if (strcmp(args[0].from, "FILE_CHECK") == 0)
286 entry->func = PATH_CHECK; 286 entry->func = FILE_CHECK;
287 /* PATH_CHECK is for backwards compat */
288 else if (strcmp(args[0].from, "PATH_CHECK") == 0)
289 entry->func = FILE_CHECK;
287 else if (strcmp(args[0].from, "FILE_MMAP") == 0) 290 else if (strcmp(args[0].from, "FILE_MMAP") == 0)
288 entry->func = FILE_MMAP; 291 entry->func = FILE_MMAP;
289 else if (strcmp(args[0].from, "BPRM_CHECK") == 0) 292 else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
diff --git a/security/security.c b/security/security.c
index 24e060be9fa5..122b748d0f4c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -666,8 +666,6 @@ int security_file_alloc(struct file *file)
666void security_file_free(struct file *file) 666void security_file_free(struct file *file)
667{ 667{
668 security_ops->file_free_security(file); 668 security_ops->file_free_security(file);
669 if (file->f_dentry)
670 ima_file_free(file);
671} 669}
672 670
673int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 671int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index cb65bd0dd35b..459c1f62783b 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -166,18 +166,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
166 166
167static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index) 167static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
168{ 168{
169 struct ct_vm *vm; 169 return atc->vm->get_ptp_phys(atc->vm, index);
170 void *kvirt_addr;
171 unsigned long phys_addr;
172
173 vm = atc->vm;
174 kvirt_addr = vm->get_ptp_virt(vm, index);
175 if (kvirt_addr == NULL)
176 phys_addr = (~0UL);
177 else
178 phys_addr = virt_to_phys(kvirt_addr);
179
180 return phys_addr;
181} 170}
182 171
183static unsigned int convert_format(snd_pcm_format_t snd_format) 172static unsigned int convert_format(snd_pcm_format_t snd_format)
@@ -1669,7 +1658,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
1669 } 1658 }
1670 1659
1671 /* Set up device virtual memory management object */ 1660 /* Set up device virtual memory management object */
1672 err = ct_vm_create(&atc->vm); 1661 err = ct_vm_create(&atc->vm, pci);
1673 if (err < 0) 1662 if (err < 0)
1674 goto error1; 1663 goto error1;
1675 1664
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 6b78752e9503..65da6e466f80 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
138 return NULL; 138 return NULL;
139 } 139 }
140 140
141 ptp = vm->ptp[0]; 141 ptp = (unsigned long *)vm->ptp[0].area;
142 pte_start = (block->addr >> CT_PAGE_SHIFT); 142 pte_start = (block->addr >> CT_PAGE_SHIFT);
143 pages = block->size >> CT_PAGE_SHIFT; 143 pages = block->size >> CT_PAGE_SHIFT;
144 for (i = 0; i < pages; i++) { 144 for (i = 0; i < pages; i++) {
@@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
158} 158}
159 159
160/* * 160/* *
161 * return the host (kmalloced) addr of the @index-th device 161 * return the host physical addr of the @index-th device
162 * page talbe page on success, or NULL on failure. 162 * page table page on success, or ~0UL on failure.
163 * The first returned NULL indicates the termination. 163 * The first returned ~0UL indicates the termination.
164 * */ 164 * */
165static void * 165static dma_addr_t
166ct_get_ptp_virt(struct ct_vm *vm, int index) 166ct_get_ptp_phys(struct ct_vm *vm, int index)
167{ 167{
168 void *addr; 168 dma_addr_t addr;
169 169
170 addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index]; 170 addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
171 171
172 return addr; 172 return addr;
173} 173}
174 174
175int ct_vm_create(struct ct_vm **rvm) 175int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
176{ 176{
177 struct ct_vm *vm; 177 struct ct_vm *vm;
178 struct ct_vm_block *block; 178 struct ct_vm_block *block;
179 int i; 179 int i, err = 0;
180 180
181 *rvm = NULL; 181 *rvm = NULL;
182 182
@@ -188,23 +188,21 @@ int ct_vm_create(struct ct_vm **rvm)
188 188
189 /* Allocate page table pages */ 189 /* Allocate page table pages */
190 for (i = 0; i < CT_PTP_NUM; i++) { 190 for (i = 0; i < CT_PTP_NUM; i++) {
191 vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL); 191 err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
192 if (!vm->ptp[i]) 192 snd_dma_pci_data(pci),
193 PAGE_SIZE, &vm->ptp[i]);
194 if (err < 0)
193 break; 195 break;
194 } 196 }
195 if (!i) { 197 if (err < 0) {
196 /* no page table pages are allocated */ 198 /* no page table pages are allocated */
197 kfree(vm); 199 ct_vm_destroy(vm);
198 return -ENOMEM; 200 return -ENOMEM;
199 } 201 }
200 vm->size = CT_ADDRS_PER_PAGE * i; 202 vm->size = CT_ADDRS_PER_PAGE * i;
201 /* Initialise remaining ptps */
202 for (; i < CT_PTP_NUM; i++)
203 vm->ptp[i] = NULL;
204
205 vm->map = ct_vm_map; 203 vm->map = ct_vm_map;
206 vm->unmap = ct_vm_unmap; 204 vm->unmap = ct_vm_unmap;
207 vm->get_ptp_virt = ct_get_ptp_virt; 205 vm->get_ptp_phys = ct_get_ptp_phys;
208 INIT_LIST_HEAD(&vm->unused); 206 INIT_LIST_HEAD(&vm->unused);
209 INIT_LIST_HEAD(&vm->used); 207 INIT_LIST_HEAD(&vm->used);
210 block = kzalloc(sizeof(*block), GFP_KERNEL); 208 block = kzalloc(sizeof(*block), GFP_KERNEL);
@@ -242,7 +240,7 @@ void ct_vm_destroy(struct ct_vm *vm)
242 240
243 /* free allocated page table pages */ 241 /* free allocated page table pages */
244 for (i = 0; i < CT_PTP_NUM; i++) 242 for (i = 0; i < CT_PTP_NUM; i++)
245 kfree(vm->ptp[i]); 243 snd_dma_free_pages(&vm->ptp[i]);
246 244
247 vm->size = 0; 245 vm->size = 0;
248 246
diff --git a/sound/pci/ctxfi/ctvmem.h b/sound/pci/ctxfi/ctvmem.h
index 01e4fd0386a3..b23adfca4de6 100644
--- a/sound/pci/ctxfi/ctvmem.h
+++ b/sound/pci/ctxfi/ctvmem.h
@@ -22,6 +22,8 @@
22 22
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/pci.h>
26#include <sound/memalloc.h>
25 27
26/* The chip can handle the page table of 4k pages 28/* The chip can handle the page table of 4k pages
27 * (emu20k1 can handle even 8k pages, but we don't use it right now) 29 * (emu20k1 can handle even 8k pages, but we don't use it right now)
@@ -41,7 +43,7 @@ struct snd_pcm_substream;
41 43
42/* Virtual memory management object for card device */ 44/* Virtual memory management object for card device */
43struct ct_vm { 45struct ct_vm {
44 void *ptp[CT_PTP_NUM]; /* Device page table pages */ 46 struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */
45 unsigned int size; /* Available addr space in bytes */ 47 unsigned int size; /* Available addr space in bytes */
46 struct list_head unused; /* List of unused blocks */ 48 struct list_head unused; /* List of unused blocks */
47 struct list_head used; /* List of used blocks */ 49 struct list_head used; /* List of used blocks */
@@ -52,10 +54,10 @@ struct ct_vm {
52 int size); 54 int size);
53 /* Unmap device logical addr area. */ 55 /* Unmap device logical addr area. */
54 void (*unmap)(struct ct_vm *, struct ct_vm_block *block); 56 void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
55 void *(*get_ptp_virt)(struct ct_vm *vm, int index); 57 dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
56}; 58};
57 59
58int ct_vm_create(struct ct_vm **rvm); 60int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
59void ct_vm_destroy(struct ct_vm *vm); 61void ct_vm_destroy(struct ct_vm *vm);
60 62
61#endif /* CTVMEM_H */ 63#endif /* CTVMEM_H */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 565de38a3fc7..ff6da6f386d1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -426,6 +426,7 @@ struct azx {
426 426
427 /* flags */ 427 /* flags */
428 int position_fix; 428 int position_fix;
429 int poll_count;
429 unsigned int running :1; 430 unsigned int running :1;
430 unsigned int initialized :1; 431 unsigned int initialized :1;
431 unsigned int single_cmd :1; 432 unsigned int single_cmd :1;
@@ -506,7 +507,7 @@ static char *driver_short_names[] __devinitdata = {
506#define get_azx_dev(substream) (substream->runtime->private_data) 507#define get_azx_dev(substream) (substream->runtime->private_data)
507 508
508static int azx_acquire_irq(struct azx *chip, int do_disconnect); 509static int azx_acquire_irq(struct azx *chip, int do_disconnect);
509 510static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
510/* 511/*
511 * Interface for HD codec 512 * Interface for HD codec
512 */ 513 */
@@ -664,11 +665,12 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
664{ 665{
665 struct azx *chip = bus->private_data; 666 struct azx *chip = bus->private_data;
666 unsigned long timeout; 667 unsigned long timeout;
668 int do_poll = 0;
667 669
668 again: 670 again:
669 timeout = jiffies + msecs_to_jiffies(1000); 671 timeout = jiffies + msecs_to_jiffies(1000);
670 for (;;) { 672 for (;;) {
671 if (chip->polling_mode) { 673 if (chip->polling_mode || do_poll) {
672 spin_lock_irq(&chip->reg_lock); 674 spin_lock_irq(&chip->reg_lock);
673 azx_update_rirb(chip); 675 azx_update_rirb(chip);
674 spin_unlock_irq(&chip->reg_lock); 676 spin_unlock_irq(&chip->reg_lock);
@@ -676,6 +678,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
676 if (!chip->rirb.cmds[addr]) { 678 if (!chip->rirb.cmds[addr]) {
677 smp_rmb(); 679 smp_rmb();
678 bus->rirb_error = 0; 680 bus->rirb_error = 0;
681
682 if (!do_poll)
683 chip->poll_count = 0;
679 return chip->rirb.res[addr]; /* the last value */ 684 return chip->rirb.res[addr]; /* the last value */
680 } 685 }
681 if (time_after(jiffies, timeout)) 686 if (time_after(jiffies, timeout))
@@ -688,6 +693,16 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
688 } 693 }
689 } 694 }
690 695
696 if (!chip->polling_mode && chip->poll_count < 2) {
697 snd_printdd(SFX "azx_get_response timeout, "
698 "polling the codec once: last cmd=0x%08x\n",
699 chip->last_cmd[addr]);
700 do_poll = 1;
701 chip->poll_count++;
702 goto again;
703 }
704
705
691 if (!chip->polling_mode) { 706 if (!chip->polling_mode) {
692 snd_printk(KERN_WARNING SFX "azx_get_response timeout, " 707 snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
693 "switching to polling mode: last cmd=0x%08x\n", 708 "switching to polling mode: last cmd=0x%08x\n",
@@ -1878,6 +1893,9 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
1878 1893
1879 if (!bdl_pos_adj[chip->dev_index]) 1894 if (!bdl_pos_adj[chip->dev_index])
1880 return 1; /* no delayed ack */ 1895 return 1; /* no delayed ack */
1896 if (WARN_ONCE(!azx_dev->period_bytes,
1897 "hda-intel: zero azx_dev->period_bytes"))
1898 return 0; /* this shouldn't happen! */
1881 if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2) 1899 if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
1882 return 0; /* NG - it's below the period boundary */ 1900 return 0; /* NG - it's below the period boundary */
1883 return 1; /* OK, it's fine */ 1901 return 1; /* OK, it's fine */
@@ -2043,7 +2061,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect)
2043{ 2061{
2044 if (request_irq(chip->pci->irq, azx_interrupt, 2062 if (request_irq(chip->pci->irq, azx_interrupt,
2045 chip->msi ? 0 : IRQF_SHARED, 2063 chip->msi ? 0 : IRQF_SHARED,
2046 "HDA Intel", chip)) { 2064 "hda_intel", chip)) {
2047 printk(KERN_ERR "hda-intel: unable to grab IRQ %d, " 2065 printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
2048 "disabling device\n", chip->pci->irq); 2066 "disabling device\n", chip->pci->irq);
2049 if (do_disconnect) 2067 if (do_disconnect)
@@ -2332,7 +2350,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
2332 */ 2350 */
2333static struct snd_pci_quirk msi_black_list[] __devinitdata = { 2351static struct snd_pci_quirk msi_black_list[] __devinitdata = {
2334 SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ 2352 SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
2335 SND_PCI_QUIRK(0x1043, 0x829c, "ASUS", 0), /* nvidia */ 2353 SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
2336 {} 2354 {}
2337}; 2355};
2338 2356
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 765d7bd4c3d4..9e66f6d306f8 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -703,11 +703,13 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho
703{ 703{
704 unsigned char nvol; 704 unsigned char nvol;
705 705
706 if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) 706 if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) {
707 nvol = 0; 707 nvol = 0;
708 else 708 } else {
709 nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) / 709 nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) /
710 WM_VOL_MAX; 710 WM_VOL_MAX;
711 nvol += 0x1b;
712 }
711 713
712 wm_put(ice, index, nvol); 714 wm_put(ice, index, nvol);
713 wm_put_nocache(ice, index, 0x180 | nvol); 715 wm_put_nocache(ice, index, 0x180 | nvol);
@@ -778,7 +780,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
778 for (ch = 0; ch < 2; ch++) { 780 for (ch = 0; ch < 2; ch++) {
779 unsigned int vol = ucontrol->value.integer.value[ch]; 781 unsigned int vol = ucontrol->value.integer.value[ch];
780 if (vol > WM_VOL_MAX) 782 if (vol > WM_VOL_MAX)
781 continue; 783 vol = WM_VOL_MAX;
782 vol |= spec->master[ch] & WM_VOL_MUTE; 784 vol |= spec->master[ch] & WM_VOL_MUTE;
783 if (vol != spec->master[ch]) { 785 if (vol != spec->master[ch]) {
784 int dac; 786 int dac;
@@ -834,8 +836,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *
834 for (i = 0; i < voices; i++) { 836 for (i = 0; i < voices; i++) {
835 unsigned int vol = ucontrol->value.integer.value[i]; 837 unsigned int vol = ucontrol->value.integer.value[i];
836 if (vol > WM_VOL_MAX) 838 if (vol > WM_VOL_MAX)
837 continue; 839 vol = WM_VOL_MAX;
838 vol |= spec->vol[ofs+i]; 840 vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
839 if (vol != spec->vol[ofs+i]) { 841 if (vol != spec->vol[ofs+i]) {
840 spec->vol[ofs+i] = vol; 842 spec->vol[ofs+i] = vol;
841 idx = WM_DAC_ATTEN + ofs + i; 843 idx = WM_DAC_ATTEN + ofs + i;
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 71b2c161158d..68980c19a3bc 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -145,6 +145,7 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = {
145}; 145};
146 146
147static const struct snd_soc_dapm_route omap3pandora_out_map[] = { 147static const struct snd_soc_dapm_route omap3pandora_out_map[] = {
148 {"PCM DAC", NULL, "APLL Enable"},
148 {"Headphone Amplifier", NULL, "PCM DAC"}, 149 {"Headphone Amplifier", NULL, "PCM DAC"},
149 {"Line Out", NULL, "PCM DAC"}, 150 {"Line Out", NULL, "PCM DAC"},
150 {"Headphone Jack", NULL, "Headphone Amplifier"}, 151 {"Headphone Jack", NULL, "Headphone Amplifier"},
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 124760bb37b5..e1d60d780784 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -14,6 +14,7 @@ perf*.html
14common-cmds.h 14common-cmds.h
15perf.data 15perf.data
16perf.data.old 16perf.data.old
17perf-archive
17tags 18tags
18TAGS 19TAGS
19cscope* 20cscope*
diff --git a/tools/perf/Documentation/perf-archive.txt b/tools/perf/Documentation/perf-archive.txt
new file mode 100644
index 000000000000..fae174dc7d01
--- /dev/null
+++ b/tools/perf/Documentation/perf-archive.txt
@@ -0,0 +1,22 @@
1perf-archive(1)
2===============
3
4NAME
5----
6perf-archive - Create archive with object files with build-ids found in perf.data file
7
8SYNOPSIS
9--------
10[verse]
11'perf archive' [file]
12
13DESCRIPTION
14-----------
15This command runs perf-buildid-list --with-hits and collects the files
16with the build-ids found, so that analysis of the perf.data contents is
17possible on another machine.
18
19
20SEE ALSO
21--------
22linkperf:perf-record[1], linkperf:perf-buildid-list[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
new file mode 100644
index 000000000000..88bc3b519746
--- /dev/null
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -0,0 +1,33 @@
1perf-buildid-cache(1)
2=====================
3
4NAME
5----
6perf-buildid-cache - Manage build-id cache.
7
8SYNOPSIS
9--------
10[verse]
11'perf buildid-cache <options>'
12
13DESCRIPTION
14-----------
15This command manages the build-id cache. It can add files to and remove
16files from the cache. In the future it should also purge older entries, set
17upper limits on the space used by the cache, etc.
18
19OPTIONS
20-------
21-a::
22--add=::
23	Add the specified file to the cache.
24-r::
25--remove=::
26	Remove the specified file from the cache.
27-v::
28--verbose::
29 Be more verbose.
30
31SEE ALSO
32--------
33linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
new file mode 100644
index 000000000000..b317102138c8
--- /dev/null
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -0,0 +1,29 @@
1perf-lock(1)
2============
3
4NAME
5----
6perf-lock - Analyze lock events
7
8SYNOPSIS
9--------
10[verse]
11'perf lock' {record|report|trace}
12
13DESCRIPTION
14-----------
15You can analyze various lock behaviours
16and statistics with this 'perf lock' command.
17
18 'perf lock record <command>' records lock events
19 for the duration of <command> and produces the
20 file "perf.data", which contains the tracing
21 results of those lock events.
22
23 'perf lock trace' shows raw lock events.
24
25 'perf lock report' reports statistical data.
26
27SEE ALSO
28--------
29linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 250e391b4bc8..2de34075f6a4 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -15,6 +15,8 @@ or
15'perf probe' [options] --del='[GROUP:]EVENT' [...] 15'perf probe' [options] --del='[GROUP:]EVENT' [...]
16or 16or
17'perf probe' --list 17'perf probe' --list
18or
19'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
18 20
19DESCRIPTION 21DESCRIPTION
20----------- 22-----------
@@ -45,6 +47,11 @@ OPTIONS
45--list:: 47--list::
46 List up current probe events. 48 List up current probe events.
47 49
50-L::
51--line=::
52 Show source code lines which can be probed. This needs an argument
53 which specifies a range of the source code.
54
48PROBE SYNTAX 55PROBE SYNTAX
49------------ 56------------
50Probe points are defined by following syntax. 57Probe points are defined by following syntax.
@@ -56,6 +63,19 @@ Probe points are defined by following syntax.
56It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. 63It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number.
57'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc). 64'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
58 65
66LINE SYNTAX
67-----------
68A line range is described by the following syntax.
69
70 "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]"
71
72FUNC specifies the function whose lines are shown. 'RLN' is the start line
73number, counted from the function entry line, and 'RLN2' is the end line
74number. As in the probe syntax, 'SRC' is the source file path, 'ALN' is the
75start line number, and 'ALN2' is the end line number in the file. It is also
76possible to specify how many lines to show by using 'NUM'.
77So, "source.c:100-120" shows lines 100 to 120 of source.c, and "func:10+20" shows 20 lines starting from line 10 of function func.
78
59SEE ALSO 79SEE ALSO
60-------- 80--------
61linkperf:perf-trace[1], linkperf:perf-record[1] 81linkperf:perf-trace[1], linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 4a7d558dc309..785b9fc32a46 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -74,7 +74,7 @@ OPTIONS
74 74
75-s <symbol>:: 75-s <symbol>::
76--sym-annotate=<symbol>:: 76--sym-annotate=<symbol>::
77 Annotate this symbol. Requires -k option. 77 Annotate this symbol.
78 78
79-v:: 79-v::
80--verbose:: 80--verbose::
diff --git a/tools/perf/Documentation/perf-trace-perl.txt b/tools/perf/Documentation/perf-trace-perl.txt
index c5f55f439091..d729cee8d987 100644
--- a/tools/perf/Documentation/perf-trace-perl.txt
+++ b/tools/perf/Documentation/perf-trace-perl.txt
@@ -8,7 +8,7 @@ perf-trace-perl - Process trace data with a Perl script
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf trace' [-s [lang]:script[.ext] ] 11'perf trace' [-s [Perl]:script[.pl] ]
12 12
13DESCRIPTION 13DESCRIPTION
14----------- 14-----------
diff --git a/tools/perf/Documentation/perf-trace-python.txt b/tools/perf/Documentation/perf-trace-python.txt
new file mode 100644
index 000000000000..a241aca77184
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace-python.txt
@@ -0,0 +1,625 @@
1perf-trace-python(1)
2====================
3
4NAME
5----
6perf-trace-python - Process trace data with a Python script
7
8SYNOPSIS
9--------
10[verse]
11'perf trace' [-s [Python]:script[.py] ]
12
13DESCRIPTION
14-----------
15
16This perf trace option is used to process perf trace data using perf's
17built-in Python interpreter. It reads and processes the input file and
18displays the results of the trace analysis implemented in the given
19Python script, if any.
20
21A QUICK EXAMPLE
22---------------
23
24This section shows the process, start to finish, of creating a working
25Python script that aggregates and extracts useful information from a
26raw perf trace stream. You can avoid reading the rest of this
27document if an example is enough for you; the rest of the document
28provides more details on each step and lists the library functions
29available to script writers.
30
31This example actually details the steps that were used to create the
32'syscall-counts' script you see when you list the available perf trace
33scripts via 'perf trace -l'. As such, this script also shows how to
34integrate your script into the list of general-purpose 'perf trace'
35scripts listed by that command.
36
37The syscall-counts script is a simple script, but demonstrates all the
38basic ideas necessary to create a useful script. Here's an example
39of its output (syscall names are not yet supported; they will appear
40as numbers):
41
42----
43syscall events:
44
45event count
46---------------------------------------- -----------
47sys_write 455067
48sys_getdents 4072
49sys_close 3037
50sys_swapoff 1769
51sys_read 923
52sys_sched_setparam 826
53sys_open 331
54sys_newfstat 326
55sys_mmap 217
56sys_munmap 216
57sys_futex 141
58sys_select 102
59sys_poll 84
60sys_setitimer 12
61sys_writev 8
6215 8
63sys_lseek 7
64sys_rt_sigprocmask 6
65sys_wait4 3
66sys_ioctl 3
67sys_set_robust_list 1
68sys_exit 1
6956 1
70sys_access 1
71----
72
73Basically our task is to keep a per-syscall tally that gets updated
74every time a system call occurs in the system. Our script will do
75that, but first we need to record the data that will be processed by
76that script. Theoretically, there are a couple of ways we could do
77that:
78
79- we could enable every event under the tracing/events/syscalls
80 directory, but this is over 600 syscalls, well beyond the number
81 allowable by perf. These individual syscall events will however be
82 useful if we want to later use the guidance we get from the
83 general-purpose scripts to drill down and get more detail about
84 individual syscalls of interest.
85
86- we can enable the sys_enter and/or sys_exit syscalls found under
87 tracing/events/raw_syscalls. These are called for all syscalls; the
88 'id' field can be used to distinguish between individual syscall
89 numbers.
90
91For this script, we only need to know that a syscall was entered; we
92don't care how it exited, so we'll use 'perf record' to record only
93the sys_enter events:
94
95----
96# perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
97
98^C[ perf record: Woken up 1 times to write data ]
99[ perf record: Captured and wrote 56.545 MB perf.data (~2470503 samples) ]
100----
101
102The options basically say to collect data for every syscall event
103system-wide and multiplex the per-cpu output into a single stream.
104That single stream will be recorded in a file in the current directory
105called perf.data.
106
107Once we have a perf.data file containing our data, we can use the -g
108'perf trace' option to generate a Python script that will contain a
109callback handler for each event type found in the perf.data trace
110stream (for more details, see the STARTER SCRIPTS section).
111
112----
113# perf trace -g python
114generated Python script: perf-trace.py
115----
116The output file, also created in the current directory, is named
117perf-trace.py. Here's the file in its entirety:
118----
119# perf trace event handlers, generated by perf trace -g python
120# Licensed under the terms of the GNU GPL License version 2
121
122# The common_* event handler fields are the most useful fields common to
123# all events. They don't necessarily correspond to the 'common_*' fields
124# in the format files. Those fields not available as handler params can
125# be retrieved using Python functions of the form common_*(context).
126# See the perf-trace-python Documentation for the list of available functions.
127
128import os
129import sys
130
131sys.path.append(os.environ['PERF_EXEC_PATH'] + \
132 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
133
134from perf_trace_context import *
135from Core import *
136
137def trace_begin():
138 print "in trace_begin"
139
140def trace_end():
141 print "in trace_end"
142
143def raw_syscalls__sys_enter(event_name, context, common_cpu,
144 common_secs, common_nsecs, common_pid, common_comm,
145 id, args):
146 print_header(event_name, common_cpu, common_secs, common_nsecs,
147 common_pid, common_comm)
148
149 print "id=%d, args=%s\n" % \
150 (id, args),
151
152def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
153 common_pid, common_comm):
154 print_header(event_name, common_cpu, common_secs, common_nsecs,
155 common_pid, common_comm)
156
157def print_header(event_name, cpu, secs, nsecs, pid, comm):
158 print "%-20s %5u %05u.%09u %8u %-20s " % \
159 (event_name, cpu, secs, nsecs, pid, comm),
160----
161
162At the top is a comment block followed by some import statements and a
163path append which every perf trace script should include.
164
165Following that are a couple of generated functions, trace_begin() and
166trace_end(), which are called at the beginning and the end of the
167script respectively (for more details, see the SCRIPT_LAYOUT section
168below).
169
170Following those are the 'event handler' functions generated one for
171every event in the 'perf record' output. The handler functions take
172the form subsystem__event_name, and contain named parameters, one for
173each field in the event; in this case, there's only one event,
174raw_syscalls__sys_enter(). (see the EVENT HANDLERS section below for
175more info on event handlers).
176
177The final couple of functions are, like the begin and end functions,
178generated for every script. The first, trace_unhandled(), is called
179every time the script finds an event in the perf.data file that
180doesn't correspond to any event handler in the script. This could
181mean either that the record step recorded event types that it wasn't
182really interested in, or the script was run against a trace file that
183doesn't correspond to the script.
184
185The script generated by the -g option simply prints a line for each
186event found in the trace stream, i.e. it basically just dumps the event
187and its parameter values to stdout. The print_header() function is
188simply a utility function used for that purpose. Let's rename the
189script and run it to see the default output:
190
191----
192# mv perf-trace.py syscall-counts.py
193# perf trace -s syscall-counts.py
194
195raw_syscalls__sys_enter 1 00840.847582083 7506 perf id=1, args=
196raw_syscalls__sys_enter 1 00840.847595764 7506 perf id=1, args=
197raw_syscalls__sys_enter 1 00840.847620860 7506 perf id=1, args=
198raw_syscalls__sys_enter 1 00840.847710478 6533 npviewer.bin id=78, args=
199raw_syscalls__sys_enter 1 00840.847719204 6533 npviewer.bin id=142, args=
200raw_syscalls__sys_enter 1 00840.847755445 6533 npviewer.bin id=3, args=
201raw_syscalls__sys_enter 1 00840.847775601 6533 npviewer.bin id=3, args=
202raw_syscalls__sys_enter 1 00840.847781820 6533 npviewer.bin id=3, args=
203.
204.
205.
206----
207
208Of course, for this script, we're not interested in printing every
209trace event, but rather aggregating the events in a useful way. So we'll get
210rid of everything to do with printing as well as the trace_begin() and
211trace_unhandled() functions, which we won't be using. That leaves us
212with this minimalistic skeleton:
213
214----
215import os
216import sys
217
218sys.path.append(os.environ['PERF_EXEC_PATH'] + \
219 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
220
221from perf_trace_context import *
222from Core import *
223
224def trace_end():
225 print "in trace_end"
226
227def raw_syscalls__sys_enter(event_name, context, common_cpu,
228 common_secs, common_nsecs, common_pid, common_comm,
229 id, args):
230----
231
232In trace_end(), we'll simply print the results, but first we need to
233generate some results to print. To do that we need to have our
234sys_enter() handler do the necessary tallying until all events have
235been counted. A hash table indexed by syscall id is a good way to
236store that information; every time the sys_enter() handler is called,
237we simply increment a count associated with that hash entry indexed by
238that syscall id:
239
240----
241 syscalls = autodict()
242
243 try:
244 syscalls[id] += 1
245 except TypeError:
246 syscalls[id] = 1
247----
248
249The syscalls 'autodict' object is a special kind of Python dictionary
250(implemented in Core.py) that implements Perl's 'autovivifying' hashes
251in Python, i.e. with autovivifying hashes, you can assign nested hash
252values without having to go to the trouble of creating intermediate
253levels if they don't exist, e.g. syscalls[comm][pid][id] = 1 will create
254the intermediate hash levels and finally assign the value 1 to the
255hash entry for 'id'. (Because the value being assigned isn't a hash
256object itself, the initial count is assigned in the TypeError
257exception handler. There may be a better way to do this in Python, but
258that's what works for now.)
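
For example, here's a minimal sketch of the difference between a plain
dictionary and an autodict (this assumes the sys.path setup shown
earlier, so that Core.py can be imported):

----
from Core import autodict

counts = autodict()
counts['firefox'][1234][3] = 1   # intermediate levels created on the fly

plain = {}
# plain['firefox'][1234][3] = 1  # would raise KeyError: 'firefox'
----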
259
260Putting that code into the raw_syscalls__sys_enter() handler, we
261effectively end up with a single-level dictionary keyed on syscall id
262and having the counts we've tallied as values.
263
264The print_syscall_totals() function iterates over the entries in the
265dictionary and displays a line for each entry containing the syscall
266name (the dictionary keys contain the syscall ids, which are passed to
267the Util function syscall_name(), which translates the raw syscall
268numbers to the corresponding syscall name strings). The output is
269displayed after all the events in the trace have been processed, by
270calling the print_syscall_totals() function from the trace_end()
271handler.
272
273The final script producing the output shown above is shown in its
274entirety below (the syscall_name() helper is not yet available, so you
275can only deal with ids for now):
276
277----
278import os
279import sys
280
281sys.path.append(os.environ['PERF_EXEC_PATH'] + \
282 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
283
284from perf_trace_context import *
285from Core import *
286from Util import *
287
288syscalls = autodict()
for_comm = None # not set in this walkthrough; a full script would take it from sys.argv
289
290def trace_end():
291 print_syscall_totals()
292
293def raw_syscalls__sys_enter(event_name, context, common_cpu,
294 common_secs, common_nsecs, common_pid, common_comm,
295 id, args):
296 try:
297 syscalls[id] += 1
298 except TypeError:
299 syscalls[id] = 1
300
301def print_syscall_totals():
302 if for_comm is not None:
303 print "\nsyscall events for %s:\n\n" % (for_comm),
304 else:
305 print "\nsyscall events:\n\n",
306
307 print "%-40s %10s\n" % ("event", "count"),
308 print "%-40s %10s\n" % ("----------------------------------------", \
309 "-----------"),
310
311 for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
312 reverse = True):
313 print "%-40s %10d\n" % (syscall_name(id), val),
314----
315
316The script can be run just as before:
317
318 # perf trace -s syscall-counts.py
319
320So those are the essential steps in writing and running a script. The
321process can be generalized to any tracepoint or set of tracepoints
322you're interested in - basically find the tracepoint(s) you're
323interested in by looking at the list of available events shown by
324'perf list' and/or by looking in /sys/kernel/debug/tracing/events for
325detailed event and field info, record the corresponding trace data
326using 'perf record', passing it the list of interesting events,
327generate a skeleton script using 'perf trace -g python', and modify
328the code to aggregate and display the data for your particular needs.
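
As a rough sketch of those steps (the event and script names here are
just examples, not part of the syscall-counts walkthrough):

----
# perf list | grep sched:                  # pick the tracepoint(s)
# perf record -c 1 -f -a -M -R -e sched:sched_switch
# perf trace -g python                     # writes perf-trace.py
# mv perf-trace.py sched-switch-counts.py  # edit the handlers, then:
# perf trace -s sched-switch-counts.py
----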
329
330After you've done that you may end up with a general-purpose script
331that you want to keep around and have available for future use. By
332writing a couple of very simple shell scripts and putting them in the
333right place, you can have your script listed alongside the other
334scripts listed by the 'perf trace -l' command e.g.:
335
336----
337root@tropicana:~# perf trace -l
338List of available trace scripts:
339 workqueue-stats workqueue stats (ins/exe/create/destroy)
340 wakeup-latency system-wide min/max/avg wakeup latency
341 rw-by-file <comm> r/w activity for a program, by file
342 rw-by-pid system-wide r/w activity
343----
344
345A nice side effect of doing this is that you also then capture the
346probably lengthy 'perf record' command needed to record the events for
347the script.
348
349To have the script appear as a 'built-in' script, you write two simple
350scripts, one for recording and one for 'reporting'.
351
352The 'record' script is a shell script with the same base name as your
353script, but with -record appended. The shell script should be put
354into the tools/perf/scripts/python/bin directory in the kernel source tree.
355In that script, you write the 'perf record' command-line needed for
356your script:
357
358----
359# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-record
360
361#!/bin/bash
362perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
363----
364
365The 'report' script is also a shell script with the same base name as
366your script, but with -report appended. It should also be located in
367the tools/perf/scripts/python/bin directory. In that script, you write the
368'perf trace -s' command-line needed for running your script:
369
370----
371# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-report
372
373#!/bin/bash
374# description: system-wide syscall counts
375perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py
376----
377
378Note that the location of the Python script given in the shell script
379is in the libexec/perf-core/scripts/python directory - this is where
380the script will be copied by 'make install' when you install perf.
381For the installation to install your script there, your script needs
382to be located in the tools/perf/scripts/python directory in the kernel
383source tree:
384
385----
386# ls -al kernel-source/tools/perf/scripts/python
387
388root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
389total 32
390drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
391drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
392drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin
393-rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-trace.py
394drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util
395-rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py
396----
397
398Once you've done that (don't forget to do a new 'make install',
399otherwise your script won't show up at run-time), 'perf trace -l'
400should show a new entry for your script:
401
402----
403root@tropicana:~# perf trace -l
404List of available trace scripts:
405 workqueue-stats workqueue stats (ins/exe/create/destroy)
406 wakeup-latency system-wide min/max/avg wakeup latency
407 rw-by-file <comm> r/w activity for a program, by file
408 rw-by-pid system-wide r/w activity
409 syscall-counts system-wide syscall counts
410----
411
412You can now perform the record step via 'perf trace record':
413
414 # perf trace record syscall-counts
415
416and display the output using 'perf trace report':
417
418 # perf trace report syscall-counts
419
420STARTER SCRIPTS
421---------------
422
423You can quickly get started writing a script for a particular set of
424trace data by generating a skeleton script using 'perf trace -g
425python' in the same directory as an existing perf.data trace file.
426That will generate a starter script containing a handler for each of
427the event types in the trace file; it simply prints every available
428field for each event in the trace file.
429
430You can also look at the existing scripts in
431~/libexec/perf-core/scripts/python for typical examples showing how to
432do basic things like aggregate event data, print results, etc. Also,
433the check-perf-trace.py script, while not interesting for its results,
434attempts to exercise all of the main scripting features.
435
436EVENT HANDLERS
437--------------
438
439When perf trace is invoked using a trace script, a user-defined
440'handler function' is called for each event in the trace. If there's
441no handler function defined for a given event type, the event is
442ignored (or passed to a 'trace_unhandled' function, see below) and the
443next event is processed.
444
445Most of the event's field values are passed as arguments to the
446handler function; some of the less common ones aren't - those are
447available as calls back into the perf executable (see below).
448
449As an example, the following perf record command can be used to record
450all sched_wakeup events in the system:
451
452 # perf record -c 1 -f -a -M -R -e sched:sched_wakeup
453
454Traces meant to be processed using a script should be recorded with
455the above options: -c 1 says to sample every event, -a to enable
456system-wide collection, -M to multiplex the output, and -R to collect
457raw samples.
458
459The format file for the sched_wakeup event defines the following fields
460(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
461
462----
463 format:
464 field:unsigned short common_type;
465 field:unsigned char common_flags;
466 field:unsigned char common_preempt_count;
467 field:int common_pid;
468 field:int common_lock_depth;
469
470 field:char comm[TASK_COMM_LEN];
471 field:pid_t pid;
472 field:int prio;
473 field:int success;
474 field:int target_cpu;
475----
476
477The handler function for this event would be defined as:
478
479----
480def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
481 common_nsecs, common_pid, common_comm,
482 comm, pid, prio, success, target_cpu):
483 pass
484----
485
486The handler function takes the form subsystem__event_name.
487
488The common_* arguments in the handler's argument list are the set of
489arguments passed to all event handlers; some of the fields correspond
490to the common_* fields in the format file, but some are synthesized,
491and some of the common_* fields aren't common enough to be passed
492to every event as arguments but are available as library functions.
493
494Here's a brief description of each of the invariant event args:
495
496 event_name the name of the event as text
497 context an opaque 'cookie' used in calls back into perf
498 common_cpu the cpu the event occurred on
499 common_secs the secs portion of the event timestamp
500 common_nsecs the nsecs portion of the event timestamp
501 common_pid the pid of the current task
502 common_comm the name of the current process
503
504All of the remaining fields in the event's format file have
505counterparts as handler function arguments of the same name, as can be
506seen in the example above.
507
508The above provides the basics needed to directly access every field of
509every event in a trace, which covers 90% of what you need to know to
510write a useful trace script. The sections below cover the rest.
511
512SCRIPT LAYOUT
513-------------
514
515Every perf trace Python script should start by setting up a Python
516module search path and 'import'ing a few support modules (see module
517descriptions below):
518
519----
520 import os
521 import sys
522
523 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
524 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
525
526 from perf_trace_context import *
527 from Core import *
528----
529
530The rest of the script can contain handler functions and support
531functions in any order.
532
533Aside from the event handler functions discussed above, every script
534can implement a set of optional functions:
535
536*trace_begin*, if defined, is called before any event is processed and
537gives scripts a chance to do setup tasks:
538
539----
540def trace_begin():
541 pass
542----
543
544*trace_end*, if defined, is called after all events have been
545 processed and gives scripts a chance to do end-of-script tasks, such
546 as display results:
547
548----
549def trace_end():
550 pass
551----
552
553*trace_unhandled*, if defined, is called for any event that
554 doesn't have a handler explicitly defined for it. The standard set
555 of common arguments are passed into it:
556
557----
558def trace_unhandled(event_name, context, common_cpu, common_secs,
559 common_nsecs, common_pid, common_comm):
560 pass
561----
562
563The remaining sections provide descriptions of each of the available
564built-in perf trace Python modules and their associated functions.
565
566AVAILABLE MODULES AND FUNCTIONS
567-------------------------------
568
569The following sections describe the functions and variables available
570via the various perf trace Python modules. To use the functions and
571variables from the given module, add the corresponding 'from XXXX
572import' line to your perf trace script.
573
574Core.py Module
575~~~~~~~~~~~~~~
576
577This module provides some essential functions to user scripts.
578
579The *flag_str* and *symbol_str* functions provide human-readable
580strings for flag and symbolic fields. These correspond to the strings
581and values parsed from the 'print fmt' fields of the event format
582files:
583
584 flag_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the flag field field_name of event event_name
585 symbol_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the symbolic field field_name of event event_name
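
As an illustrative sketch, a handler for an event with a symbolic field
(here the irq:softirq_entry event, whose 'vec' field has symbolic
'print fmt' strings) could translate the raw value like this:

----
def irq__softirq_entry(event_name, context, common_cpu, common_secs,
                       common_nsecs, common_pid, common_comm, vec):
    # map the numeric vec value to its symbolic name, e.g. "TIMER"
    print "vec=%s\n" % symbol_str(event_name, "vec", vec),
----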
586
587The *autodict* function returns a special special kind of Python
588dictionary that implements Perl's 'autovivifying' hashes in Python
589i.e. with autovivifying hashes, you can assign nested hash values
590without having to go to the trouble of creating intermediate levels if
591they don't exist.
592
593 autodict() - returns an autovivifying dictionary instance
594
595
596perf_trace_context Module
597~~~~~~~~~~~~~~~~~~~~~~~~~
598
599Some of the 'common' fields in the event format file aren't all that
600common, but need to be made accessible to user scripts nonetheless.
601
602perf_trace_context defines a set of functions that can be used to
603access this data in the context of the current event. Each of these
604functions expects a context variable, which is the same as the
605context variable passed into every event handler as the second
606argument.
607
608 common_pc(context) - returns common_preempt_count for the current event
609 common_flags(context) - returns common_flags for the current event
610 common_lock_depth(context) - returns common_lock_depth for the current event
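
For example, a sched_wakeup handler could retrieve these values for
the current event like this (a sketch; the argument list is the one
shown in the EVENT HANDLERS section above):

----
from perf_trace_context import *

def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
                        common_nsecs, common_pid, common_comm,
                        comm, pid, prio, success, target_cpu):
    print "preempt_count=%d flags=%d\n" % \
        (common_pc(context), common_flags(context)),
----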
611
612Util.py Module
613~~~~~~~~~~~~~~
614
615Various utility functions for use with perf trace:
616
617 nsecs(secs, nsecs) - returns total nsecs given secs/nsecs pair
618 nsecs_secs(nsecs) - returns whole secs portion given nsecs
619 nsecs_nsecs(nsecs) - returns nsecs remainder given nsecs
620 nsecs_str(nsecs) - returns printable string in the form secs.nsecs
621 avg(total, n) - returns average given a sum and a total number of values
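
A small sketch of how these fit together (the values are made up):

----
from Util import *

ts = nsecs(840, 847582083)   # combine a secs/nsecs pair into total nsecs
print "secs=%u nsecs=%u\n" % (nsecs_secs(ts), nsecs_nsecs(ts)),
print "timestamp=%s\n" % nsecs_str(ts),   # the printable secs.nsecs form
print "avg=%d\n" % avg(100, 4),           # 25
----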
622
623SEE ALSO
624--------
625linkperf:perf-trace[1]
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 60e5900da483..8879299cd9df 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -19,6 +19,11 @@ There are several variants of perf trace:
19 'perf trace' to see a detailed trace of the workload that was 19 'perf trace' to see a detailed trace of the workload that was
20 recorded. 20 recorded.
21 21
22 You can also run a set of pre-canned scripts that aggregate and
23 summarize the raw trace data in various ways (the list of scripts is
24 available via 'perf trace -l'). The following variants allow you to
25 record and run those scripts:
26
22 'perf trace record <script>' to record the events required for 'perf 27 'perf trace record <script>' to record the events required for 'perf
23 trace report'. <script> is the name displayed in the output of 28 trace report'. <script> is the name displayed in the output of
24 'perf trace --list' i.e. the actual script name minus any language 29 'perf trace --list' i.e. the actual script name minus any language
@@ -31,6 +36,9 @@ There are several variants of perf trace:
31 record <script>' is used and should be present for this command to 36 record <script>' is used and should be present for this command to
32 succeed. 37 succeed.
33 38
39 See the 'SEE ALSO' section for links to language-specific
40 information on how to write and run your own trace scripts.
41
34OPTIONS 42OPTIONS
35------- 43-------
36-D:: 44-D::
@@ -45,9 +53,11 @@ OPTIONS
45--list=:: 53--list=::
46 Display a list of available trace scripts. 54 Display a list of available trace scripts.
47 55
48-s:: 56-s ['lang']::
49--script=:: 57--script=::
50 Process trace data with the given script ([lang]:script[.ext]). 58 Process trace data with the given script ([lang]:script[.ext]).
59 If the string 'lang' is specified in place of a script name, a
60 list of supported languages will be displayed instead.
51 61
52-g:: 62-g::
53--gen-script=:: 63--gen-script=::
@@ -56,4 +66,5 @@ OPTIONS
56 66
57SEE ALSO 67SEE ALSO
58-------- 68--------
59linkperf:perf-record[1], linkperf:perf-trace-perl[1] 69linkperf:perf-record[1], linkperf:perf-trace-perl[1],
70linkperf:perf-trace-python[1]
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 69c832557199..0eeb247dc7d2 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -12,7 +12,7 @@ SYNOPSIS
12 12
13DESCRIPTION 13DESCRIPTION
14----------- 14-----------
15Performance counters for Linux are are a new kernel-based subsystem 15Performance counters for Linux are a new kernel-based subsystem
16that provide a framework for all things performance analysis. It 16that provide a framework for all things performance analysis. It
17covers hardware level (CPU/PMU, Performance Monitoring Unit) features 17covers hardware level (CPU/PMU, Performance Monitoring Unit) features
18and software features (software counters, tracepoints) as well. 18and software features (software counters, tracepoints) as well.
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 2e7fa3a06806..54a5b50ff312 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -286,11 +286,7 @@ SCRIPT_PERL =
286SCRIPT_SH = 286SCRIPT_SH =
287TEST_PROGRAMS = 287TEST_PROGRAMS =
288 288
289# 289SCRIPT_SH += perf-archive.sh
290# No scripts right now:
291#
292
293# SCRIPT_SH += perf-am.sh
294 290
295# 291#
296# No Perl scripts right now: 292# No Perl scripts right now:
@@ -315,9 +311,6 @@ PROGRAMS += perf
315# List built-in command $C whose implementation cmd_$C() is not in 311# List built-in command $C whose implementation cmd_$C() is not in
316# builtin-$C.o but is linked in as part of some other command. 312# builtin-$C.o but is linked in as part of some other command.
317# 313#
318# None right now:
319#
320# BUILT_INS += perf-init $X
321 314
322# what 'all' will build and 'install' will install, in perfexecdir 315# what 'all' will build and 'install' will install, in perfexecdir
323ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) 316ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
@@ -340,6 +333,7 @@ LIB_FILE=libperf.a
340LIB_H += ../../include/linux/perf_event.h 333LIB_H += ../../include/linux/perf_event.h
341LIB_H += ../../include/linux/rbtree.h 334LIB_H += ../../include/linux/rbtree.h
342LIB_H += ../../include/linux/list.h 335LIB_H += ../../include/linux/list.h
336LIB_H += ../../include/linux/hash.h
343LIB_H += ../../include/linux/stringify.h 337LIB_H += ../../include/linux/stringify.h
344LIB_H += util/include/linux/bitmap.h 338LIB_H += util/include/linux/bitmap.h
345LIB_H += util/include/linux/bitops.h 339LIB_H += util/include/linux/bitops.h
@@ -363,12 +357,14 @@ LIB_H += util/include/asm/uaccess.h
363LIB_H += perf.h 357LIB_H += perf.h
364LIB_H += util/cache.h 358LIB_H += util/cache.h
365LIB_H += util/callchain.h 359LIB_H += util/callchain.h
360LIB_H += util/build-id.h
366LIB_H += util/debug.h 361LIB_H += util/debug.h
367LIB_H += util/debugfs.h 362LIB_H += util/debugfs.h
368LIB_H += util/event.h 363LIB_H += util/event.h
369LIB_H += util/exec_cmd.h 364LIB_H += util/exec_cmd.h
370LIB_H += util/types.h 365LIB_H += util/types.h
371LIB_H += util/levenshtein.h 366LIB_H += util/levenshtein.h
367LIB_H += util/map.h
372LIB_H += util/parse-options.h 368LIB_H += util/parse-options.h
373LIB_H += util/parse-events.h 369LIB_H += util/parse-events.h
374LIB_H += util/quote.h 370LIB_H += util/quote.h
@@ -389,12 +385,12 @@ LIB_H += util/sort.h
389LIB_H += util/hist.h 385LIB_H += util/hist.h
390LIB_H += util/thread.h 386LIB_H += util/thread.h
391LIB_H += util/trace-event.h 387LIB_H += util/trace-event.h
392LIB_H += util/trace-event-perl.h
393LIB_H += util/probe-finder.h 388LIB_H += util/probe-finder.h
394LIB_H += util/probe-event.h 389LIB_H += util/probe-event.h
395 390
396LIB_OBJS += util/abspath.o 391LIB_OBJS += util/abspath.o
397LIB_OBJS += util/alias.o 392LIB_OBJS += util/alias.o
393LIB_OBJS += util/build-id.o
398LIB_OBJS += util/config.o 394LIB_OBJS += util/config.o
399LIB_OBJS += util/ctype.o 395LIB_OBJS += util/ctype.o
400LIB_OBJS += util/debugfs.o 396LIB_OBJS += util/debugfs.o
@@ -431,12 +427,12 @@ LIB_OBJS += util/thread.o
431LIB_OBJS += util/trace-event-parse.o 427LIB_OBJS += util/trace-event-parse.o
432LIB_OBJS += util/trace-event-read.o 428LIB_OBJS += util/trace-event-read.o
433LIB_OBJS += util/trace-event-info.o 429LIB_OBJS += util/trace-event-info.o
434LIB_OBJS += util/trace-event-perl.o 430LIB_OBJS += util/trace-event-scripting.o
435LIB_OBJS += util/svghelper.o 431LIB_OBJS += util/svghelper.o
436LIB_OBJS += util/sort.o 432LIB_OBJS += util/sort.o
437LIB_OBJS += util/hist.o 433LIB_OBJS += util/hist.o
438LIB_OBJS += util/data_map.o
439LIB_OBJS += util/probe-event.o 434LIB_OBJS += util/probe-event.o
435LIB_OBJS += util/util.o
440 436
441BUILTIN_OBJS += builtin-annotate.o 437BUILTIN_OBJS += builtin-annotate.o
442 438
@@ -451,6 +447,7 @@ BUILTIN_OBJS += builtin-diff.o
451BUILTIN_OBJS += builtin-help.o 447BUILTIN_OBJS += builtin-help.o
452BUILTIN_OBJS += builtin-sched.o 448BUILTIN_OBJS += builtin-sched.o
453BUILTIN_OBJS += builtin-buildid-list.o 449BUILTIN_OBJS += builtin-buildid-list.o
450BUILTIN_OBJS += builtin-buildid-cache.o
454BUILTIN_OBJS += builtin-list.o 451BUILTIN_OBJS += builtin-list.o
455BUILTIN_OBJS += builtin-record.o 452BUILTIN_OBJS += builtin-record.o
456BUILTIN_OBJS += builtin-report.o 453BUILTIN_OBJS += builtin-report.o
@@ -460,6 +457,7 @@ BUILTIN_OBJS += builtin-top.o
460BUILTIN_OBJS += builtin-trace.o 457BUILTIN_OBJS += builtin-trace.o
461BUILTIN_OBJS += builtin-probe.o 458BUILTIN_OBJS += builtin-probe.o
462BUILTIN_OBJS += builtin-kmem.o 459BUILTIN_OBJS += builtin-kmem.o
460BUILTIN_OBJS += builtin-lock.o
463 461
464PERFLIBS = $(LIB_FILE) 462PERFLIBS = $(LIB_FILE)
465 463
@@ -520,9 +518,23 @@ ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; e
520 BASIC_CFLAGS += -DNO_LIBPERL 518 BASIC_CFLAGS += -DNO_LIBPERL
521else 519else
522 ALL_LDFLAGS += $(PERL_EMBED_LDOPTS) 520 ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
521 LIB_OBJS += util/scripting-engines/trace-event-perl.o
523 LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o 522 LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o
524endif 523endif
525 524
525ifndef NO_LIBPYTHON
526PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
527PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
528endif
529
530ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o /dev/null $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
531 BASIC_CFLAGS += -DNO_LIBPYTHON
532else
533 ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
534 LIB_OBJS += util/scripting-engines/trace-event-python.o
535 LIB_OBJS += scripts/python/Perf-Trace-Util/Context.o
536endif
537
526ifdef NO_DEMANGLE 538ifdef NO_DEMANGLE
527 BASIC_CFLAGS += -DNO_DEMANGLE 539 BASIC_CFLAGS += -DNO_DEMANGLE
528else 540else
@@ -894,12 +906,18 @@ util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
894util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS 906util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
895 $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 907 $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
896 908
897util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS 909util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c PERF-CFLAGS
898 $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< 910 $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
899 911
900scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS 912scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS
901 $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< 913 $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
902 914
915util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c PERF-CFLAGS
916 $(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-python.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
917
918scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c PERF-CFLAGS
919 $(QUIET_CC)$(CC) -o scripts/python/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
920
903perf-%$X: %.o $(PERFLIBS) 921perf-%$X: %.o $(PERFLIBS)
904 $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) 922 $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
905 923
@@ -1009,9 +1027,16 @@ install: all
1009 $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' 1027 $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
1010 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 1028 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
1011 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' 1029 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
1030 $(INSTALL) perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
1012 $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 1031 $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
1013 $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' 1032 $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
1014 $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' 1033 $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
1034 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
1035 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
1036 $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
1037 $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
1038 $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
1039
1015ifdef BUILT_INS 1040ifdef BUILT_INS
1016 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 1041 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
1017 $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 1042 $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 593ff25006de..5ec5de995872 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -53,32 +53,20 @@ struct sym_priv {
53 53
54static const char *sym_hist_filter; 54static const char *sym_hist_filter;
55 55
56static int symbol_filter(struct map *map __used, struct symbol *sym) 56static int sym__alloc_hist(struct symbol *self)
57{ 57{
58 if (sym_hist_filter == NULL || 58 struct sym_priv *priv = symbol__priv(self);
59 strcmp(sym->name, sym_hist_filter) == 0) { 59 const int size = (sizeof(*priv->hist) +
60 struct sym_priv *priv = symbol__priv(sym); 60 (self->end - self->start) * sizeof(u64));
61 const int size = (sizeof(*priv->hist) +
62 (sym->end - sym->start) * sizeof(u64));
63 61
64 priv->hist = malloc(size); 62 priv->hist = zalloc(size);
65 if (priv->hist) 63 return priv->hist == NULL ? -1 : 0;
66 memset(priv->hist, 0, size);
67 return 0;
68 }
69 /*
70 * FIXME: We should really filter it out, as we don't want to go thru symbols
71 * we're not interested, and if a DSO ends up with no symbols, delete it too,
72 * but right now the kernel loading routines in symbol.c bail out if no symbols
73 * are found, fix it later.
74 */
75 return 0;
76} 64}
77 65
78/* 66/*
79 * collect histogram counts 67 * collect histogram counts
80 */ 68 */
81static void hist_hit(struct hist_entry *he, u64 ip) 69static int annotate__hist_hit(struct hist_entry *he, u64 ip)
82{ 70{
83 unsigned int sym_size, offset; 71 unsigned int sym_size, offset;
84 struct symbol *sym = he->sym; 72 struct symbol *sym = he->sym;
@@ -88,83 +76,127 @@ static void hist_hit(struct hist_entry *he, u64 ip)
88 he->count++; 76 he->count++;
89 77
90 if (!sym || !he->map) 78 if (!sym || !he->map)
91 return; 79 return 0;
92 80
93 priv = symbol__priv(sym); 81 priv = symbol__priv(sym);
94 if (!priv->hist) 82 if (priv->hist == NULL && sym__alloc_hist(sym) < 0)
95 return; 83 return -ENOMEM;
96 84
97 sym_size = sym->end - sym->start; 85 sym_size = sym->end - sym->start;
98 offset = ip - sym->start; 86 offset = ip - sym->start;
99 87
100 if (verbose) 88 pr_debug3("%s: ip=%#Lx\n", __func__, he->map->unmap_ip(he->map, ip));
101 fprintf(stderr, "%s: ip=%Lx\n", __func__,
102 he->map->unmap_ip(he->map, ip));
103 89
104 if (offset >= sym_size) 90 if (offset >= sym_size)
105 return; 91 return 0;
106 92
107 h = priv->hist; 93 h = priv->hist;
108 h->sum++; 94 h->sum++;
109 h->ip[offset]++; 95 h->ip[offset]++;
110 96
111 if (verbose >= 3) 97 pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->sym->start,
112 printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", 98 he->sym->name, ip, ip - he->sym->start, h->ip[offset]);
113 (void *)(unsigned long)he->sym->start, 99 return 0;
114 he->sym->name,
115 (void *)(unsigned long)ip, ip - he->sym->start,
116 h->ip[offset]);
117} 100}
118 101
119static int perf_session__add_hist_entry(struct perf_session *self, 102static int perf_session__add_hist_entry(struct perf_session *self,
120 struct addr_location *al, u64 count) 103 struct addr_location *al, u64 count)
121{ 104{
122 bool hit; 105 bool hit;
123 struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, 106 struct hist_entry *he;
124 count, &hit); 107
108 if (sym_hist_filter != NULL &&
109 (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
110 /* We're only interested in a symbol named sym_hist_filter */
111 if (al->sym != NULL) {
112 rb_erase(&al->sym->rb_node,
113 &al->map->dso->symbols[al->map->type]);
114 symbol__delete(al->sym);
115 }
116 return 0;
117 }
118
119 he = __perf_session__add_hist_entry(self, al, NULL, count, &hit);
125 if (he == NULL) 120 if (he == NULL)
126 return -ENOMEM; 121 return -ENOMEM;
127 hist_hit(he, al->addr); 122
128 return 0; 123 return annotate__hist_hit(he, al->addr);
129} 124}
130 125
131static int process_sample_event(event_t *event, struct perf_session *session) 126static int process_sample_event(event_t *event, struct perf_session *session)
132{ 127{
133 struct addr_location al; 128 struct addr_location al;
134 129
135 dump_printf("(IP, %d): %d: %p\n", event->header.misc, 130 dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
136 event->ip.pid, (void *)(long)event->ip.ip); 131 event->ip.pid, event->ip.ip);
137 132
138 if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) { 133 if (event__preprocess_sample(event, session, &al, NULL) < 0) {
139 fprintf(stderr, "problem processing %d event, skipping it.\n", 134 pr_warning("problem processing %d event, skipping it.\n",
140 event->header.type); 135 event->header.type);
141 return -1; 136 return -1;
142 } 137 }
143 138
144 if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) { 139 if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) {
145 fprintf(stderr, "problem incrementing symbol count, " 140 pr_warning("problem incrementing symbol count, "
146 "skipping event\n"); 141 "skipping event\n");
147 return -1; 142 return -1;
148 } 143 }
149 144
150 return 0; 145 return 0;
151} 146}
152 147
153static int parse_line(FILE *file, struct hist_entry *he, u64 len) 148struct objdump_line {
149 struct list_head node;
150 s64 offset;
151 char *line;
152};
153
154static struct objdump_line *objdump_line__new(s64 offset, char *line)
155{
156 struct objdump_line *self = malloc(sizeof(*self));
157
158 if (self != NULL) {
159 self->offset = offset;
160 self->line = line;
161 }
162
163 return self;
164}
165
166static void objdump_line__free(struct objdump_line *self)
167{
168 free(self->line);
169 free(self);
170}
171
172static void objdump__add_line(struct list_head *head, struct objdump_line *line)
173{
174 list_add_tail(&line->node, head);
175}
176
177static struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
178 struct objdump_line *pos)
179{
180 list_for_each_entry_continue(pos, head, node)
181 if (pos->offset >= 0)
182 return pos;
183
184 return NULL;
185}
186
187static int parse_line(FILE *file, struct hist_entry *he,
188 struct list_head *head)
154{ 189{
155 struct symbol *sym = he->sym; 190 struct symbol *sym = he->sym;
191 struct objdump_line *objdump_line;
156 char *line = NULL, *tmp, *tmp2; 192 char *line = NULL, *tmp, *tmp2;
157 static const char *prev_line;
158 static const char *prev_color;
159 unsigned int offset;
160 size_t line_len; 193 size_t line_len;
161 u64 start; 194 s64 line_ip, offset = -1;
162 s64 line_ip;
163 int ret;
164 char *c; 195 char *c;
165 196
166 if (getline(&line, &line_len, file) < 0) 197 if (getline(&line, &line_len, file) < 0)
167 return -1; 198 return -1;
199
168 if (!line) 200 if (!line)
169 return -1; 201 return -1;
170 202
@@ -173,8 +205,6 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
173 *c = 0; 205 *c = 0;
174 206
175 line_ip = -1; 207 line_ip = -1;
176 offset = 0;
177 ret = -2;
178 208
179 /* 209 /*
180 * Strip leading spaces: 210 * Strip leading spaces:
@@ -195,9 +225,30 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
195 line_ip = -1; 225 line_ip = -1;
196 } 226 }
197 227
198 start = he->map->unmap_ip(he->map, sym->start);
199
200 if (line_ip != -1) { 228 if (line_ip != -1) {
229 u64 start = map__rip_2objdump(he->map, sym->start);
230 offset = line_ip - start;
231 }
232
233 objdump_line = objdump_line__new(offset, line);
234 if (objdump_line == NULL) {
235 free(line);
236 return -1;
237 }
238 objdump__add_line(head, objdump_line);
239
240 return 0;
241}
242
243static int objdump_line__print(struct objdump_line *self,
244 struct list_head *head,
245 struct hist_entry *he, u64 len)
246{
247 struct symbol *sym = he->sym;
248 static const char *prev_line;
249 static const char *prev_color;
250
251 if (self->offset != -1) {
201 const char *path = NULL; 252 const char *path = NULL;
202 unsigned int hits = 0; 253 unsigned int hits = 0;
203 double percent = 0.0; 254 double percent = 0.0;
@@ -205,15 +256,22 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
205 struct sym_priv *priv = symbol__priv(sym); 256 struct sym_priv *priv = symbol__priv(sym);
206 struct sym_ext *sym_ext = priv->ext; 257 struct sym_ext *sym_ext = priv->ext;
207 struct sym_hist *h = priv->hist; 258 struct sym_hist *h = priv->hist;
259 s64 offset = self->offset;
260 struct objdump_line *next = objdump__get_next_ip_line(head, self);
261
262 while (offset < (s64)len &&
263 (next == NULL || offset < next->offset)) {
264 if (sym_ext) {
265 if (path == NULL)
266 path = sym_ext[offset].path;
267 percent += sym_ext[offset].percent;
268 } else
269 hits += h->ip[offset];
270
271 ++offset;
272 }
208 273
209 offset = line_ip - start; 274 if (sym_ext == NULL && h->sum)
210 if (offset < len)
211 hits = h->ip[offset];
212
213 if (offset < len && sym_ext) {
214 path = sym_ext[offset].path;
215 percent = sym_ext[offset].percent;
216 } else if (h->sum)
217 percent = 100.0 * hits / h->sum; 275 percent = 100.0 * hits / h->sum;
218 276
219 color = get_percent_color(percent); 277 color = get_percent_color(percent);
@@ -234,12 +292,12 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
234 292
235 color_fprintf(stdout, color, " %7.2f", percent); 293 color_fprintf(stdout, color, " %7.2f", percent);
236 printf(" : "); 294 printf(" : ");
237 color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line); 295 color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", self->line);
238 } else { 296 } else {
239 if (!*line) 297 if (!*self->line)
240 printf(" :\n"); 298 printf(" :\n");
241 else 299 else
242 printf(" : %s\n", line); 300 printf(" : %s\n", self->line);
243 } 301 }
244 302
245 return 0; 303 return 0;
@@ -365,6 +423,20 @@ static void print_summary(const char *filename)
365 } 423 }
366} 424}
367 425
426static void hist_entry__print_hits(struct hist_entry *self)
427{
428 struct symbol *sym = self->sym;
429 struct sym_priv *priv = symbol__priv(sym);
430 struct sym_hist *h = priv->hist;
431 u64 len = sym->end - sym->start, offset;
432
433 for (offset = 0; offset < len; ++offset)
434 if (h->ip[offset] != 0)
435 printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
436 sym->start + offset, h->ip[offset]);
437 printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
438}
439
368static void annotate_sym(struct hist_entry *he) 440static void annotate_sym(struct hist_entry *he)
369{ 441{
370 struct map *map = he->map; 442 struct map *map = he->map;
@@ -374,15 +446,15 @@ static void annotate_sym(struct hist_entry *he)
374 u64 len; 446 u64 len;
375 char command[PATH_MAX*2]; 447 char command[PATH_MAX*2];
376 FILE *file; 448 FILE *file;
449 LIST_HEAD(head);
450 struct objdump_line *pos, *n;
377 451
378 if (!filename) 452 if (!filename)
379 return; 453 return;
380 454
381 if (verbose) 455 pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
382 fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n", 456 filename, sym->name, map->unmap_ip(map, sym->start),
383 __func__, filename, sym->name, 457 map->unmap_ip(map, sym->end));
384 map->unmap_ip(map, sym->start),
385 map->unmap_ip(map, sym->end));
386 458
387 if (full_paths) 459 if (full_paths)
388 d_filename = filename; 460 d_filename = filename;
@@ -405,7 +477,8 @@ static void annotate_sym(struct hist_entry *he)
405 dso, dso->long_name, sym, sym->name); 477 dso, dso->long_name, sym, sym->name);
406 478
407 sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", 479 sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
408 map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end), 480 map__rip_2objdump(map, sym->start),
481 map__rip_2objdump(map, sym->end),
409 filename, filename); 482 filename, filename);
410 483
411 if (verbose >= 3) 484 if (verbose >= 3)
@@ -416,11 +489,21 @@ static void annotate_sym(struct hist_entry *he)
416 return; 489 return;
417 490
418 while (!feof(file)) { 491 while (!feof(file)) {
419 if (parse_line(file, he, len) < 0) 492 if (parse_line(file, he, &head) < 0)
420 break; 493 break;
421 } 494 }
422 495
423 pclose(file); 496 pclose(file);
497
498 if (verbose)
499 hist_entry__print_hits(he);
500
501 list_for_each_entry_safe(pos, n, &head, node) {
502 objdump_line__print(pos, &head, he, len);
503 list_del(&pos->node);
504 objdump_line__free(pos);
505 }
506
424 if (print_line) 507 if (print_line)
425 free_source_line(he, len); 508 free_source_line(he, len);
426} 509}
@@ -451,10 +534,10 @@ static void perf_session__find_annotations(struct perf_session *self)
451} 534}
452 535
453static struct perf_event_ops event_ops = { 536static struct perf_event_ops event_ops = {
454 .process_sample_event = process_sample_event, 537 .sample = process_sample_event,
455 .process_mmap_event = event__process_mmap, 538 .mmap = event__process_mmap,
456 .process_comm_event = event__process_comm, 539 .comm = event__process_comm,
457 .process_fork_event = event__process_task, 540 .fork = event__process_task,
458}; 541};
459 542
460static int __cmd_annotate(void) 543static int __cmd_annotate(void)
@@ -542,9 +625,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
542 setup_pager(); 625 setup_pager();
543 626
544 if (field_sep && *field_sep == '.') { 627 if (field_sep && *field_sep == '.') {
545 fputs("'.' is the only non valid --field-separator argument\n", 628 pr_err("'.' is the only non valid --field-separator argument\n");
546 stderr); 629 return -1;
547 exit(129);
548 } 630 }
549 631
550 return __cmd_annotate(); 632 return __cmd_annotate();
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
new file mode 100644
index 000000000000..30a05f552c96
--- /dev/null
+++ b/tools/perf/builtin-buildid-cache.c
@@ -0,0 +1,133 @@
1/*
2 * builtin-buildid-cache.c
3 *
4 * Builtin buildid-cache command: Manages build-id cache
5 *
6 * Copyright (C) 2010, Red Hat Inc.
7 * Copyright (C) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
8 */
9#include "builtin.h"
10#include "perf.h"
11#include "util/cache.h"
12#include "util/debug.h"
13#include "util/header.h"
14#include "util/parse-options.h"
15#include "util/strlist.h"
16#include "util/symbol.h"
17
18static char const *add_name_list_str, *remove_name_list_str;
19
20static const char * const buildid_cache_usage[] = {
21 "perf buildid-cache [<options>]",
22 NULL
23};
24
25static const struct option buildid_cache_options[] = {
26 OPT_STRING('a', "add", &add_name_list_str,
27 "file list", "file(s) to add"),
28 OPT_STRING('r', "remove", &remove_name_list_str, "file list",
29 "file(s) to remove"),
30 OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
31 OPT_END()
32};
33
34static int build_id_cache__add_file(const char *filename, const char *debugdir)
35{
36 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
37 u8 build_id[BUILD_ID_SIZE];
38 int err;
39
40 if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
41 pr_debug("Couldn't read a build-id in %s\n", filename);
42 return -1;
43 }
44
45 build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
46 err = build_id_cache__add_s(sbuild_id, debugdir, filename, false);
47 if (verbose)
48 pr_info("Adding %s %s: %s\n", sbuild_id, filename,
49 err ? "FAIL" : "Ok");
50 return err;
51}
52
53static int build_id_cache__remove_file(const char *filename __used,
54 const char *debugdir __used)
55{
56 u8 build_id[BUILD_ID_SIZE];
57 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
58
59 int err;
60
61 if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
62 pr_debug("Couldn't read a build-id in %s\n", filename);
63 return -1;
64 }
65
66 build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
67 err = build_id_cache__remove_s(sbuild_id, debugdir);
68 if (verbose)
69 pr_info("Removing %s %s: %s\n", sbuild_id, filename,
70 err ? "FAIL" : "Ok");
71
72 return err;
73}
74
75static int __cmd_buildid_cache(void)
76{
77 struct strlist *list;
78 struct str_node *pos;
79 char debugdir[PATH_MAX];
80
81 snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
82 DEBUG_CACHE_DIR);
83
84 if (add_name_list_str) {
85 list = strlist__new(true, add_name_list_str);
86 if (list) {
87 strlist__for_each(pos, list)
88 if (build_id_cache__add_file(pos->s, debugdir)) {
89 if (errno == EEXIST) {
90 pr_debug("%s already in the cache\n",
91 pos->s);
92 continue;
93 }
94 pr_warning("Couldn't add %s: %s\n",
95 pos->s, strerror(errno));
96 }
97
98 strlist__delete(list);
99 }
100 }
101
102 if (remove_name_list_str) {
103 list = strlist__new(true, remove_name_list_str);
104 if (list) {
105 strlist__for_each(pos, list)
106 if (build_id_cache__remove_file(pos->s, debugdir)) {
107 if (errno == ENOENT) {
108 pr_debug("%s wasn't in the cache\n",
109 pos->s);
110 continue;
111 }
112 pr_warning("Couldn't remove %s: %s\n",
113 pos->s, strerror(errno));
114 }
115
116 strlist__delete(list);
117 }
118 }
119
120 return 0;
121}
122
123int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used)
124{
125 argc = parse_options(argc, argv, buildid_cache_options,
126 buildid_cache_usage, 0);
127
128 if (symbol__init() < 0)
129 return -1;
130
131 setup_pager();
132 return __cmd_buildid_cache();
133}
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 1e99ac806913..d0675c02f81e 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include "builtin.h" 9#include "builtin.h"
10#include "perf.h" 10#include "perf.h"
11#include "util/build-id.h"
11#include "util/cache.h" 12#include "util/cache.h"
12#include "util/debug.h" 13#include "util/debug.h"
13#include "util/parse-options.h" 14#include "util/parse-options.h"
@@ -16,6 +17,7 @@
16 17
17static char const *input_name = "perf.data"; 18static char const *input_name = "perf.data";
18static int force; 19static int force;
20static bool with_hits;
19 21
20static const char * const buildid_list_usage[] = { 22static const char * const buildid_list_usage[] = {
21 "perf buildid-list [<options>]", 23 "perf buildid-list [<options>]",
@@ -23,6 +25,7 @@ static const char * const buildid_list_usage[] = {
23}; 25};
24 26
25static const struct option options[] = { 27static const struct option options[] = {
28 OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"),
26 OPT_STRING('i', "input", &input_name, "file", 29 OPT_STRING('i', "input", &input_name, "file",
27 "input file name"), 30 "input file name"),
28 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 31 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@@ -31,26 +34,6 @@ static const struct option options[] = {
31 OPT_END() 34 OPT_END()
32}; 35};
33 36
34static int perf_file_section__process_buildids(struct perf_file_section *self,
35 int feat, int fd)
36{
37 if (feat != HEADER_BUILD_ID)
38 return 0;
39
40 if (lseek(fd, self->offset, SEEK_SET) < 0) {
41 pr_warning("Failed to lseek to %Ld offset for buildids!\n",
42 self->offset);
43 return -1;
44 }
45
46 if (perf_header__read_build_ids(fd, self->offset, self->size)) {
47 pr_warning("Failed to read buildids!\n");
48 return -1;
49 }
50
51 return 0;
52}
53
54static int __cmd_buildid_list(void) 37static int __cmd_buildid_list(void)
55{ 38{
56 int err = -1; 39 int err = -1;
@@ -60,10 +43,10 @@ static int __cmd_buildid_list(void)
60 if (session == NULL) 43 if (session == NULL)
61 return -1; 44 return -1;
62 45
63 err = perf_header__process_sections(&session->header, session->fd, 46 if (with_hits)
64 perf_file_section__process_buildids); 47 perf_session__process_events(session, &build_id__mark_dso_hit_ops);
65 if (err >= 0) 48
66 dsos__fprintf_buildid(stdout); 49 dsos__fprintf_buildid(stdout, with_hits);
67 50
68 perf_session__delete(session); 51 perf_session__delete(session);
69 return err; 52 return err;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index bd71b8ceafb7..18b3f505f9db 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -42,8 +42,8 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
42 struct addr_location al; 42 struct addr_location al;
43 struct sample_data data = { .period = 1, }; 43 struct sample_data data = { .period = 1, };
44 44
45 dump_printf("(IP, %d): %d: %p\n", event->header.misc, 45 dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
46 event->ip.pid, (void *)(long)event->ip.ip); 46 event->ip.pid, event->ip.ip);
47 47
48 if (event__preprocess_sample(event, session, &al, NULL) < 0) { 48 if (event__preprocess_sample(event, session, &al, NULL) < 0) {
49 pr_warning("problem processing %d event, skipping it.\n", 49 pr_warning("problem processing %d event, skipping it.\n",
@@ -51,12 +51,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
51 return -1; 51 return -1;
52 } 52 }
53 53
54 if (al.filtered) 54 if (al.filtered || al.sym == NULL)
55 return 0; 55 return 0;
56 56
57 event__parse_sample(event, session->sample_type, &data); 57 event__parse_sample(event, session->sample_type, &data);
58 58
59 if (al.sym && perf_session__add_hist_entry(session, &al, data.period)) { 59 if (perf_session__add_hist_entry(session, &al, data.period)) {
60 pr_warning("problem incrementing symbol count, skipping event\n"); 60 pr_warning("problem incrementing symbol count, skipping event\n");
61 return -1; 61 return -1;
62 } 62 }
@@ -66,12 +66,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
66} 66}
67 67
68static struct perf_event_ops event_ops = { 68static struct perf_event_ops event_ops = {
69 .process_sample_event = diff__process_sample_event, 69 .sample = diff__process_sample_event,
70 .process_mmap_event = event__process_mmap, 70 .mmap = event__process_mmap,
71 .process_comm_event = event__process_comm, 71 .comm = event__process_comm,
72 .process_exit_event = event__process_task, 72 .exit = event__process_task,
73 .process_fork_event = event__process_task, 73 .fork = event__process_task,
74 .process_lost_event = event__process_lost, 74 .lost = event__process_lost,
75}; 75};
76 76
77static void perf_session__insert_hist_entry_by_name(struct rb_root *root, 77static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -82,29 +82,19 @@ static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
82 struct hist_entry *iter; 82 struct hist_entry *iter;
83 83
84 while (*p != NULL) { 84 while (*p != NULL) {
85 int cmp;
86 parent = *p; 85 parent = *p;
87 iter = rb_entry(parent, struct hist_entry, rb_node); 86 iter = rb_entry(parent, struct hist_entry, rb_node);
88 87 if (hist_entry__cmp(he, iter) < 0)
89 cmp = strcmp(he->map->dso->name, iter->map->dso->name);
90 if (cmp > 0)
91 p = &(*p)->rb_left; 88 p = &(*p)->rb_left;
92 else if (cmp < 0) 89 else
93 p = &(*p)->rb_right; 90 p = &(*p)->rb_right;
94 else {
95 cmp = strcmp(he->sym->name, iter->sym->name);
96 if (cmp > 0)
97 p = &(*p)->rb_left;
98 else
99 p = &(*p)->rb_right;
100 }
101 } 91 }
102 92
103 rb_link_node(&he->rb_node, parent, p); 93 rb_link_node(&he->rb_node, parent, p);
104 rb_insert_color(&he->rb_node, root); 94 rb_insert_color(&he->rb_node, root);
105} 95}
106 96
107static void perf_session__resort_by_name(struct perf_session *self) 97static void perf_session__resort_hist_entries(struct perf_session *self)
108{ 98{
109 unsigned long position = 1; 99 unsigned long position = 1;
110 struct rb_root tmp = RB_ROOT; 100 struct rb_root tmp = RB_ROOT;
@@ -122,29 +112,28 @@ static void perf_session__resort_by_name(struct perf_session *self)
122 self->hists = tmp; 112 self->hists = tmp;
123} 113}
124 114
115static void perf_session__set_hist_entries_positions(struct perf_session *self)
116{
117 perf_session__output_resort(self, self->events_stats.total);
118 perf_session__resort_hist_entries(self);
119}
120
125static struct hist_entry * 121static struct hist_entry *
126perf_session__find_hist_entry_by_name(struct perf_session *self, 122perf_session__find_hist_entry(struct perf_session *self,
127 struct hist_entry *he) 123 struct hist_entry *he)
128{ 124{
129 struct rb_node *n = self->hists.rb_node; 125 struct rb_node *n = self->hists.rb_node;
130 126
131 while (n) { 127 while (n) {
132 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); 128 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
133 int cmp = strcmp(he->map->dso->name, iter->map->dso->name); 129 int64_t cmp = hist_entry__cmp(he, iter);
134 130
135 if (cmp > 0) 131 if (cmp < 0)
136 n = n->rb_left; 132 n = n->rb_left;
137 else if (cmp < 0) 133 else if (cmp > 0)
138 n = n->rb_right; 134 n = n->rb_right;
139 else { 135 else
140 cmp = strcmp(he->sym->name, iter->sym->name); 136 return iter;
141 if (cmp > 0)
142 n = n->rb_left;
143 else if (cmp < 0)
144 n = n->rb_right;
145 else
146 return iter;
147 }
148 } 137 }
149 138
150 return NULL; 139 return NULL;
@@ -155,11 +144,9 @@ static void perf_session__match_hists(struct perf_session *old_session,
155{ 144{
156 struct rb_node *nd; 145 struct rb_node *nd;
157 146
158 perf_session__resort_by_name(old_session);
159
160 for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) { 147 for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) {
161 struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node); 148 struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
162 pos->pair = perf_session__find_hist_entry_by_name(old_session, pos); 149 pos->pair = perf_session__find_hist_entry(old_session, pos);
163 } 150 }
164} 151}
165 152
@@ -177,9 +164,12 @@ static int __cmd_diff(void)
177 ret = perf_session__process_events(session[i], &event_ops); 164 ret = perf_session__process_events(session[i], &event_ops);
178 if (ret) 165 if (ret)
179 goto out_delete; 166 goto out_delete;
180 perf_session__output_resort(session[i], session[i]->events_stats.total);
181 } 167 }
182 168
169 perf_session__output_resort(session[1], session[1]->events_stats.total);
170 if (show_displacement)
171 perf_session__set_hist_entries_positions(session[0]);
172
183 perf_session__match_hists(session[0], session[1]); 173 perf_session__match_hists(session[0], session[1]);
184 perf_session__fprintf_hists(session[1], session[0], 174 perf_session__fprintf_hists(session[1], session[0],
185 show_displacement, stdout); 175 show_displacement, stdout);
@@ -204,7 +194,7 @@ static const struct option options[] = {
204 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 194 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
205 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 195 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
206 "load module symbols - WARNING: use only with -k and LIVE kernel"), 196 "load module symbols - WARNING: use only with -k and LIVE kernel"),
207 OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, 197 OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
208 "Don't shorten the pathnames taking into account the cwd"), 198 "Don't shorten the pathnames taking into account the cwd"),
209 OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", 199 OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
210 "only consider symbols in these dsos"), 200 "only consider symbols in these dsos"),
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 9f810b17c25c..215b584007b1 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -286,8 +286,7 @@ void list_common_cmds_help(void)
286 286
287 puts(" The most commonly used perf commands are:"); 287 puts(" The most commonly used perf commands are:");
288 for (i = 0; i < ARRAY_SIZE(common_cmds); i++) { 288 for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
289 printf(" %s ", common_cmds[i].name); 289 printf(" %-*s ", longest, common_cmds[i].name);
290 mput_char(' ', longest - strlen(common_cmds[i].name));
291 puts(common_cmds[i].help); 290 puts(common_cmds[i].help);
292 } 291 }
293} 292}
@@ -314,8 +313,6 @@ static const char *cmd_to_page(const char *perf_cmd)
314 return "perf"; 313 return "perf";
315 else if (!prefixcmp(perf_cmd, "perf")) 314 else if (!prefixcmp(perf_cmd, "perf"))
316 return perf_cmd; 315 return perf_cmd;
317 else if (is_perf_command(perf_cmd))
318 return prepend("perf-", perf_cmd);
319 else 316 else
320 return prepend("perf-", perf_cmd); 317 return prepend("perf-", perf_cmd);
321} 318}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 93c67bf53d2c..924a9518931a 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -92,23 +92,18 @@ static void setup_cpunode_map(void)
92 if (!dir1) 92 if (!dir1)
93 return; 93 return;
94 94
95 while (true) { 95 while ((dent1 = readdir(dir1)) != NULL) {
96 dent1 = readdir(dir1); 96 if (dent1->d_type != DT_DIR ||
97 if (!dent1) 97 sscanf(dent1->d_name, "node%u", &mem) < 1)
98 break;
99
100 if (sscanf(dent1->d_name, "node%u", &mem) < 1)
101 continue; 98 continue;
102 99
103 snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name); 100 snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
104 dir2 = opendir(buf); 101 dir2 = opendir(buf);
105 if (!dir2) 102 if (!dir2)
106 continue; 103 continue;
107 while (true) { 104 while ((dent2 = readdir(dir2)) != NULL) {
108 dent2 = readdir(dir2); 105 if (dent2->d_type != DT_LNK ||
109 if (!dent2) 106 sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
110 break;
111 if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
112 continue; 107 continue;
113 cpunode_map[cpu] = mem; 108 cpunode_map[cpu] = mem;
114 } 109 }
@@ -321,11 +316,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
321 316
322 event__parse_sample(event, session->sample_type, &data); 317 event__parse_sample(event, session->sample_type, &data);
323 318
324 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 319 dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
325 event->header.misc, 320 data.pid, data.tid, data.ip, data.period);
326 data.pid, data.tid,
327 (void *)(long)data.ip,
328 (long long)data.period);
329 321
330 thread = perf_session__findnew(session, event->ip.pid); 322 thread = perf_session__findnew(session, event->ip.pid);
331 if (thread == NULL) { 323 if (thread == NULL) {
@@ -342,22 +334,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
342 return 0; 334 return 0;
343} 335}
344 336
345static int sample_type_check(struct perf_session *session)
346{
347 if (!(session->sample_type & PERF_SAMPLE_RAW)) {
348 fprintf(stderr,
349 "No trace sample to read. Did you call perf record "
350 "without -R?");
351 return -1;
352 }
353
354 return 0;
355}
356
357static struct perf_event_ops event_ops = { 337static struct perf_event_ops event_ops = {
358 .process_sample_event = process_sample_event, 338 .sample = process_sample_event,
359 .process_comm_event = event__process_comm, 339 .comm = event__process_comm,
360 .sample_type_check = sample_type_check,
361}; 340};
362 341
363static double fragmentation(unsigned long n_req, unsigned long n_alloc) 342static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -390,7 +369,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
390 if (is_caller) { 369 if (is_caller) {
391 addr = data->call_site; 370 addr = data->call_site;
392 if (!raw_ip) 371 if (!raw_ip)
393 sym = map_groups__find_function(&session->kmaps, session, addr, NULL); 372 sym = map_groups__find_function(&session->kmaps, addr, NULL);
394 } else 373 } else
395 addr = data->ptr; 374 addr = data->ptr;
396 375
@@ -504,11 +483,14 @@ static void sort_result(void)
504 483
505static int __cmd_kmem(void) 484static int __cmd_kmem(void)
506{ 485{
507 int err; 486 int err = -EINVAL;
508 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 487 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
509 if (session == NULL) 488 if (session == NULL)
510 return -ENOMEM; 489 return -ENOMEM;
511 490
491 if (!perf_session__has_traces(session, "kmem record"))
492 goto out_delete;
493
512 setup_pager(); 494 setup_pager();
513 err = perf_session__process_events(session, &event_ops); 495 err = perf_session__process_events(session, &event_ops);
514 if (err != 0) 496 if (err != 0)
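The setup_cpunode_map() rewrite above folds readdir() into the loop condition and rejects non-matching entries with a d_type check before the sscanf(). The same pattern as a self-contained sketch that prints the cpu-to-node mapping from sysfs (paths follow the layout the tool reads; error handling is trimmed for brevity):

#include <dirent.h>
#include <stdio.h>

#define PATH_SYS_NODE "/sys/devices/system/node"

int main(void)
{
	DIR *dir1, *dir2;
	struct dirent *dent1, *dent2;
	unsigned int mem, cpu;
	char buf[512];

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return 1;

	/* read-and-test in the loop condition, filter by entry type */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, sizeof(buf), "%s/%s", PATH_SYS_NODE,
			 dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			printf("cpu%u -> node%u\n", cpu, mem);
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}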
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
new file mode 100644
index 000000000000..e12c844df1e2
--- /dev/null
+++ b/tools/perf/builtin-lock.c
@@ -0,0 +1,822 @@
1#include "builtin.h"
2#include "perf.h"
3
4#include "util/util.h"
5#include "util/cache.h"
6#include "util/symbol.h"
7#include "util/thread.h"
8#include "util/header.h"
9
10#include "util/parse-options.h"
11#include "util/trace-event.h"
12
13#include "util/debug.h"
14#include "util/session.h"
15
16#include <sys/types.h>
17#include <sys/prctl.h>
18#include <semaphore.h>
19#include <pthread.h>
20#include <math.h>
21#include <limits.h>
22
23#include <linux/list.h>
24#include <linux/hash.h>
25
26/* based on kernel/lockdep.c */
27#define LOCKHASH_BITS 12
28#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
29
30static struct list_head lockhash_table[LOCKHASH_SIZE];
31
32#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
33#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
34
35#define LOCK_STATE_UNLOCKED 0 /* initial state */
36#define LOCK_STATE_LOCKED 1
37
38struct lock_stat {
39 struct list_head hash_entry;
40 struct rb_node rb; /* used for sorting */
41
42 /*
43 * FIXME: raw_field_value() returns unsigned long long,
 44 * so the address of lockdep_map has to be treated as 64 bits.
 45 * Is there a better solution?
46 */
47 void *addr; /* address of lockdep_map, used as ID */
 48 char *name; /* strcpy() target, so it cannot be const */
49
50 int state;
51 u64 prev_event_time; /* timestamp of previous event */
52
53 unsigned int nr_acquired;
54 unsigned int nr_acquire;
55 unsigned int nr_contended;
56 unsigned int nr_release;
57
58 /* these times are in nano sec. */
59 u64 wait_time_total;
60 u64 wait_time_min;
61 u64 wait_time_max;
62};
63
64/* build a simple key function that returns whether one is bigger than two */
65#define SINGLE_KEY(member) \
66 static int lock_stat_key_ ## member(struct lock_stat *one, \
67 struct lock_stat *two) \
68 { \
69 return one->member > two->member; \
70 }
71
72SINGLE_KEY(nr_acquired)
73SINGLE_KEY(nr_contended)
74SINGLE_KEY(wait_time_total)
75SINGLE_KEY(wait_time_min)
76SINGLE_KEY(wait_time_max)
77
78struct lock_key {
79 /*
 80 * name: the value the user specifies as a sort key;
 81 * it should be simpler than the raw member name,
82 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
83 */
84 const char *name;
85 int (*key)(struct lock_stat*, struct lock_stat*);
86};
87
88static const char *sort_key = "acquired";
89
90static int (*compare)(struct lock_stat *, struct lock_stat *);
91
92static struct rb_root result; /* place to store sorted data */
93
94#define DEF_KEY_LOCK(name, fn_suffix) \
95 { #name, lock_stat_key_ ## fn_suffix }
96struct lock_key keys[] = {
97 DEF_KEY_LOCK(acquired, nr_acquired),
98 DEF_KEY_LOCK(contended, nr_contended),
99 DEF_KEY_LOCK(wait_total, wait_time_total),
100 DEF_KEY_LOCK(wait_min, wait_time_min),
101 DEF_KEY_LOCK(wait_max, wait_time_max),
102
 103 /* more complicated extra comparisons should go here */
104
105 { NULL, NULL }
106};
107
108static void select_key(void)
109{
110 int i;
111
112 for (i = 0; keys[i].name; i++) {
113 if (!strcmp(keys[i].name, sort_key)) {
114 compare = keys[i].key;
115 return;
116 }
117 }
118
119 die("Unknown compare key:%s\n", sort_key);
120}
121
122static void insert_to_result(struct lock_stat *st,
123 int (*bigger)(struct lock_stat *, struct lock_stat *))
124{
125 struct rb_node **rb = &result.rb_node;
126 struct rb_node *parent = NULL;
127 struct lock_stat *p;
128
129 while (*rb) {
130 p = container_of(*rb, struct lock_stat, rb);
131 parent = *rb;
132
133 if (bigger(st, p))
134 rb = &(*rb)->rb_left;
135 else
136 rb = &(*rb)->rb_right;
137 }
138
139 rb_link_node(&st->rb, parent, rb);
140 rb_insert_color(&st->rb, &result);
141}
142
143/* returns the leftmost element of result and erases it */
144static struct lock_stat *pop_from_result(void)
145{
146 struct rb_node *node = result.rb_node;
147
148 if (!node)
149 return NULL;
150
151 while (node->rb_left)
152 node = node->rb_left;
153
154 rb_erase(node, &result);
155 return container_of(node, struct lock_stat, rb);
156}
157
158static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
159{
160 struct list_head *entry = lockhashentry(addr);
161 struct lock_stat *ret, *new;
162
163 list_for_each_entry(ret, entry, hash_entry) {
164 if (ret->addr == addr)
165 return ret;
166 }
167
168 new = zalloc(sizeof(struct lock_stat));
169 if (!new)
170 goto alloc_failed;
171
172 new->addr = addr;
 173 new->name = zalloc(strlen(name) + 1);
174 if (!new->name)
175 goto alloc_failed;
176 strcpy(new->name, name);
177
178 /* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
179 new->state = LOCK_STATE_UNLOCKED;
180 new->wait_time_min = ULLONG_MAX;
181
182 list_add(&new->hash_entry, entry);
183 return new;
184
185alloc_failed:
186 die("memory allocation failed\n");
187}
188
189static char const *input_name = "perf.data";
190
191static int profile_cpu = -1;
192
193struct raw_event_sample {
194 u32 size;
195 char data[0];
196};
197
198struct trace_acquire_event {
199 void *addr;
200 const char *name;
201};
202
203struct trace_acquired_event {
204 void *addr;
205 const char *name;
206};
207
208struct trace_contended_event {
209 void *addr;
210 const char *name;
211};
212
213struct trace_release_event {
214 void *addr;
215 const char *name;
216};
217
218struct trace_lock_handler {
219 void (*acquire_event)(struct trace_acquire_event *,
220 struct event *,
221 int cpu,
222 u64 timestamp,
223 struct thread *thread);
224
225 void (*acquired_event)(struct trace_acquired_event *,
226 struct event *,
227 int cpu,
228 u64 timestamp,
229 struct thread *thread);
230
231 void (*contended_event)(struct trace_contended_event *,
232 struct event *,
233 int cpu,
234 u64 timestamp,
235 struct thread *thread);
236
237 void (*release_event)(struct trace_release_event *,
238 struct event *,
239 int cpu,
240 u64 timestamp,
241 struct thread *thread);
242};
243
244static void
245report_lock_acquire_event(struct trace_acquire_event *acquire_event,
246 struct event *__event __used,
247 int cpu __used,
248 u64 timestamp,
249 struct thread *thread __used)
250{
251 struct lock_stat *st;
252
253 st = lock_stat_findnew(acquire_event->addr, acquire_event->name);
254
255 switch (st->state) {
256 case LOCK_STATE_UNLOCKED:
257 break;
258 case LOCK_STATE_LOCKED:
259 break;
260 default:
261 BUG_ON(1);
262 break;
263 }
264
265 st->prev_event_time = timestamp;
266}
267
268static void
269report_lock_acquired_event(struct trace_acquired_event *acquired_event,
270 struct event *__event __used,
271 int cpu __used,
272 u64 timestamp,
273 struct thread *thread __used)
274{
275 struct lock_stat *st;
276
277 st = lock_stat_findnew(acquired_event->addr, acquired_event->name);
278
279 switch (st->state) {
280 case LOCK_STATE_UNLOCKED:
281 st->state = LOCK_STATE_LOCKED;
282 st->nr_acquired++;
283 break;
284 case LOCK_STATE_LOCKED:
285 break;
286 default:
287 BUG_ON(1);
288 break;
289 }
290
291 st->prev_event_time = timestamp;
292}
293
294static void
295report_lock_contended_event(struct trace_contended_event *contended_event,
296 struct event *__event __used,
297 int cpu __used,
298 u64 timestamp,
299 struct thread *thread __used)
300{
301 struct lock_stat *st;
302
303 st = lock_stat_findnew(contended_event->addr, contended_event->name);
304
305 switch (st->state) {
306 case LOCK_STATE_UNLOCKED:
307 break;
308 case LOCK_STATE_LOCKED:
309 st->nr_contended++;
310 break;
311 default:
312 BUG_ON(1);
313 break;
314 }
315
316 st->prev_event_time = timestamp;
317}
318
319static void
320report_lock_release_event(struct trace_release_event *release_event,
321 struct event *__event __used,
322 int cpu __used,
323 u64 timestamp,
324 struct thread *thread __used)
325{
326 struct lock_stat *st;
327 u64 hold_time;
328
329 st = lock_stat_findnew(release_event->addr, release_event->name);
330
331 switch (st->state) {
332 case LOCK_STATE_UNLOCKED:
333 break;
334 case LOCK_STATE_LOCKED:
335 st->state = LOCK_STATE_UNLOCKED;
336 hold_time = timestamp - st->prev_event_time;
337
338 if (timestamp < st->prev_event_time) {
 339 /* unfortunately, this can happen... */
340 goto end;
341 }
342
343 if (st->wait_time_min > hold_time)
344 st->wait_time_min = hold_time;
345 if (st->wait_time_max < hold_time)
346 st->wait_time_max = hold_time;
347 st->wait_time_total += hold_time;
348
349 st->nr_release++;
350 break;
351 default:
352 BUG_ON(1);
353 break;
354 }
355
356end:
357 st->prev_event_time = timestamp;
358}
359
360/* lock-oriented handlers */
361/* TODO: CPU-oriented and thread-oriented handlers */
362static struct trace_lock_handler report_lock_ops = {
363 .acquire_event = report_lock_acquire_event,
364 .acquired_event = report_lock_acquired_event,
365 .contended_event = report_lock_contended_event,
366 .release_event = report_lock_release_event,
367};
368
369static struct trace_lock_handler *trace_handler;
370
371static void
372process_lock_acquire_event(void *data,
373 struct event *event __used,
374 int cpu __used,
375 u64 timestamp __used,
376 struct thread *thread __used)
377{
378 struct trace_acquire_event acquire_event;
 379 u64 tmp; /* needed to cast the 64-bit field value to a pointer */
380
381 tmp = raw_field_value(event, "lockdep_addr", data);
382 memcpy(&acquire_event.addr, &tmp, sizeof(void *));
383 acquire_event.name = (char *)raw_field_ptr(event, "name", data);
384
385 if (trace_handler->acquire_event)
386 trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
387}
388
389static void
390process_lock_acquired_event(void *data,
391 struct event *event __used,
392 int cpu __used,
393 u64 timestamp __used,
394 struct thread *thread __used)
395{
396 struct trace_acquired_event acquired_event;
 397 u64 tmp; /* needed to cast the 64-bit field value to a pointer */
398
399 tmp = raw_field_value(event, "lockdep_addr", data);
400 memcpy(&acquired_event.addr, &tmp, sizeof(void *));
401 acquired_event.name = (char *)raw_field_ptr(event, "name", data);
402
 403 if (trace_handler->acquired_event)
404 trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
405}
406
407static void
408process_lock_contended_event(void *data,
409 struct event *event __used,
410 int cpu __used,
411 u64 timestamp __used,
412 struct thread *thread __used)
413{
414 struct trace_contended_event contended_event;
 415 u64 tmp; /* needed to cast the 64-bit field value to a pointer */
416
417 tmp = raw_field_value(event, "lockdep_addr", data);
418 memcpy(&contended_event.addr, &tmp, sizeof(void *));
419 contended_event.name = (char *)raw_field_ptr(event, "name", data);
420
 421 if (trace_handler->contended_event)
422 trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
423}
424
425static void
426process_lock_release_event(void *data,
427 struct event *event __used,
428 int cpu __used,
429 u64 timestamp __used,
430 struct thread *thread __used)
431{
432 struct trace_release_event release_event;
 433 u64 tmp; /* needed to cast the 64-bit field value to a pointer */
434
435 tmp = raw_field_value(event, "lockdep_addr", data);
436 memcpy(&release_event.addr, &tmp, sizeof(void *));
437 release_event.name = (char *)raw_field_ptr(event, "name", data);
438
 439 if (trace_handler->release_event)
440 trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
441}
442
443static void
444process_raw_event(void *data, int cpu,
445 u64 timestamp, struct thread *thread)
446{
447 struct event *event;
448 int type;
449
450 type = trace_parse_common_type(data);
451 event = trace_find_event(type);
452
453 if (!strcmp(event->name, "lock_acquire"))
454 process_lock_acquire_event(data, event, cpu, timestamp, thread);
455 if (!strcmp(event->name, "lock_acquired"))
456 process_lock_acquired_event(data, event, cpu, timestamp, thread);
457 if (!strcmp(event->name, "lock_contended"))
458 process_lock_contended_event(data, event, cpu, timestamp, thread);
459 if (!strcmp(event->name, "lock_release"))
460 process_lock_release_event(data, event, cpu, timestamp, thread);
461}
462
463struct raw_event_queue {
464 u64 timestamp;
465 int cpu;
466 void *data;
467 struct thread *thread;
468 struct list_head list;
469};
470
471static LIST_HEAD(raw_event_head);
472
473#define FLUSH_PERIOD (5 * NSEC_PER_SEC)
474
475static u64 flush_limit = ULLONG_MAX;
476static u64 last_flush = 0;
477static struct raw_event_queue *last_inserted;
478
479static void flush_raw_event_queue(u64 limit)
480{
481 struct raw_event_queue *tmp, *iter;
482
483 list_for_each_entry_safe(iter, tmp, &raw_event_head, list) {
484 if (iter->timestamp > limit)
485 return;
486
487 if (iter == last_inserted)
488 last_inserted = NULL;
489
490 process_raw_event(iter->data, iter->cpu, iter->timestamp,
491 iter->thread);
492
493 last_flush = iter->timestamp;
494 list_del(&iter->list);
495 free(iter->data);
496 free(iter);
497 }
498}
499
500static void __queue_raw_event_end(struct raw_event_queue *new)
501{
502 struct raw_event_queue *iter;
503
504 list_for_each_entry_reverse(iter, &raw_event_head, list) {
505 if (iter->timestamp < new->timestamp) {
506 list_add(&new->list, &iter->list);
507 return;
508 }
509 }
510
511 list_add(&new->list, &raw_event_head);
512}
513
514static void __queue_raw_event_before(struct raw_event_queue *new,
515 struct raw_event_queue *iter)
516{
517 list_for_each_entry_continue_reverse(iter, &raw_event_head, list) {
518 if (iter->timestamp < new->timestamp) {
519 list_add(&new->list, &iter->list);
520 return;
521 }
522 }
523
524 list_add(&new->list, &raw_event_head);
525}
526
527static void __queue_raw_event_after(struct raw_event_queue *new,
528 struct raw_event_queue *iter)
529{
530 list_for_each_entry_continue(iter, &raw_event_head, list) {
531 if (iter->timestamp > new->timestamp) {
532 list_add_tail(&new->list, &iter->list);
533 return;
534 }
535 }
536 list_add_tail(&new->list, &raw_event_head);
537}
538
539/* The queue is ordered by time */
540static void __queue_raw_event(struct raw_event_queue *new)
541{
542 if (!last_inserted) {
543 __queue_raw_event_end(new);
544 return;
545 }
546
547 /*
548 * Most of the time the current event has a timestamp
549 * very close to the last event inserted, unless we just switched
 550 * to another event buffer. Sorting with a list anchored at the
 551 * last inserted event, which is usually close to the current one,
 552 * is probably more efficient than an rbtree-based sort.
553 */
554 if (last_inserted->timestamp >= new->timestamp)
555 __queue_raw_event_before(new, last_inserted);
556 else
557 __queue_raw_event_after(new, last_inserted);
558}
559
560static void queue_raw_event(void *data, int raw_size, int cpu,
561 u64 timestamp, struct thread *thread)
562{
563 struct raw_event_queue *new;
564
565 if (flush_limit == ULLONG_MAX)
566 flush_limit = timestamp + FLUSH_PERIOD;
567
568 if (timestamp < last_flush) {
569 printf("Warning: Timestamp below last timeslice flush\n");
570 return;
571 }
572
573 new = malloc(sizeof(*new));
574 if (!new)
575 die("Not enough memory\n");
576
577 new->timestamp = timestamp;
578 new->cpu = cpu;
579 new->thread = thread;
580
581 new->data = malloc(raw_size);
582 if (!new->data)
583 die("Not enough memory\n");
584
585 memcpy(new->data, data, raw_size);
586
587 __queue_raw_event(new);
588 last_inserted = new;
589
590 /*
 591 * We want to keep a slice of events covering 2 * FLUSH_PERIOD.
 592 * If FLUSH_PERIOD is big enough, it ensures that every event that
 593 * occurred in the first half of the timeslice has been buffered and
 594 * that none remain in flight (we need that because of the weakly
 595 * ordered event recording we have). Once we reach the 2 * FLUSH_PERIOD
 596 * mark, we flush the first half to be gentle with memory (the second
 597 * half can still receive new events in the middle, so wait another
 598 * period before flushing it).
599 */
600 if (new->timestamp > flush_limit &&
601 new->timestamp - flush_limit > FLUSH_PERIOD) {
602 flush_limit += FLUSH_PERIOD;
603 flush_raw_event_queue(flush_limit);
604 }
605}
606
607static int process_sample_event(event_t *event, struct perf_session *session)
608{
609 struct thread *thread;
610 struct sample_data data;
611
612 bzero(&data, sizeof(struct sample_data));
613 event__parse_sample(event, session->sample_type, &data);
614 thread = perf_session__findnew(session, data.pid);
615
616 if (thread == NULL) {
617 pr_debug("problem processing %d event, skipping it.\n",
618 event->header.type);
619 return -1;
620 }
621
622 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
623
624 if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
625 return 0;
626
627 queue_raw_event(data.raw_data, data.raw_size, data.cpu, data.time, thread);
628
629 return 0;
630}
631
632/* TODO: various ways to print, coloring, nanosecond or millisecond units */
633static void print_result(void)
634{
635 struct lock_stat *st;
636 char cut_name[20];
637
638 printf("%18s ", "ID");
639 printf("%20s ", "Name");
640 printf("%10s ", "acquired");
641 printf("%10s ", "contended");
642
643 printf("%15s ", "total wait (ns)");
644 printf("%15s ", "max wait (ns)");
645 printf("%15s ", "min wait (ns)");
646
647 printf("\n\n");
648
649 while ((st = pop_from_result())) {
650 bzero(cut_name, 20);
651
652 printf("%p ", st->addr);
653
654 if (strlen(st->name) < 16) {
655 /* output raw name */
656 printf("%20s ", st->name);
657 } else {
658 strncpy(cut_name, st->name, 16);
659 cut_name[16] = '.';
660 cut_name[17] = '.';
661 cut_name[18] = '.';
662 cut_name[19] = '\0';
 663 /* truncate the name to keep the output columns aligned */
664 printf("%20s ", cut_name);
665 }
666
667 printf("%10u ", st->nr_acquired);
668 printf("%10u ", st->nr_contended);
669
670 printf("%15llu ", st->wait_time_total);
671 printf("%15llu ", st->wait_time_max);
672 printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
673 0 : st->wait_time_min);
674 printf("\n");
675 }
676}
677
678static void dump_map(void)
679{
680 unsigned int i;
681 struct lock_stat *st;
682
683 for (i = 0; i < LOCKHASH_SIZE; i++) {
684 list_for_each_entry(st, &lockhash_table[i], hash_entry) {
685 printf("%p: %s\n", st->addr, st->name);
686 }
687 }
688}
689
690static struct perf_event_ops eops = {
691 .sample = process_sample_event,
692 .comm = event__process_comm,
693};
694
695static struct perf_session *session;
696
697static int read_events(void)
698{
699 session = perf_session__new(input_name, O_RDONLY, 0);
700 if (!session)
701 die("Initializing perf session failed\n");
702
703 return perf_session__process_events(session, &eops);
704}
705
706static void sort_result(void)
707{
708 unsigned int i;
709 struct lock_stat *st;
710
711 for (i = 0; i < LOCKHASH_SIZE; i++) {
712 list_for_each_entry(st, &lockhash_table[i], hash_entry) {
713 insert_to_result(st, compare);
714 }
715 }
716}
717
718static void __cmd_report(void)
719{
720 setup_pager();
721 select_key();
722 read_events();
723 flush_raw_event_queue(ULLONG_MAX);
724 sort_result();
725 print_result();
726}
727
728static const char * const report_usage[] = {
729 "perf lock report [<options>]",
730 NULL
731};
732
733static const struct option report_options[] = {
734 OPT_STRING('k', "key", &sort_key, "acquired",
735 "key for sorting"),
736 /* TODO: type */
737 OPT_END()
738};
739
740static const char * const lock_usage[] = {
741 "perf lock [<options>] {record|trace|report}",
742 NULL
743};
744
745static const struct option lock_options[] = {
746 OPT_STRING('i', "input", &input_name, "file", "input file name"),
747 OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
748 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
749 OPT_END()
750};
751
752static const char *record_args[] = {
753 "record",
754 "-a",
755 "-R",
756 "-f",
757 "-m", "1024",
758 "-c", "1",
759 "-e", "lock:lock_acquire:r",
760 "-e", "lock:lock_acquired:r",
761 "-e", "lock:lock_contended:r",
762 "-e", "lock:lock_release:r",
763};
764
765static int __cmd_record(int argc, const char **argv)
766{
767 unsigned int rec_argc, i, j;
768 const char **rec_argv;
769
770 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
771 rec_argv = calloc(rec_argc + 1, sizeof(char *));
772
773 for (i = 0; i < ARRAY_SIZE(record_args); i++)
774 rec_argv[i] = strdup(record_args[i]);
775
776 for (j = 1; j < (unsigned int)argc; j++, i++)
777 rec_argv[i] = argv[j];
778
779 BUG_ON(i != rec_argc);
780
781 return cmd_record(i, rec_argv, NULL);
782}
783
784int cmd_lock(int argc, const char **argv, const char *prefix __used)
785{
786 unsigned int i;
787
788 symbol__init();
789 for (i = 0; i < LOCKHASH_SIZE; i++)
790 INIT_LIST_HEAD(lockhash_table + i);
791
792 argc = parse_options(argc, argv, lock_options, lock_usage,
793 PARSE_OPT_STOP_AT_NON_OPTION);
794 if (!argc)
795 usage_with_options(lock_usage, lock_options);
796
797 if (!strncmp(argv[0], "rec", 3)) {
798 return __cmd_record(argc, argv);
799 } else if (!strncmp(argv[0], "report", 6)) {
800 trace_handler = &report_lock_ops;
801 if (argc) {
802 argc = parse_options(argc, argv,
803 report_options, report_usage, 0);
804 if (argc)
805 usage_with_options(report_usage, report_options);
806 }
807 __cmd_report();
808 } else if (!strcmp(argv[0], "trace")) {
809 /* Aliased to 'perf trace' */
810 return cmd_trace(argc, argv, prefix);
811 } else if (!strcmp(argv[0], "map")) {
812 /* recycling report_lock_ops */
813 trace_handler = &report_lock_ops;
814 setup_pager();
815 read_events();
816 dump_map();
817 } else {
818 usage_with_options(lock_usage, lock_options);
819 }
820
821 return 0;
822}
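The new builtin-lock.c buffers raw events in a time-ordered list and only processes them once a 2 * FLUSH_PERIOD window guarantees the first half can no longer receive late arrivals. The window arithmetic from queue_raw_event(), reduced to a standalone sketch (timestamps are hand-fed u64 values; the list and event payloads are omitted):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define FLUSH_PERIOD	(5 * NSEC_PER_SEC)

static uint64_t flush_limit = UINT64_MAX;

/* mirror of the flush decision in queue_raw_event(): once an event
 * lands more than FLUSH_PERIOD beyond flush_limit, everything with
 * a timestamp up to the advanced limit is safe to process */
static void queue(uint64_t timestamp)
{
	if (flush_limit == UINT64_MAX)
		flush_limit = timestamp + FLUSH_PERIOD;

	printf("buffer event at %llu ns\n", (unsigned long long)timestamp);

	if (timestamp > flush_limit &&
	    timestamp - flush_limit > FLUSH_PERIOD) {
		flush_limit += FLUSH_PERIOD;
		printf("flush events with timestamp <= %llu ns\n",
		       (unsigned long long)flush_limit);
	}
}

int main(void)
{
	queue(0);			/* window starts: limit = 5s  */
	queue(4 * NSEC_PER_SEC);	/* still inside, just buffered */
	queue(11 * NSEC_PER_SEC);	/* past 2 * FLUSH_PERIOD: flush */
	return 0;
}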
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index c1e6774fd3ed..ad47bd4c50ef 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -41,7 +41,6 @@
41#include "util/debugfs.h" 41#include "util/debugfs.h"
42#include "util/symbol.h" 42#include "util/symbol.h"
43#include "util/thread.h" 43#include "util/thread.h"
44#include "util/session.h"
45#include "util/parse-options.h" 44#include "util/parse-options.h"
46#include "util/parse-events.h" /* For debugfs_path */ 45#include "util/parse-events.h" /* For debugfs_path */
47#include "util/probe-finder.h" 46#include "util/probe-finder.h"
@@ -55,11 +54,13 @@ static struct {
55 bool need_dwarf; 54 bool need_dwarf;
56 bool list_events; 55 bool list_events;
57 bool force_add; 56 bool force_add;
57 bool show_lines;
58 int nr_probe; 58 int nr_probe;
59 struct probe_point probes[MAX_PROBES]; 59 struct probe_point probes[MAX_PROBES];
60 struct strlist *dellist; 60 struct strlist *dellist;
61 struct perf_session *psession; 61 struct map_groups kmap_groups;
62 struct map *kmap; 62 struct map *kmaps[MAP__NR_TYPES];
63 struct line_range line_range;
63} session; 64} session;
64 65
65 66
@@ -120,8 +121,8 @@ static int opt_del_probe_event(const struct option *opt __used,
120static void evaluate_probe_point(struct probe_point *pp) 121static void evaluate_probe_point(struct probe_point *pp)
121{ 122{
122 struct symbol *sym; 123 struct symbol *sym;
123 sym = map__find_symbol_by_name(session.kmap, pp->function, 124 sym = map__find_symbol_by_name(session.kmaps[MAP__FUNCTION],
124 session.psession, NULL); 125 pp->function, NULL);
125 if (!sym) 126 if (!sym)
126 die("Kernel symbol \'%s\' not found - probe not added.", 127 die("Kernel symbol \'%s\' not found - probe not added.",
127 pp->function); 128 pp->function);
@@ -130,12 +131,23 @@ static void evaluate_probe_point(struct probe_point *pp)
130#ifndef NO_LIBDWARF 131#ifndef NO_LIBDWARF
131static int open_vmlinux(void) 132static int open_vmlinux(void)
132{ 133{
133 if (map__load(session.kmap, session.psession, NULL) < 0) { 134 if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) {
134 pr_debug("Failed to load kernel map.\n"); 135 pr_debug("Failed to load kernel map.\n");
135 return -EINVAL; 136 return -EINVAL;
136 } 137 }
137 pr_debug("Try to open %s\n", session.kmap->dso->long_name); 138 pr_debug("Try to open %s\n",
138 return open(session.kmap->dso->long_name, O_RDONLY); 139 session.kmaps[MAP__FUNCTION]->dso->long_name);
140 return open(session.kmaps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
141}
142
143static int opt_show_lines(const struct option *opt __used,
144 const char *str, int unset __used)
145{
146 if (str)
147 parse_line_range_desc(str, &session.line_range);
148 INIT_LIST_HEAD(&session.line_range.line_list);
149 session.show_lines = true;
150 return 0;
139} 151}
140#endif 152#endif
141 153
@@ -144,6 +156,7 @@ static const char * const probe_usage[] = {
144 "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", 156 "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
145 "perf probe [<options>] --del '[GROUP:]EVENT' ...", 157 "perf probe [<options>] --del '[GROUP:]EVENT' ...",
146 "perf probe --list", 158 "perf probe --list",
159 "perf probe --line 'LINEDESC'",
147 NULL 160 NULL
148}; 161};
149 162
@@ -182,9 +195,31 @@ static const struct option options[] = {
182 opt_add_probe_event), 195 opt_add_probe_event),
183 OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events" 196 OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
184 " with existing name"), 197 " with existing name"),
198#ifndef NO_LIBDWARF
199 OPT_CALLBACK('L', "line", NULL,
200 "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]",
201 "Show source code lines.", opt_show_lines),
202#endif
185 OPT_END() 203 OPT_END()
186}; 204};
187 205
206/* Initialize symbol maps for vmlinux */
207static void init_vmlinux(void)
208{
209 symbol_conf.sort_by_name = true;
210 if (symbol_conf.vmlinux_name == NULL)
211 symbol_conf.try_vmlinux_path = true;
212 else
213 pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
214 if (symbol__init() < 0)
215 die("Failed to init symbol map.");
216
217 map_groups__init(&session.kmap_groups);
218 if (map_groups__create_kernel_maps(&session.kmap_groups,
219 session.kmaps) < 0)
220 die("Failed to create kernel maps.");
221}
222
188int cmd_probe(int argc, const char **argv, const char *prefix __used) 223int cmd_probe(int argc, const char **argv, const char *prefix __used)
189{ 224{
190 int i, ret; 225 int i, ret;
@@ -203,7 +238,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
203 parse_probe_event_argv(argc, argv); 238 parse_probe_event_argv(argc, argv);
204 } 239 }
205 240
206 if ((!session.nr_probe && !session.dellist && !session.list_events)) 241 if ((!session.nr_probe && !session.dellist && !session.list_events &&
242 !session.show_lines))
207 usage_with_options(probe_usage, options); 243 usage_with_options(probe_usage, options);
208 244
209 if (debugfs_valid_mountpoint(debugfs_path) < 0) 245 if (debugfs_valid_mountpoint(debugfs_path) < 0)
@@ -215,10 +251,34 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
215 " --add/--del.\n"); 251 " --add/--del.\n");
216 usage_with_options(probe_usage, options); 252 usage_with_options(probe_usage, options);
217 } 253 }
254 if (session.show_lines) {
255 pr_warning(" Error: Don't use --list with --line.\n");
256 usage_with_options(probe_usage, options);
257 }
218 show_perf_probe_events(); 258 show_perf_probe_events();
219 return 0; 259 return 0;
220 } 260 }
221 261
262#ifndef NO_LIBDWARF
263 if (session.show_lines) {
264 if (session.nr_probe != 0 || session.dellist) {
265 pr_warning(" Error: Don't use --line with"
266 " --add/--del.\n");
267 usage_with_options(probe_usage, options);
268 }
269 init_vmlinux();
270 fd = open_vmlinux();
271 if (fd < 0)
272 die("Could not open debuginfo file.");
273 ret = find_line_range(fd, &session.line_range);
274 if (ret <= 0)
275 die("Source line is not found.\n");
276 close(fd);
277 show_line_range(&session.line_range);
278 return 0;
279 }
280#endif
281
222 if (session.dellist) { 282 if (session.dellist) {
223 del_trace_kprobe_events(session.dellist); 283 del_trace_kprobe_events(session.dellist);
224 strlist__delete(session.dellist); 284 strlist__delete(session.dellist);
@@ -226,20 +286,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
226 return 0; 286 return 0;
227 } 287 }
228 288
229 /* Initialize symbol maps for vmlinux */ 289 /* Add probes */
230 symbol_conf.sort_by_name = true; 290 init_vmlinux();
231 if (symbol_conf.vmlinux_name == NULL)
232 symbol_conf.try_vmlinux_path = true;
233 if (symbol__init() < 0)
234 die("Failed to init symbol map.");
235 session.psession = perf_session__new(NULL, O_WRONLY, false);
236 if (session.psession == NULL)
237 die("Failed to init perf_session.");
238 session.kmap = map_groups__find_by_name(&session.psession->kmaps,
239 MAP__FUNCTION,
240 "[kernel.kallsyms]");
241 if (!session.kmap)
242 die("Could not find kernel map.\n");
243 291
244 if (session.need_dwarf) 292 if (session.need_dwarf)
245#ifdef NO_LIBDWARF 293#ifdef NO_LIBDWARF
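The new --line option takes a range descriptor such as schedule:10+5 or file.c:100:120; parse_line_range_desc() splits it into a function or file name plus start and end lines. A hedged sketch of parsing just that subset (the struct fields are named after the real line_range, but this parser is invented for illustration, not the perf implementation):

#include <stdio.h>
#include <string.h>

struct line_range {
	char name[64];		/* function or source file */
	unsigned int start;
	unsigned int end;
};

/* accepts NAME, NAME:START, NAME:START+NUM and NAME:START:END */
static int parse_range(const char *desc, struct line_range *lr)
{
	const char *colon = strchr(desc, ':');
	char op;

	memset(lr, 0, sizeof(*lr));
	if (!colon) {
		snprintf(lr->name, sizeof(lr->name), "%s", desc);
		return 0;
	}
	snprintf(lr->name, sizeof(lr->name), "%.*s",
		 (int)(colon - desc), desc);
	if (sscanf(colon + 1, "%u%c%u", &lr->start, &op, &lr->end) == 3)
		lr->end = (op == '+') ? lr->start + lr->end : lr->end;
	else if (sscanf(colon + 1, "%u", &lr->start) == 1)
		lr->end = lr->start;
	else
		return -1;
	return 0;
}

int main(void)
{
	struct line_range lr;

	if (!parse_range("schedule:10+5", &lr))
		printf("%s: lines %u-%u\n", lr.name, lr.start, lr.end);
	return 0;
}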
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 265425322734..771533ced6a8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -5,10 +5,13 @@
5 * (or a CPU, or a PID) into the perf.data output file - for 5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report. 6 * later analysis via perf report.
7 */ 7 */
8#define _FILE_OFFSET_BITS 64
9
8#include "builtin.h" 10#include "builtin.h"
9 11
10#include "perf.h" 12#include "perf.h"
11 13
14#include "util/build-id.h"
12#include "util/util.h" 15#include "util/util.h"
13#include "util/parse-options.h" 16#include "util/parse-options.h"
14#include "util/parse-events.h" 17#include "util/parse-events.h"
@@ -62,6 +65,7 @@ static int nr_poll = 0;
62static int nr_cpu = 0; 65static int nr_cpu = 0;
63 66
64static int file_new = 1; 67static int file_new = 1;
68static off_t post_processing_offset;
65 69
66static struct perf_session *session; 70static struct perf_session *session;
67 71
@@ -111,22 +115,10 @@ static void write_output(void *buf, size_t size)
111 } 115 }
112} 116}
113 117
114static void write_event(event_t *buf, size_t size)
115{
116 /*
117 * Add it to the list of DSOs, so that when we finish this
118 * record session we can pick the available build-ids.
119 */
120 if (buf->header.type == PERF_RECORD_MMAP)
121 dsos__findnew(buf->mmap.filename);
122
123 write_output(buf, size);
124}
125
126static int process_synthesized_event(event_t *event, 118static int process_synthesized_event(event_t *event,
127 struct perf_session *self __used) 119 struct perf_session *self __used)
128{ 120{
129 write_event(event, event->header.size); 121 write_output(event, event->header.size);
130 return 0; 122 return 0;
131} 123}
132 124
@@ -178,14 +170,14 @@ static void mmap_read(struct mmap_data *md)
178 size = md->mask + 1 - (old & md->mask); 170 size = md->mask + 1 - (old & md->mask);
179 old += size; 171 old += size;
180 172
181 write_event(buf, size); 173 write_output(buf, size);
182 } 174 }
183 175
184 buf = &data[old & md->mask]; 176 buf = &data[old & md->mask];
185 size = head - old; 177 size = head - old;
186 old += size; 178 old += size;
187 179
188 write_event(buf, size); 180 write_output(buf, size);
189 181
190 md->prev = old; 182 md->prev = old;
191 mmap_write_tail(md, old); 183 mmap_write_tail(md, old);
@@ -395,10 +387,21 @@ static void open_counters(int cpu, pid_t pid)
395 nr_cpu++; 387 nr_cpu++;
396} 388}
397 389
390static int process_buildids(void)
391{
392 u64 size = lseek(output, 0, SEEK_CUR);
393
394 session->fd = output;
395 return __perf_session__process_events(session, post_processing_offset,
396 size - post_processing_offset,
397 size, &build_id__mark_dso_hit_ops);
398}
399
398static void atexit_header(void) 400static void atexit_header(void)
399{ 401{
400 session->header.data_size += bytes_written; 402 session->header.data_size += bytes_written;
401 403
404 process_buildids();
402 perf_header__write(&session->header, output, true); 405 perf_header__write(&session->header, output, true);
403} 406}
404 407
@@ -551,8 +554,23 @@ static int __cmd_record(int argc, const char **argv)
551 return err; 554 return err;
552 } 555 }
553 556
557 post_processing_offset = lseek(output, 0, SEEK_CUR);
558
559 err = event__synthesize_kernel_mmap(process_synthesized_event,
560 session, "_text");
561 if (err < 0) {
562 pr_err("Couldn't record kernel reference relocation symbol.\n");
563 return err;
564 }
565
566 err = event__synthesize_modules(process_synthesized_event, session);
567 if (err < 0) {
568 pr_err("Couldn't record kernel reference relocation symbol.\n");
569 return err;
570 }
571
554 if (!system_wide && profile_cpu == -1) 572 if (!system_wide && profile_cpu == -1)
555 event__synthesize_thread(pid, process_synthesized_event, 573 event__synthesize_thread(target_pid, process_synthesized_event,
556 session); 574 session);
557 else 575 else
558 event__synthesize_threads(process_synthesized_event, session); 576 event__synthesize_threads(process_synthesized_event, session);
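perf record now remembers where raw event data begins (post_processing_offset) and, at exit, rescans exactly that byte range to mark which DSOs actually got hits before the build-ids are written out. The record-then-rescan idea in miniature, with plain stdio standing in for the perf session machinery (the file name and event layout here are made up for the sketch):

#include <stdio.h>

struct event {
	unsigned short size;
	char payload[30];
};

int main(void)
{
	FILE *f = fopen("events.bin", "w+b");
	long post_processing_offset;
	struct event ev = { sizeof(struct event), "lock_acquire" };

	if (!f)
		return 1;

	/* ... a header would be written here ... */
	post_processing_offset = ftell(f);

	/* "recording": append events after the remembered offset */
	fwrite(&ev, sizeof(ev), 1, f);

	/* post-processing pass: seek back to where the events start
	 * and rescan them, as process_buildids() does via
	 * __perf_session__process_events() */
	fseek(f, post_processing_offset, SEEK_SET);
	while (fread(&ev, sizeof(ev), 1, f) == 1)
		printf("saw event: %s\n", ev.payload);

	fclose(f);
	return 0;
}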
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 860f1eeeea7d..cfc655d40bb7 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -34,6 +34,8 @@
34static char const *input_name = "perf.data"; 34static char const *input_name = "perf.data";
35 35
36static int force; 36static int force;
37static bool hide_unresolved;
38static bool dont_use_callchains;
37 39
38static int show_threads; 40static int show_threads;
39static struct perf_read_values show_threads_values; 41static struct perf_read_values show_threads_values;
@@ -91,11 +93,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
91 93
92 event__parse_sample(event, session->sample_type, &data); 94 event__parse_sample(event, session->sample_type, &data);
93 95
94 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 96 dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
95 event->header.misc, 97 data.pid, data.tid, data.ip, data.period);
96 data.pid, data.tid,
97 (void *)(long)data.ip,
98 (long long)data.period);
99 98
100 if (session->sample_type & PERF_SAMPLE_CALLCHAIN) { 99 if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
101 unsigned int i; 100 unsigned int i;
@@ -121,7 +120,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
121 return -1; 120 return -1;
122 } 121 }
123 122
124 if (al.filtered) 123 if (al.filtered || (hide_unresolved && al.sym == NULL))
125 return 0; 124 return 0;
126 125
127 if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { 126 if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) {
@@ -156,14 +155,14 @@ static int process_read_event(event_t *event, struct perf_session *session __use
156 return 0; 155 return 0;
157} 156}
158 157
159static int sample_type_check(struct perf_session *session) 158static int perf_session__setup_sample_type(struct perf_session *self)
160{ 159{
161 if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) { 160 if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
162 if (sort__has_parent) { 161 if (sort__has_parent) {
163 fprintf(stderr, "selected --sort parent, but no" 162 fprintf(stderr, "selected --sort parent, but no"
164 " callchain data. Did you call" 163 " callchain data. Did you call"
165 " perf record without -g?\n"); 164 " perf record without -g?\n");
166 return -1; 165 return -EINVAL;
167 } 166 }
168 if (symbol_conf.use_callchain) { 167 if (symbol_conf.use_callchain) {
169 fprintf(stderr, "selected -g but no callchain data." 168 fprintf(stderr, "selected -g but no callchain data."
@@ -171,12 +170,13 @@ static int sample_type_check(struct perf_session *session)
171 " -g?\n"); 170 " -g?\n");
172 return -1; 171 return -1;
173 } 172 }
174 } else if (callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { 173 } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
174 !symbol_conf.use_callchain) {
175 symbol_conf.use_callchain = true; 175 symbol_conf.use_callchain = true;
176 if (register_callchain_param(&callchain_param) < 0) { 176 if (register_callchain_param(&callchain_param) < 0) {
177 fprintf(stderr, "Can't register callchain" 177 fprintf(stderr, "Can't register callchain"
178 " params\n"); 178 " params\n");
179 return -1; 179 return -EINVAL;
180 } 180 }
181 } 181 }
182 182
@@ -184,20 +184,18 @@ static int sample_type_check(struct perf_session *session)
184} 184}
185 185
186static struct perf_event_ops event_ops = { 186static struct perf_event_ops event_ops = {
187 .process_sample_event = process_sample_event, 187 .sample = process_sample_event,
188 .process_mmap_event = event__process_mmap, 188 .mmap = event__process_mmap,
189 .process_comm_event = event__process_comm, 189 .comm = event__process_comm,
190 .process_exit_event = event__process_task, 190 .exit = event__process_task,
191 .process_fork_event = event__process_task, 191 .fork = event__process_task,
192 .process_lost_event = event__process_lost, 192 .lost = event__process_lost,
193 .process_read_event = process_read_event, 193 .read = process_read_event,
194 .sample_type_check = sample_type_check,
195}; 194};
196 195
197
198static int __cmd_report(void) 196static int __cmd_report(void)
199{ 197{
200 int ret; 198 int ret = -EINVAL;
201 struct perf_session *session; 199 struct perf_session *session;
202 200
203 session = perf_session__new(input_name, O_RDONLY, force); 201 session = perf_session__new(input_name, O_RDONLY, force);
@@ -207,6 +205,10 @@ static int __cmd_report(void)
207 if (show_threads) 205 if (show_threads)
208 perf_read_values_init(&show_threads_values); 206 perf_read_values_init(&show_threads_values);
209 207
208 ret = perf_session__setup_sample_type(session);
209 if (ret)
210 goto out_delete;
211
210 ret = perf_session__process_events(session, &event_ops); 212 ret = perf_session__process_events(session, &event_ops);
211 if (ret) 213 if (ret)
212 goto out_delete; 214 goto out_delete;
@@ -243,11 +245,19 @@ out_delete:
243 245
244static int 246static int
245parse_callchain_opt(const struct option *opt __used, const char *arg, 247parse_callchain_opt(const struct option *opt __used, const char *arg,
246 int unset __used) 248 int unset)
247{ 249{
248 char *tok; 250 char *tok;
249 char *endptr; 251 char *endptr;
250 252
253 /*
254 * --no-call-graph
255 */
256 if (unset) {
257 dont_use_callchains = true;
258 return 0;
259 }
260
251 symbol_conf.use_callchain = true; 261 symbol_conf.use_callchain = true;
252 262
253 if (!arg) 263 if (!arg)
@@ -319,7 +329,7 @@ static const struct option options[] = {
319 "pretty printing style key: normal raw"), 329 "pretty printing style key: normal raw"),
320 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 330 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
321 "sort by key(s): pid, comm, dso, symbol, parent"), 331 "sort by key(s): pid, comm, dso, symbol, parent"),
322 OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths, 332 OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
323 "Don't shorten the pathnames taking into account the cwd"), 333 "Don't shorten the pathnames taking into account the cwd"),
324 OPT_STRING('p', "parent", &parent_pattern, "regex", 334 OPT_STRING('p', "parent", &parent_pattern, "regex",
325 "regex filter to identify parent, see: '--sort parent'"), 335 "regex filter to identify parent, see: '--sort parent'"),
@@ -340,6 +350,8 @@ static const struct option options[] = {
340 OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", 350 OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
341 "separator for columns, no spaces will be added between " 351 "separator for columns, no spaces will be added between "
342 "columns '.' is reserved."), 352 "columns '.' is reserved."),
353 OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved,
354 "Only display entries resolved to a symbol"),
343 OPT_END() 355 OPT_END()
344}; 356};
345 357
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 80209df6cfe8..4f5a03e43444 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1621,11 +1621,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
1621 1621
1622 event__parse_sample(event, session->sample_type, &data); 1622 event__parse_sample(event, session->sample_type, &data);
1623 1623
1624 dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", 1624 dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
1625 event->header.misc, 1625 data.pid, data.tid, data.ip, data.period);
1626 data.pid, data.tid,
1627 (void *)(long)data.ip,
1628 (long long)data.period);
1629 1626
1630 thread = perf_session__findnew(session, data.pid); 1627 thread = perf_session__findnew(session, data.pid);
1631 if (thread == NULL) { 1628 if (thread == NULL) {
@@ -1653,33 +1650,22 @@ static int process_lost_event(event_t *event __used,
1653 return 0; 1650 return 0;
1654} 1651}
1655 1652
1656static int sample_type_check(struct perf_session *session __used)
1657{
1658 if (!(session->sample_type & PERF_SAMPLE_RAW)) {
1659 fprintf(stderr,
1660 "No trace sample to read. Did you call perf record "
1661 "without -R?");
1662 return -1;
1663 }
1664
1665 return 0;
1666}
1667
1668static struct perf_event_ops event_ops = { 1653static struct perf_event_ops event_ops = {
1669 .process_sample_event = process_sample_event, 1654 .sample = process_sample_event,
1670 .process_comm_event = event__process_comm, 1655 .comm = event__process_comm,
1671 .process_lost_event = process_lost_event, 1656 .lost = process_lost_event,
1672 .sample_type_check = sample_type_check,
1673}; 1657};
1674 1658
1675static int read_events(void) 1659static int read_events(void)
1676{ 1660{
1677 int err; 1661 int err = -EINVAL;
1678 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0); 1662 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
1679 if (session == NULL) 1663 if (session == NULL)
1680 return -ENOMEM; 1664 return -ENOMEM;
1681 1665
1682 err = perf_session__process_events(session, &event_ops); 1666 if (perf_session__has_traces(session, "record -R"))
1667 err = perf_session__process_events(session, &event_ops);
1668
1683 perf_session__delete(session); 1669 perf_session__delete(session);
1684 return err; 1670 return err;
1685} 1671}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c70d72003557..e8c85d5aec41 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -44,6 +44,7 @@
44#include "util/parse-events.h" 44#include "util/parse-events.h"
45#include "util/event.h" 45#include "util/event.h"
46#include "util/debug.h" 46#include "util/debug.h"
47#include "util/header.h"
47 48
48#include <sys/prctl.h> 49#include <sys/prctl.h>
49#include <math.h> 50#include <math.h>
@@ -79,6 +80,8 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS];
79 80
80static int event_scaled[MAX_COUNTERS]; 81static int event_scaled[MAX_COUNTERS];
81 82
83static volatile int done = 0;
84
82struct stats 85struct stats
83{ 86{
84 double n, mean, M2; 87 double n, mean, M2;
@@ -247,61 +250,64 @@ static int run_perf_stat(int argc __used, const char **argv)
247 unsigned long long t0, t1; 250 unsigned long long t0, t1;
248 int status = 0; 251 int status = 0;
249 int counter; 252 int counter;
250 int pid; 253 int pid = target_pid;
251 int child_ready_pipe[2], go_pipe[2]; 254 int child_ready_pipe[2], go_pipe[2];
255 const bool forks = (target_pid == -1 && argc > 0);
252 char buf; 256 char buf;
253 257
254 if (!system_wide) 258 if (!system_wide)
255 nr_cpus = 1; 259 nr_cpus = 1;
256 260
257 if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) { 261 if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
258 perror("failed to create pipes"); 262 perror("failed to create pipes");
259 exit(1); 263 exit(1);
260 } 264 }
261 265
262 if ((pid = fork()) < 0) 266 if (forks) {
263 perror("failed to fork"); 267 if ((pid = fork()) < 0)
268 perror("failed to fork");
269
270 if (!pid) {
271 close(child_ready_pipe[0]);
272 close(go_pipe[1]);
273 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
274
275 /*
276 * Do a dummy execvp to get the PLT entry resolved,
277 * so we avoid the resolver overhead on the real
278 * execvp call.
279 */
280 execvp("", (char **)argv);
281
282 /*
283 * Tell the parent we're ready to go
284 */
285 close(child_ready_pipe[1]);
286
287 /*
288 * Wait until the parent tells us to go.
289 */
290 if (read(go_pipe[0], &buf, 1) == -1)
291 perror("unable to read pipe");
292
293 execvp(argv[0], (char **)argv);
294
295 perror(argv[0]);
296 exit(-1);
297 }
264 298
265 if (!pid) { 299 child_pid = pid;
266 close(child_ready_pipe[0]);
267 close(go_pipe[1]);
268 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
269 300
270 /* 301 /*
271 * Do a dummy execvp to get the PLT entry resolved, 302 * Wait for the child to be ready to exec.
272 * so we avoid the resolver overhead on the real
273 * execvp call.
274 */
275 execvp("", (char **)argv);
276
277 /*
278 * Tell the parent we're ready to go
279 */ 303 */
280 close(child_ready_pipe[1]); 304 close(child_ready_pipe[1]);
281 305 close(go_pipe[0]);
282 /* 306 if (read(child_ready_pipe[0], &buf, 1) == -1)
283 * Wait until the parent tells us to go.
284 */
285 if (read(go_pipe[0], &buf, 1) == -1)
286 perror("unable to read pipe"); 307 perror("unable to read pipe");
287 308 close(child_ready_pipe[0]);
288 execvp(argv[0], (char **)argv);
289
290 perror(argv[0]);
291 exit(-1);
292 } 309 }
293 310
294 child_pid = pid;
295
296 /*
297 * Wait for the child to be ready to exec.
298 */
299 close(child_ready_pipe[1]);
300 close(go_pipe[0]);
301 if (read(child_ready_pipe[0], &buf, 1) == -1)
302 perror("unable to read pipe");
303 close(child_ready_pipe[0]);
304
305 for (counter = 0; counter < nr_counters; counter++) 311 for (counter = 0; counter < nr_counters; counter++)
306 create_perf_stat_counter(counter, pid); 312 create_perf_stat_counter(counter, pid);
307 313
@@ -310,8 +316,12 @@ static int run_perf_stat(int argc __used, const char **argv)
310 */ 316 */
311 t0 = rdclock(); 317 t0 = rdclock();
312 318
313 close(go_pipe[1]); 319 if (forks) {
314 wait(&status); 320 close(go_pipe[1]);
321 wait(&status);
322 } else {
 323 while (!done);
324 }
315 325
316 t1 = rdclock(); 326 t1 = rdclock();
317 327
@@ -417,10 +427,13 @@ static void print_stat(int argc, const char **argv)
417 fflush(stdout); 427 fflush(stdout);
418 428
419 fprintf(stderr, "\n"); 429 fprintf(stderr, "\n");
420 fprintf(stderr, " Performance counter stats for \'%s", argv[0]); 430 fprintf(stderr, " Performance counter stats for ");
421 431 if (target_pid == -1) {
422 for (i = 1; i < argc; i++) 432 fprintf(stderr, "\'%s", argv[0]);
423 fprintf(stderr, " %s", argv[i]); 433 for (i = 1; i < argc; i++)
434 fprintf(stderr, " %s", argv[i]);
435 }else
436 fprintf(stderr, "task pid \'%d", target_pid);
424 437
425 fprintf(stderr, "\'"); 438 fprintf(stderr, "\'");
426 if (run_count > 1) 439 if (run_count > 1)
@@ -445,6 +458,9 @@ static volatile int signr = -1;
445 458
446static void skip_signal(int signo) 459static void skip_signal(int signo)
447{ 460{
461 if(target_pid != -1)
462 done = 1;
463
448 signr = signo; 464 signr = signo;
449} 465}
450 466
@@ -461,7 +477,7 @@ static void sig_atexit(void)
461} 477}
462 478
463static const char * const stat_usage[] = { 479static const char * const stat_usage[] = {
464 "perf stat [<options>] <command>", 480 "perf stat [<options>] [<command>]",
465 NULL 481 NULL
466}; 482};
467 483
@@ -492,7 +508,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
492 508
493 argc = parse_options(argc, argv, options, stat_usage, 509 argc = parse_options(argc, argv, options, stat_usage,
494 PARSE_OPT_STOP_AT_NON_OPTION); 510 PARSE_OPT_STOP_AT_NON_OPTION);
495 if (!argc) 511 if (!argc && target_pid == -1)
496 usage_with_options(stat_usage, options); 512 usage_with_options(stat_usage, options);
497 if (run_count <= 0) 513 if (run_count <= 0)
498 usage_with_options(stat_usage, options); 514 usage_with_options(stat_usage, options);
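Editor's note: the child_ready/go pipe pair above is a small exec handshake. The child signals readiness by closing its write end of child_ready_pipe (the parent's read() then returns at EOF), and the parent releases the child by closing go_pipe's write end only after all counters have been created, so the counters cover the workload from its first instruction. A minimal illustrative sketch of the same handshake in Python (the PLT-warming dummy execvp is omitted; "true" stands in for the measured workload):

    import os, fcntl

    child_ready_r, child_ready_w = os.pipe()
    go_r, go_w = os.pipe()

    pid = os.fork()
    if pid == 0:                         # child
        os.close(child_ready_r)
        os.close(go_w)
        fcntl.fcntl(go_r, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
        os.close(child_ready_w)          # EOF: tell the parent we are ready
        os.read(go_r, 1)                 # block until the parent says go
        os.execvp("true", ["true"])      # the measured workload

    os.close(child_ready_w)              # parent
    os.close(go_r)
    os.read(child_ready_r, 1)            # wait for the child (returns at EOF)
    os.close(child_ready_r)
    # ... create the counters here, then release the child:
    os.close(go_w)
    os.waitpid(pid, 0)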
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 3f8bbcfb1e9b..0d4d8ff7914b 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1029,33 +1029,24 @@ static void process_samples(struct perf_session *session)
 	}
 }
 
-static int sample_type_check(struct perf_session *session)
-{
-	if (!(session->sample_type & PERF_SAMPLE_RAW)) {
-		fprintf(stderr, "No trace samples found in the file.\n"
-			"Have you used 'perf timechart record' to record it?\n");
-		return -1;
-	}
-
-	return 0;
-}
-
 static struct perf_event_ops event_ops = {
-	.process_comm_event	= process_comm_event,
-	.process_fork_event	= process_fork_event,
-	.process_exit_event	= process_exit_event,
-	.process_sample_event	= queue_sample_event,
-	.sample_type_check	= sample_type_check,
+	.comm	= process_comm_event,
+	.fork	= process_fork_event,
+	.exit	= process_exit_event,
+	.sample	= queue_sample_event,
 };
 
 static int __cmd_timechart(void)
 {
 	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
-	int ret;
+	int ret = -EINVAL;
 
 	if (session == NULL)
 		return -ENOMEM;
 
+	if (!perf_session__has_traces(session, "timechart record"))
+		goto out_delete;
+
 	ret = perf_session__process_events(session, &event_ops);
 	if (ret)
 		goto out_delete;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4b91d8cf00ec..31f2e597800c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -94,6 +94,7 @@ struct source_line {
94 94
95static char *sym_filter = NULL; 95static char *sym_filter = NULL;
96struct sym_entry *sym_filter_entry = NULL; 96struct sym_entry *sym_filter_entry = NULL;
97struct sym_entry *sym_filter_entry_sched = NULL;
97static int sym_pcnt_filter = 5; 98static int sym_pcnt_filter = 5;
98static int sym_counter = 0; 99static int sym_counter = 0;
99static int display_weighted = -1; 100static int display_weighted = -1;
@@ -201,10 +202,9 @@ static void parse_source(struct sym_entry *syme)
 	len = sym->end - sym->start;
 
 	sprintf(command,
-		"objdump --start-address=0x%016Lx "
-		"--stop-address=0x%016Lx -dS %s",
-		map->unmap_ip(map, sym->start),
-		map->unmap_ip(map, sym->end), path);
+		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
+		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
 
 	file = popen(command, "r");
 	if (!file)
@@ -215,7 +215,7 @@ static void parse_source(struct sym_entry *syme)
215 while (!feof(file)) { 215 while (!feof(file)) {
216 struct source_line *src; 216 struct source_line *src;
217 size_t dummy = 0; 217 size_t dummy = 0;
218 char *c; 218 char *c, *sep;
219 219
220 src = malloc(sizeof(struct source_line)); 220 src = malloc(sizeof(struct source_line));
221 assert(src != NULL); 221 assert(src != NULL);
@@ -234,14 +234,11 @@ static void parse_source(struct sym_entry *syme)
 		*source->lines_tail = src;
 		source->lines_tail = &src->next;
 
-		if (strlen(src->line)>8 && src->line[8] == ':') {
-			src->eip = strtoull(src->line, NULL, 16);
-			src->eip = map->unmap_ip(map, src->eip);
-		}
-		if (strlen(src->line)>8 && src->line[16] == ':') {
-			src->eip = strtoull(src->line, NULL, 16);
-			src->eip = map->unmap_ip(map, src->eip);
-		}
+		src->eip = strtoull(src->line, &sep, 16);
+		if (*sep == ':')
+			src->eip = map__objdump_2ip(map, src->eip);
+		else /* this line has no ip info (e.g. source line) */
+			src->eip = 0;
 	}
 	pclose(file);
 out_assign:
@@ -276,6 +273,9 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
276 goto out_unlock; 273 goto out_unlock;
277 274
278 for (line = syme->src->lines; line; line = line->next) { 275 for (line = syme->src->lines; line; line = line->next) {
276 /* skip lines without IP info */
277 if (line->eip == 0)
278 continue;
279 if (line->eip == ip) { 279 if (line->eip == ip) {
280 line->count[counter]++; 280 line->count[counter]++;
281 break; 281 break;
@@ -287,17 +287,20 @@ out_unlock:
 	pthread_mutex_unlock(&syme->src->lock);
 }
 
+#define PATTERN_LEN		(BITS_PER_LONG / 4 + 2)
+
 static void lookup_sym_source(struct sym_entry *syme)
 {
 	struct symbol *symbol = sym_entry__symbol(syme);
 	struct source_line *line;
-	char pattern[PATH_MAX];
+	char pattern[PATTERN_LEN + 1];
 
-	sprintf(pattern, "<%s>:", symbol->name);
+	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+		map__rip_2objdump(syme->map, symbol->start));
 
 	pthread_mutex_lock(&syme->src->lock);
 	for (line = syme->src->lines; line; line = line->next) {
-		if (strstr(line->line, pattern)) {
+		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
 			syme->src->source = line;
 			break;
 		}
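Editor's note: both hunks above key off raw objdump -dS output. Instruction lines start with a hex address immediately followed by ':', while each symbol's disassembly opens with a header of exactly BITS_PER_LONG / 4 zero-padded hex digits, a space, and '<name>:'; PATTERN_LEN is those digits plus " <". A quick illustration of the header match (address and symbol are made up):

    BITS_PER_LONG = 64
    sym_start = 0xffffffff810473f0                # hypothetical kernel address
    pattern = "%0*x <" % (BITS_PER_LONG // 4, sym_start)
    header = "ffffffff810473f0 <schedule>:"       # objdump -dS symbol header
    assert header.startswith(pattern)             # what the memcmp() above checks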
@@ -667,7 +670,7 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
667 } 670 }
668 671
669 if (!found) { 672 if (!found) {
670 fprintf(stderr, "Sorry, %s is not active.\n", sym_filter); 673 fprintf(stderr, "Sorry, %s is not active.\n", buf);
671 sleep(1); 674 sleep(1);
672 return; 675 return;
673 } else 676 } else
@@ -695,11 +698,9 @@ static void print_mapped_keys(void)
 
 	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", count_filter);
 
-	if (symbol_conf.vmlinux_name) {
-		fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
-		fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
-		fprintf(stdout, "\t[S]     stop annotation.\n");
-	}
+	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
+	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
+	fprintf(stdout, "\t[S]     stop annotation.\n");
 
 	if (nr_counters > 1)
 		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
@@ -725,14 +726,13 @@ static int key_mapped(int c)
 	case 'Q':
 	case 'K':
 	case 'U':
+	case 'F':
+	case 's':
+	case 'S':
 		return 1;
 	case 'E':
 	case 'w':
 		return nr_counters > 1 ? 1 : 0;
-	case 'F':
-	case 's':
-	case 'S':
-		return symbol_conf.vmlinux_name ? 1 : 0;
 	default:
 		break;
 	}
@@ -910,8 +910,12 @@ static int symbol_filter(struct map *map, struct symbol *sym)
 	syme = symbol__priv(sym);
 	syme->map = map;
 	syme->src = NULL;
-	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
-		sym_filter_entry = syme;
+
+	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
+		/* schedule initial sym_filter_entry setup */
+		sym_filter_entry_sched = syme;
+		sym_filter = NULL;
+	}
 
 	for (i = 0; skip_symbols[i]; i++) {
 		if (!strcmp(skip_symbols[i], name)) {
@@ -934,8 +938,11 @@ static void event__process_sample(const event_t *self,
 	struct addr_location al;
 	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
+	++samples;
+
 	switch (origin) {
 	case PERF_RECORD_MISC_USER:
+		++userspace_samples;
 		if (hide_user_symbols)
 			return;
 		break;
@@ -948,9 +955,38 @@ static void event__process_sample(const event_t *self,
 	}
 
 	if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
-	    al.sym == NULL || al.filtered)
+	    al.filtered)
 		return;
 
+	if (al.sym == NULL) {
+		/*
+		 * As we do lazy loading of symtabs we only will know if the
+		 * specified vmlinux file is invalid when we actually have a
+		 * hit in kernel space and then try to load it. So if we get
+		 * here and there are _no_ symbols in the DSO backing the
+		 * kernel map, bail out.
+		 *
+		 * We may never get here, for instance, if we use -K/
+		 * --hide-kernel-symbols, even if the user specifies an
+		 * invalid --vmlinux ;-)
+		 */
+		if (al.map == session->vmlinux_maps[MAP__FUNCTION] &&
+		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
+			pr_err("The %s file can't be used\n",
+			       symbol_conf.vmlinux_name);
+			exit(1);
+		}
+
+		return;
+	}
+
+	/* let's see, whether we need to install initial sym_filter_entry */
+	if (sym_filter_entry_sched) {
+		sym_filter_entry = sym_filter_entry_sched;
+		sym_filter_entry_sched = NULL;
+		parse_source(sym_filter_entry);
+	}
+
 	syme = symbol__priv(al.sym);
 	if (!syme->skip) {
 		syme->count[counter]++;
@@ -960,9 +996,6 @@ static void event__process_sample(const event_t *self,
 		if (list_empty(&syme->node) || !syme->node.next)
 			__list_insert_active_sym(syme);
 		pthread_mutex_unlock(&active_symbols_lock);
-		if (origin == PERF_RECORD_MISC_USER)
-			++userspace_samples;
-		++samples;
 	}
 }
 
@@ -975,6 +1008,10 @@ static int event__process(event_t *event, struct perf_session *session)
975 case PERF_RECORD_MMAP: 1008 case PERF_RECORD_MMAP:
976 event__process_mmap(event, session); 1009 event__process_mmap(event, session);
977 break; 1010 break;
1011 case PERF_RECORD_FORK:
1012 case PERF_RECORD_EXIT:
1013 event__process_task(event, session);
1014 break;
978 default: 1015 default:
979 break; 1016 break;
980 } 1017 }
@@ -1244,7 +1281,7 @@ static const struct option options[] = {
1244 OPT_BOOLEAN('i', "inherit", &inherit, 1281 OPT_BOOLEAN('i', "inherit", &inherit,
1245 "child tasks inherit counters"), 1282 "child tasks inherit counters"),
1246 OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name", 1283 OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
1247 "symbol to annotate - requires -k option"), 1284 "symbol to annotate"),
1248 OPT_BOOLEAN('z', "zero", &zero, 1285 OPT_BOOLEAN('z', "zero", &zero,
1249 "zero history across updates"), 1286 "zero history across updates"),
1250 OPT_INTEGER('F', "freq", &freq, 1287 OPT_INTEGER('F', "freq", &freq,
@@ -1280,16 +1317,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
 
 	symbol_conf.priv_size = (sizeof(struct sym_entry) +
 				 (nr_counters + 1) * sizeof(unsigned long));
-	if (symbol_conf.vmlinux_name == NULL)
-		symbol_conf.try_vmlinux_path = true;
+
+	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
 	if (symbol__init() < 0)
 		return -1;
 
 	if (delay_secs < 1)
 		delay_secs = 1;
 
-	parse_source(sym_filter_entry);
-
 	/*
 	 * User specified count overrides default frequency.
 	 */
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 574a215e800b..5db687fc13de 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -44,6 +44,7 @@ static void setup_scripting(void)
44 perf_set_argv_exec_path(perf_exec_path()); 44 perf_set_argv_exec_path(perf_exec_path());
45 45
46 setup_perl_scripting(); 46 setup_perl_scripting();
47 setup_python_scripting();
47 48
48 scripting_ops = &default_scripting_ops; 49 scripting_ops = &default_scripting_ops;
49} 50}
@@ -75,11 +76,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)
 
 	event__parse_sample(event, session->sample_type, &data);
 
-	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
-		event->header.misc,
-		data.pid, data.tid,
-		(void *)(long)data.ip,
-		(long long)data.period);
+	dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+		    data.pid, data.tid, data.ip, data.period);
 
 	thread = perf_session__findnew(session, event->ip.pid);
 	if (thread == NULL) {
@@ -103,22 +101,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
 	return 0;
 }
 
-static int sample_type_check(struct perf_session *session)
-{
-	if (!(session->sample_type & PERF_SAMPLE_RAW)) {
-		fprintf(stderr,
-			"No trace sample to read. Did you call perf record "
-			"without -R?");
-		return -1;
-	}
-
-	return 0;
-}
-
 static struct perf_event_ops event_ops = {
-	.process_sample_event	= process_sample_event,
-	.process_comm_event	= event__process_comm,
-	.sample_type_check	= sample_type_check,
+	.sample	= process_sample_event,
+	.comm	= event__process_comm,
 };
 
 static int __cmd_trace(struct perf_session *session)
@@ -235,9 +220,9 @@ static int parse_scriptname(const struct option *opt __used,
 	const char *script, *ext;
 	int len;
 
-	if (strcmp(str, "list") == 0) {
+	if (strcmp(str, "lang") == 0) {
 		list_available_languages();
-		return 0;
+		exit(0);
 	}
 
 	script = strchr(str, ':');
@@ -531,6 +516,8 @@ static const struct option options[] = {
531 parse_scriptname), 516 parse_scriptname),
532 OPT_STRING('g', "gen-script", &generate_script_lang, "lang", 517 OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
533 "generate perf-trace.xx script in specified language"), 518 "generate perf-trace.xx script in specified language"),
519 OPT_STRING('i', "input", &input_name, "file",
520 "input file name"),
534 521
535 OPT_END() 522 OPT_END()
536}; 523};
@@ -592,6 +579,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
592 if (session == NULL) 579 if (session == NULL)
593 return -ENOMEM; 580 return -ENOMEM;
594 581
582 if (!perf_session__has_traces(session, "record -R"))
583 return -EINVAL;
584
595 if (generate_script_lang) { 585 if (generate_script_lang) {
596 struct stat perf_stat; 586 struct stat perf_stat;
597 587
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 18035b1f16c7..10fe49e7048a 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -16,6 +16,7 @@ extern int check_pager_config(const char *cmd);
16 16
17extern int cmd_annotate(int argc, const char **argv, const char *prefix); 17extern int cmd_annotate(int argc, const char **argv, const char *prefix);
18extern int cmd_bench(int argc, const char **argv, const char *prefix); 18extern int cmd_bench(int argc, const char **argv, const char *prefix);
19extern int cmd_buildid_cache(int argc, const char **argv, const char *prefix);
19extern int cmd_buildid_list(int argc, const char **argv, const char *prefix); 20extern int cmd_buildid_list(int argc, const char **argv, const char *prefix);
20extern int cmd_diff(int argc, const char **argv, const char *prefix); 21extern int cmd_diff(int argc, const char **argv, const char *prefix);
21extern int cmd_help(int argc, const char **argv, const char *prefix); 22extern int cmd_help(int argc, const char **argv, const char *prefix);
@@ -30,5 +31,6 @@ extern int cmd_trace(int argc, const char **argv, const char *prefix);
30extern int cmd_version(int argc, const char **argv, const char *prefix); 31extern int cmd_version(int argc, const char **argv, const char *prefix);
31extern int cmd_probe(int argc, const char **argv, const char *prefix); 32extern int cmd_probe(int argc, const char **argv, const char *prefix);
32extern int cmd_kmem(int argc, const char **argv, const char *prefix); 33extern int cmd_kmem(int argc, const char **argv, const char *prefix);
34extern int cmd_lock(int argc, const char **argv, const char *prefix);
33 35
34#endif 36#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 71dc7c3fe7b2..db6ee94d4a8e 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -3,7 +3,9 @@
3# command name category [deprecated] [common] 3# command name category [deprecated] [common]
4# 4#
5perf-annotate mainporcelain common 5perf-annotate mainporcelain common
6perf-archive mainporcelain common
6perf-bench mainporcelain common 7perf-bench mainporcelain common
8perf-buildid-cache mainporcelain common
7perf-buildid-list mainporcelain common 9perf-buildid-list mainporcelain common
8perf-diff mainporcelain common 10perf-diff mainporcelain common
9perf-list mainporcelain common 11perf-list mainporcelain common
@@ -16,3 +18,4 @@ perf-top mainporcelain common
16perf-trace mainporcelain common 18perf-trace mainporcelain common
17perf-probe mainporcelain common 19perf-probe mainporcelain common
18perf-kmem mainporcelain common 20perf-kmem mainporcelain common
21perf-lock mainporcelain common
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 8d0de5130db3..bd0bb1b1279b 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -101,10 +101,10 @@ enum hw_event_ids {
101 */ 101 */
102 PERF_COUNT_HW_CPU_CYCLES = 0, 102 PERF_COUNT_HW_CPU_CYCLES = 0,
103 PERF_COUNT_HW_INSTRUCTIONS = 1, 103 PERF_COUNT_HW_INSTRUCTIONS = 1,
104 PERF_COUNT_HW_CACHE_REFERENCES = 2, 104 PERF_COUNT_HW_CACHE_REFERENCES = 2,
105 PERF_COUNT_HW_CACHE_MISSES = 3, 105 PERF_COUNT_HW_CACHE_MISSES = 3,
106 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, 106 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
107 PERF_COUNT_HW_BRANCH_MISSES = 5, 107 PERF_COUNT_HW_BRANCH_MISSES = 5,
108 PERF_COUNT_HW_BUS_CYCLES = 6, 108 PERF_COUNT_HW_BUS_CYCLES = 6,
109}; 109};
110 110
@@ -131,8 +131,8 @@ software events, selected by 'event_id':
131 */ 131 */
132enum sw_event_ids { 132enum sw_event_ids {
133 PERF_COUNT_SW_CPU_CLOCK = 0, 133 PERF_COUNT_SW_CPU_CLOCK = 0,
134 PERF_COUNT_SW_TASK_CLOCK = 1, 134 PERF_COUNT_SW_TASK_CLOCK = 1,
135 PERF_COUNT_SW_PAGE_FAULTS = 2, 135 PERF_COUNT_SW_PAGE_FAULTS = 2,
136 PERF_COUNT_SW_CONTEXT_SWITCHES = 3, 136 PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
137 PERF_COUNT_SW_CPU_MIGRATIONS = 4, 137 PERF_COUNT_SW_CPU_MIGRATIONS = 4,
138 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, 138 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
new file mode 100644
index 000000000000..910468e6e01c
--- /dev/null
+++ b/tools/perf/perf-archive.sh
@@ -0,0 +1,33 @@
1#!/bin/bash
2# perf archive
3# Arnaldo Carvalho de Melo <acme@redhat.com>
4
5PERF_DATA=perf.data
6if [ $# -ne 0 ] ; then
7 PERF_DATA=$1
8fi
9
10DEBUGDIR=~/.debug/
11BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX)
12NOBUILDID=0000000000000000000000000000000000000000
13
14perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS
15if [ ! -s $BUILDIDS ] ; then
16 echo "perf archive: no build-ids found"
17 rm -f $BUILDIDS
18 exit 1
19fi
20
21MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)
22
23cut -d ' ' -f 1 $BUILDIDS | \
24while read build_id ; do
25 linkname=$DEBUGDIR.build-id/${build_id:0:2}/${build_id:2}
26 filename=$(readlink -f $linkname)
27 echo ${linkname#$DEBUGDIR} >> $MANIFEST
28 echo ${filename#$DEBUGDIR} >> $MANIFEST
29done
30
31tar cfj $PERF_DATA.tar.bz2 -C $DEBUGDIR -T $MANIFEST
32rm -f $MANIFEST $BUILDIDS
33exit 0
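Editor's note: the resulting perf.data.tar.bz2 is meant to travel with perf.data to the analysis machine, where it is unpacked into the build-id cache (e.g. tar xjf perf.data.tar.bz2 -C ~/.debug) so that perf report can resolve symbols for binaries that only exist on the profiled host.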
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 873e55fab375..57cb107c1f13 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -48,7 +48,8 @@ int check_pager_config(const char *cmd)
 	return c.val;
 }
 
-static void commit_pager_choice(void) {
+static void commit_pager_choice(void)
+{
 	switch (use_pager) {
 	case 0:
 		setenv("PERF_PAGER", "cat", 1);
@@ -70,7 +71,7 @@ static void set_debugfs_path(void)
70 "tracing/events"); 71 "tracing/events");
71} 72}
72 73
73static int handle_options(const char*** argv, int* argc, int* envchanged) 74static int handle_options(const char ***argv, int *argc, int *envchanged)
74{ 75{
75 int handled = 0; 76 int handled = 0;
76 77
@@ -109,7 +110,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
109 *envchanged = 1; 110 *envchanged = 1;
110 } else if (!strcmp(cmd, "--perf-dir")) { 111 } else if (!strcmp(cmd, "--perf-dir")) {
111 if (*argc < 2) { 112 if (*argc < 2) {
112 fprintf(stderr, "No directory given for --perf-dir.\n" ); 113 fprintf(stderr, "No directory given for --perf-dir.\n");
113 usage(perf_usage_string); 114 usage(perf_usage_string);
114 } 115 }
115 setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1); 116 setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
@@ -124,7 +125,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
124 *envchanged = 1; 125 *envchanged = 1;
125 } else if (!strcmp(cmd, "--work-tree")) { 126 } else if (!strcmp(cmd, "--work-tree")) {
126 if (*argc < 2) { 127 if (*argc < 2) {
127 fprintf(stderr, "No directory given for --work-tree.\n" ); 128 fprintf(stderr, "No directory given for --work-tree.\n");
128 usage(perf_usage_string); 129 usage(perf_usage_string);
129 } 130 }
130 setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1); 131 setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
@@ -168,7 +169,7 @@ static int handle_alias(int *argcp, const char ***argv)
168{ 169{
169 int envchanged = 0, ret = 0, saved_errno = errno; 170 int envchanged = 0, ret = 0, saved_errno = errno;
170 int count, option_count; 171 int count, option_count;
171 const char** new_argv; 172 const char **new_argv;
172 const char *alias_command; 173 const char *alias_command;
173 char *alias_string; 174 char *alias_string;
174 175
@@ -210,11 +211,11 @@ static int handle_alias(int *argcp, const char ***argv)
 	if (!strcmp(alias_command, new_argv[0]))
 		die("recursive alias: %s", alias_command);
 
-	new_argv = realloc(new_argv, sizeof(char*) *
+	new_argv = realloc(new_argv, sizeof(char *) *
 			   (count + *argcp + 1));
 	/* insert after command name */
-	memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp);
-	new_argv[count+*argcp] = NULL;
+	memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
+	new_argv[count + *argcp] = NULL;
 
 	*argv = new_argv;
 	*argcp += count - 1;
@@ -285,6 +286,7 @@ static void handle_internal_command(int argc, const char **argv)
285{ 286{
286 const char *cmd = argv[0]; 287 const char *cmd = argv[0];
287 static struct cmd_struct commands[] = { 288 static struct cmd_struct commands[] = {
289 { "buildid-cache", cmd_buildid_cache, 0 },
288 { "buildid-list", cmd_buildid_list, 0 }, 290 { "buildid-list", cmd_buildid_list, 0 },
289 { "diff", cmd_diff, 0 }, 291 { "diff", cmd_diff, 0 },
290 { "help", cmd_help, 0 }, 292 { "help", cmd_help, 0 },
@@ -301,6 +303,7 @@ static void handle_internal_command(int argc, const char **argv)
301 { "sched", cmd_sched, 0 }, 303 { "sched", cmd_sched, 0 },
302 { "probe", cmd_probe, 0 }, 304 { "probe", cmd_probe, 0 },
303 { "kmem", cmd_kmem, 0 }, 305 { "kmem", cmd_kmem, 0 },
306 { "lock", cmd_lock, 0 },
304 }; 307 };
305 unsigned int i; 308 unsigned int i;
306 static const char ext[] = STRIP_EXTENSION; 309 static const char ext[] = STRIP_EXTENSION;
@@ -388,7 +391,7 @@ static int run_argv(int *argcp, const char ***argv)
388/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ 391/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
389static void get_debugfs_mntpt(void) 392static void get_debugfs_mntpt(void)
390{ 393{
391 const char *path = debugfs_find_mountpoint(); 394 const char *path = debugfs_mount(NULL);
392 395
393 if (path) 396 if (path)
394 strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); 397 strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
@@ -449,8 +452,8 @@ int main(int argc, const char **argv)
449 setup_path(); 452 setup_path();
450 453
451 while (1) { 454 while (1) {
452 static int done_help = 0; 455 static int done_help;
453 static int was_alias = 0; 456 static int was_alias;
454 457
455 was_alias = run_argv(&argc, &argv); 458 was_alias = run_argv(&argc, &argv);
456 if (errno != ENOENT) 459 if (errno != ENOENT)
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
index af78d9a52a7d..01a64ad693f2 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -31,13 +31,14 @@
31#include "EXTERN.h" 31#include "EXTERN.h"
32#include "perl.h" 32#include "perl.h"
33#include "XSUB.h" 33#include "XSUB.h"
34#include "../../../util/trace-event-perl.h" 34#include "../../../perf.h"
35#include "../../../util/trace-event.h"
35 36
36#ifndef PERL_UNUSED_VAR 37#ifndef PERL_UNUSED_VAR
37# define PERL_UNUSED_VAR(var) if (0) var = var 38# define PERL_UNUSED_VAR(var) if (0) var = var
38#endif 39#endif
39 40
40#line 41 "Context.c" 41#line 42 "Context.c"
41 42
42XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */ 43XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */
43XS(XS_Perf__Trace__Context_common_pc) 44XS(XS_Perf__Trace__Context_common_pc)
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
index fb78006c165e..549cf0467d30 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
@@ -22,7 +22,8 @@
22#include "EXTERN.h" 22#include "EXTERN.h"
23#include "perl.h" 23#include "perl.h"
24#include "XSUB.h" 24#include "XSUB.h"
25#include "../../../util/trace-event-perl.h" 25#include "../../../perf.h"
26#include "../../../util/trace-event.h"
26 27
27MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context 28MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context
28PROTOTYPES: ENABLE 29PROTOTYPES: ENABLE
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
index 052f132ced24..f869c48dc9b0 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
@@ -44,7 +44,7 @@ sub nsecs_secs {
44sub nsecs_nsecs { 44sub nsecs_nsecs {
45 my ($nsecs) = @_; 45 my ($nsecs) = @_;
46 46
47 return $nsecs - nsecs_secs($nsecs); 47 return $nsecs % $NSECS_PER_SEC;
48} 48}
49 49
50sub nsecs_str { 50sub nsecs_str {
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record
index c7ec5de2f535..e6cb1474f8e8 100644
--- a/tools/perf/scripts/perl/bin/check-perf-trace-record
+++ b/tools/perf/scripts/perl/bin/check-perf-trace-record
@@ -1,7 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry 2perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree
3
4
5
6
7
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-report b/tools/perf/scripts/perl/bin/check-perf-trace-report
deleted file mode 100644
index 7fc4a033dd49..000000000000
--- a/tools/perf/scripts/perl/bin/check-perf-trace-report
+++ /dev/null
@@ -1,6 +0,0 @@
1#!/bin/bash
2# description: useless but exhaustive test script
3perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl
4
5
6
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
new file mode 100644
index 000000000000..f8885d389e6f
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -0,0 +1,2 @@
1#!/bin/bash
2perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report
new file mode 100644
index 000000000000..8bfc660e5056
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-report
@@ -0,0 +1,4 @@
1#!/bin/bash
2# description: system-wide failed syscalls
3# args: [comm]
4perf trace -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $1
diff --git a/tools/perf/scripts/perl/failed-syscalls.pl b/tools/perf/scripts/perl/failed-syscalls.pl
new file mode 100644
index 000000000000..c18e7e27a84b
--- /dev/null
+++ b/tools/perf/scripts/perl/failed-syscalls.pl
@@ -0,0 +1,38 @@
1# failed system call counts
2# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
3# Licensed under the terms of the GNU GPL License version 2
4#
5# Displays system-wide failed system call totals
6# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
7
8use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
9use lib "./Perf-Trace-Util/lib";
10use Perf::Trace::Core;
11use Perf::Trace::Context;
12use Perf::Trace::Util;
13
14my %failed_syscalls;
15
16sub raw_syscalls::sys_exit
17{
18 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
19 $common_pid, $common_comm,
20 $id, $ret) = @_;
21
22 if ($ret < 0) {
23 $failed_syscalls{$common_comm}++;
24 }
25}
26
27sub trace_end
28{
29 printf("\nfailed syscalls by comm:\n\n");
30
31 printf("%-20s %10s\n", "comm", "# errors");
32 printf("%-20s %6s %10s\n", "--------------------", "----------");
33
34 foreach my $comm (sort {$failed_syscalls{$b} <=> $failed_syscalls{$a}}
35 keys %failed_syscalls) {
36 printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
37 }
38}
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
new file mode 100644
index 000000000000..957085dd5d8d
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -0,0 +1,88 @@
1/*
2 * Context.c. Python interfaces for perf trace.
3 *
4 * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <Python.h>
23#include "../../../perf.h"
24#include "../../../util/trace-event.h"
25
26PyMODINIT_FUNC initperf_trace_context(void);
27
28static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args)
29{
30 static struct scripting_context *scripting_context;
31 PyObject *context;
32 int retval;
33
34 if (!PyArg_ParseTuple(args, "O", &context))
35 return NULL;
36
37 scripting_context = PyCObject_AsVoidPtr(context);
38 retval = common_pc(scripting_context);
39
40 return Py_BuildValue("i", retval);
41}
42
43static PyObject *perf_trace_context_common_flags(PyObject *self,
44 PyObject *args)
45{
46 static struct scripting_context *scripting_context;
47 PyObject *context;
48 int retval;
49
50 if (!PyArg_ParseTuple(args, "O", &context))
51 return NULL;
52
53 scripting_context = PyCObject_AsVoidPtr(context);
54 retval = common_flags(scripting_context);
55
56 return Py_BuildValue("i", retval);
57}
58
59static PyObject *perf_trace_context_common_lock_depth(PyObject *self,
60 PyObject *args)
61{
62 static struct scripting_context *scripting_context;
63 PyObject *context;
64 int retval;
65
66 if (!PyArg_ParseTuple(args, "O", &context))
67 return NULL;
68
69 scripting_context = PyCObject_AsVoidPtr(context);
70 retval = common_lock_depth(scripting_context);
71
72 return Py_BuildValue("i", retval);
73}
74
75static PyMethodDef ContextMethods[] = {
76 { "common_pc", perf_trace_context_common_pc, METH_VARARGS,
77 "Get the common preempt count event field value."},
78 { "common_flags", perf_trace_context_common_flags, METH_VARARGS,
79 "Get the common flags event field value."},
80 { "common_lock_depth", perf_trace_context_common_lock_depth,
81 METH_VARARGS, "Get the common lock depth event field value."},
82 { NULL, NULL, 0, NULL}
83};
84
85PyMODINIT_FUNC initperf_trace_context(void)
86{
87 (void) Py_InitModule("perf_trace_context", ContextMethods);
88}
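Editor's note: the three wrappers above become the perf_trace_context module that generated Python scripts import; they are only meaningful inside 'perf trace -s', which supplies the opaque context object to every handler. A hypothetical handler sketch (the event is illustrative; the signature mirrors the generated handlers seen later in this series):

    from perf_trace_context import common_pc, common_flags, common_lock_depth

    def irq__softirq_entry(event_name, context, common_cpu, common_secs,
                           common_nsecs, common_pid, common_comm, vec):
        # context is the opaque object perf passes to every handler
        print "pc=%d flags=%d lock_depth=%d" % \
            (common_pc(context), common_flags(context),
             common_lock_depth(context))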
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
new file mode 100644
index 000000000000..1dc464ee2ca8
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -0,0 +1,91 @@
1# Core.py - Python extension for perf trace, core functions
2#
3# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
4#
5# This software may be distributed under the terms of the GNU General
6# Public License ("GPL") version 2 as published by the Free Software
7# Foundation.
8
9from collections import defaultdict
10
11def autodict():
12 return defaultdict(autodict)
13
14flag_fields = autodict()
15symbolic_fields = autodict()
16
17def define_flag_field(event_name, field_name, delim):
18 flag_fields[event_name][field_name]['delim'] = delim
19
20def define_flag_value(event_name, field_name, value, field_str):
21 flag_fields[event_name][field_name]['values'][value] = field_str
22
23def define_symbolic_field(event_name, field_name):
24 # nothing to do, really
25 pass
26
27def define_symbolic_value(event_name, field_name, value, field_str):
28 symbolic_fields[event_name][field_name]['values'][value] = field_str
29
30def flag_str(event_name, field_name, value):
31 string = ""
32
33 if flag_fields[event_name][field_name]:
34 print_delim = 0
35 keys = flag_fields[event_name][field_name]['values'].keys()
36 keys.sort()
37 for idx in keys:
38 if not value and not idx:
39 string += flag_fields[event_name][field_name]['values'][idx]
40 break
41 if idx and (value & idx) == idx:
42 if print_delim and flag_fields[event_name][field_name]['delim']:
43 string += " " + flag_fields[event_name][field_name]['delim'] + " "
44 string += flag_fields[event_name][field_name]['values'][idx]
45 print_delim = 1
46 value &= ~idx
47
48 return string
49
50def symbol_str(event_name, field_name, value):
51 string = ""
52
53 if symbolic_fields[event_name][field_name]:
54 keys = symbolic_fields[event_name][field_name]['values'].keys()
55 keys.sort()
56 for idx in keys:
57 if not value and not idx:
58 string = symbolic_fields[event_name][field_name]['values'][idx]
59 break
60 if (value == idx):
61 string = symbolic_fields[event_name][field_name]['values'][idx]
62 break
63
64 return string
65
66trace_flags = { 0x00: "NONE", \
67 0x01: "IRQS_OFF", \
68 0x02: "IRQS_NOSUPPORT", \
69 0x04: "NEED_RESCHED", \
70 0x08: "HARDIRQ", \
71 0x10: "SOFTIRQ" }
72
73def trace_flag_str(value):
74 string = ""
75 print_delim = 0
76
77 keys = trace_flags.keys()
78
79 for idx in keys:
80 if not value and not idx:
81 string += "NONE"
82 break
83
84 if idx and (value & idx) == idx:
85 if print_delim:
86 string += " | ";
87 string += trace_flags[idx]
88 print_delim = 1
89 value &= ~idx
90
91 return string
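Editor's note: in real use the define_* calls are emitted by code perf generates from the event format files; a small made-up example of what the helpers above produce (with Core.py's definitions in scope, e.g. via 'from Core import *'):

    define_flag_field("net__dev_xmit", "flags", "|")      # hypothetical event/field
    define_flag_value("net__dev_xmit", "flags", 1, "UP")
    define_flag_value("net__dev_xmit", "flags", 2, "BUSY")

    print flag_str("net__dev_xmit", "flags", 3)   # -> "UP | BUSY"
    print trace_flag_str(0x05)                    # -> "IRQS_OFF | NEED_RESCHED"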
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
new file mode 100644
index 000000000000..83e91435ed09
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -0,0 +1,25 @@
1# Util.py - Python extension for perf trace, miscellaneous utility code
2#
3# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
4#
5# This software may be distributed under the terms of the GNU General
6# Public License ("GPL") version 2 as published by the Free Software
7# Foundation.
8
9NSECS_PER_SEC = 1000000000
10
11def avg(total, n):
12 return total / n
13
14def nsecs(secs, nsecs):
15 return secs * NSECS_PER_SEC + nsecs
16
17def nsecs_secs(nsecs):
18 return nsecs / NSECS_PER_SEC
19
20def nsecs_nsecs(nsecs):
21 return nsecs % NSECS_PER_SEC
22
23def nsecs_str(nsecs):
24 str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
25 return str
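Editor's note: the stray trailing comma on nsecs_str's assignment (which would have turned the result into a one-element tuple) is dropped above. A quick illustration of the helpers, values chosen arbitrarily:

    ts = nsecs(1, 500)        # 1 s + 500 ns -> 1000000500
    print nsecs_secs(ts)      # -> 1
    print nsecs_nsecs(ts)     # -> 500
    print nsecs_str(ts)       # -> "    1.000000500"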
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
new file mode 100644
index 000000000000..f8885d389e6f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -0,0 +1,2 @@
1#!/bin/bash
2perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
new file mode 100644
index 000000000000..1e0c0a860c87
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -0,0 +1,4 @@
1#!/bin/bash
2# description: system-wide failed syscalls, by pid
3# args: [comm]
4perf trace -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $1
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
new file mode 100644
index 000000000000..45a8c50359da
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -0,0 +1,2 @@
1#!/bin/bash
2perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
new file mode 100644
index 000000000000..f8044d192271
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -0,0 +1,4 @@
1#!/bin/bash
2# description: system-wide syscall counts, by pid
3# args: [comm]
4perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $1
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
new file mode 100644
index 000000000000..45a8c50359da
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -0,0 +1,2 @@
1#!/bin/bash
2perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
new file mode 100644
index 000000000000..a366aa61612f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -0,0 +1,4 @@
1#!/bin/bash
2# description: system-wide syscall counts
3# args: [comm]
4perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py $1
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
new file mode 100644
index 000000000000..964d934395ff
--- /dev/null
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -0,0 +1,83 @@
1# perf trace event handlers, generated by perf trace -g python
2# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
3# Licensed under the terms of the GNU GPL License version 2
4#
5# This script tests basic functionality such as flag and symbol
6# strings, common_xxx() calls back into perf, begin, end, unhandled
7# events, etc. Basically, if this script runs successfully and
8# displays expected results, Python scripting support should be ok.
9
10import os
11import sys
12
13sys.path.append(os.environ['PERF_EXEC_PATH'] + \
14 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
15
16from Core import *
17from perf_trace_context import *
18
19unhandled = autodict()
20
21def trace_begin():
22 print "trace_begin"
23 pass
24
25def trace_end():
26 print_unhandled()
27
28def irq__softirq_entry(event_name, context, common_cpu,
29 common_secs, common_nsecs, common_pid, common_comm,
30 vec):
31 print_header(event_name, common_cpu, common_secs, common_nsecs,
32 common_pid, common_comm)
33
34 print_uncommon(context)
35
36 print "vec=%s\n" % \
37 (symbol_str("irq__softirq_entry", "vec", vec)),
38
39def kmem__kmalloc(event_name, context, common_cpu,
40 common_secs, common_nsecs, common_pid, common_comm,
41 call_site, ptr, bytes_req, bytes_alloc,
42 gfp_flags):
43 print_header(event_name, common_cpu, common_secs, common_nsecs,
44 common_pid, common_comm)
45
46 print_uncommon(context)
47
48 print "call_site=%u, ptr=%u, bytes_req=%u, " \
49 "bytes_alloc=%u, gfp_flags=%s\n" % \
50 (call_site, ptr, bytes_req, bytes_alloc,
51
52 flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
53
54def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
55 common_pid, common_comm):
56 try:
57 unhandled[event_name] += 1
58 except TypeError:
59 unhandled[event_name] = 1
60
61def print_header(event_name, cpu, secs, nsecs, pid, comm):
62 print "%-20s %5u %05u.%09u %8u %-20s " % \
63 (event_name, cpu, secs, nsecs, pid, comm),
64
65# print trace fields not included in handler args
66def print_uncommon(context):
67 print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
68 % (common_pc(context), trace_flag_str(common_flags(context)), \
69 common_lock_depth(context))
70
71def print_unhandled():
72 keys = unhandled.keys()
73 if not keys:
74 return
75
76 print "\nunhandled events:\n\n",
77
78 print "%-40s %10s\n" % ("event", "count"),
79 print "%-40s %10s\n" % ("----------------------------------------", \
80 "-----------"),
81
82 for event_name in keys:
83 print "%-40s %10d\n" % (event_name, unhandled[event_name])
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
new file mode 100644
index 000000000000..0ca02278fe69
--- /dev/null
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -0,0 +1,68 @@
1# failed system call counts, by pid
2# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
3# Licensed under the terms of the GNU GPL License version 2
4#
5# Displays system-wide failed system call totals, broken down by pid.
6# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
7
8import os
9import sys
10
11sys.path.append(os.environ['PERF_EXEC_PATH'] + \
12 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
13
14from perf_trace_context import *
15from Core import *
16
17usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
18
19for_comm = None
20
21if len(sys.argv) > 2:
22 sys.exit(usage)
23
24if len(sys.argv) > 1:
25 for_comm = sys.argv[1]
26
27syscalls = autodict()
28
29def trace_begin():
30 pass
31
32def trace_end():
33 print_error_totals()
34
35def raw_syscalls__sys_exit(event_name, context, common_cpu,
36 common_secs, common_nsecs, common_pid, common_comm,
37 id, ret):
38 if for_comm is not None:
39 if common_comm != for_comm:
40 return
41
42 if ret < 0:
43 try:
44 syscalls[common_comm][common_pid][id][ret] += 1
45 except TypeError:
46 syscalls[common_comm][common_pid][id][ret] = 1
47
48def print_error_totals():
49 if for_comm is not None:
50 print "\nsyscall errors for %s:\n\n" % (for_comm),
51 else:
52 print "\nsyscall errors:\n\n",
53
54 print "%-30s %10s\n" % ("comm [pid]", "count"),
55 print "%-30s %10s\n" % ("------------------------------", \
56 "----------"),
57
58 comm_keys = syscalls.keys()
59 for comm in comm_keys:
60 pid_keys = syscalls[comm].keys()
61 for pid in pid_keys:
62 print "\n%s [%d]\n" % (comm, pid),
63 id_keys = syscalls[comm][pid].keys()
64 for id in id_keys:
65 print " syscall: %-16d\n" % (id),
66 ret_keys = syscalls[comm][pid][id].keys()
67 for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
68 print " err = %-20d %10d\n" % (ret, val),
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
new file mode 100644
index 000000000000..af722d6a4b3f
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -0,0 +1,64 @@
1# system call counts, by pid
2# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
3# Licensed under the terms of the GNU GPL License version 2
4#
5# Displays system-wide system call totals, broken down by syscall.
6# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
7
8import os
9import sys
10
11sys.path.append(os.environ['PERF_EXEC_PATH'] + \
12 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
13
14from perf_trace_context import *
15from Core import *
16
17usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
18
19for_comm = None
20
21if len(sys.argv) > 2:
22 sys.exit(usage)
23
24if len(sys.argv) > 1:
25 for_comm = sys.argv[1]
26
27syscalls = autodict()
28
29def trace_begin():
30 pass
31
32def trace_end():
33 print_syscall_totals()
34
35def raw_syscalls__sys_enter(event_name, context, common_cpu,
36 common_secs, common_nsecs, common_pid, common_comm,
37 id, args):
38 if for_comm is not None:
39 if common_comm != for_comm:
40 return
41 try:
42 syscalls[common_comm][common_pid][id] += 1
43 except TypeError:
44 syscalls[common_comm][common_pid][id] = 1
45
46def print_syscall_totals():
47 if for_comm is not None:
48 print "\nsyscall events for %s:\n\n" % (for_comm),
49 else:
50 print "\nsyscall events by comm/pid:\n\n",
51
52 print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
53 print "%-40s %10s\n" % ("----------------------------------------", \
54 "----------"),
55
56 comm_keys = syscalls.keys()
57 for comm in comm_keys:
58 pid_keys = syscalls[comm].keys()
59 for pid in pid_keys:
60 print "\n%s [%d]\n" % (comm, pid),
61 id_keys = syscalls[comm][pid].keys()
62 for id, val in sorted(syscalls[comm][pid].iteritems(), \
63 key = lambda(k, v): (v, k), reverse = True):
64 print " %-38d %10d\n" % (id, val),
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
new file mode 100644
index 000000000000..f977e85ff049
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -0,0 +1,58 @@
1# system call counts
2# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
3# Licensed under the terms of the GNU GPL License version 2
4#
5# Displays system-wide system call totals, broken down by syscall.
6# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
7
8import os
9import sys
10
11sys.path.append(os.environ['PERF_EXEC_PATH'] + \
12 '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
13
14from perf_trace_context import *
15from Core import *
16
17usage = "perf trace -s syscall-counts.py [comm]\n";
18
19for_comm = None
20
21if len(sys.argv) > 2:
22 sys.exit(usage)
23
24if len(sys.argv) > 1:
25 for_comm = sys.argv[1]
26
27syscalls = autodict()
28
29def trace_begin():
30 pass
31
32def trace_end():
33 print_syscall_totals()
34
35def raw_syscalls__sys_enter(event_name, context, common_cpu,
36 common_secs, common_nsecs, common_pid, common_comm,
37 id, args):
38 if for_comm is not None:
39 if common_comm != for_comm:
40 return
41 try:
42 syscalls[id] += 1
43 except TypeError:
44 syscalls[id] = 1
45
46def print_syscall_totals():
47 if for_comm is not None:
48 print "\nsyscall events for %s:\n\n" % (for_comm),
49 else:
50 print "\nsyscall events:\n\n",
51
52 print "%-40s %10s\n" % ("event", "count"),
53 print "%-40s %10s\n" % ("----------------------------------------", \
54 "-----------"),
55
56 for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
57 reverse = True):
58 print "%-40d %10d\n" % (id, val),
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
new file mode 100644
index 000000000000..04904b35ba81
--- /dev/null
+++ b/tools/perf/util/build-id.c
@@ -0,0 +1,39 @@
1/*
2 * build-id.c
3 *
4 * build-id support
5 *
6 * Copyright (C) 2009, 2010 Red Hat Inc.
7 * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
8 */
9#include "build-id.h"
10#include "event.h"
11#include "symbol.h"
12#include <linux/kernel.h>
13
14static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
15{
16 struct addr_location al;
17 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
18 struct thread *thread = perf_session__findnew(session, event->ip.pid);
19
20 if (thread == NULL) {
21 pr_err("problem processing %d event, skipping it.\n",
22 event->header.type);
23 return -1;
24 }
25
26 thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
27 event->ip.ip, &al);
28
29 if (al.map != NULL)
30 al.map->dso->hit = 1;
31
32 return 0;
33}
34
35struct perf_event_ops build_id__mark_dso_hit_ops = {
36 .sample = build_id__mark_dso_hit,
37 .mmap = event__process_mmap,
38 .fork = event__process_task,
39};
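Editor's note: these ops appear to exist so a perf.data stream can be post-processed just to set dso->hit on the maps samples actually landed in; 'perf buildid-list --with-hits' (which the perf-archive.sh script above relies on) can then restrict build-id bookkeeping to binaries that were really hit, rather than everything that was merely mapped.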
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
new file mode 100644
index 000000000000..1d981d63cf9a
--- /dev/null
+++ b/tools/perf/util/build-id.h
@@ -0,0 +1,8 @@
1#ifndef PERF_BUILD_ID_H_
2#define PERF_BUILD_ID_H_ 1
3
4#include "session.h"
5
6extern struct perf_event_ops build_id__mark_dso_hit_ops;
7
8#endif
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
deleted file mode 100644
index b557b836de3d..000000000000
--- a/tools/perf/util/data_map.c
+++ /dev/null
@@ -1,252 +0,0 @@
1#include "symbol.h"
2#include "util.h"
3#include "debug.h"
4#include "thread.h"
5#include "session.h"
6
7static int process_event_stub(event_t *event __used,
8 struct perf_session *session __used)
9{
10 dump_printf(": unhandled!\n");
11 return 0;
12}
13
14static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
15{
16 if (!handler->process_sample_event)
17 handler->process_sample_event = process_event_stub;
18 if (!handler->process_mmap_event)
19 handler->process_mmap_event = process_event_stub;
20 if (!handler->process_comm_event)
21 handler->process_comm_event = process_event_stub;
22 if (!handler->process_fork_event)
23 handler->process_fork_event = process_event_stub;
24 if (!handler->process_exit_event)
25 handler->process_exit_event = process_event_stub;
26 if (!handler->process_lost_event)
27 handler->process_lost_event = process_event_stub;
28 if (!handler->process_read_event)
29 handler->process_read_event = process_event_stub;
30 if (!handler->process_throttle_event)
31 handler->process_throttle_event = process_event_stub;
32 if (!handler->process_unthrottle_event)
33 handler->process_unthrottle_event = process_event_stub;
34}
35
36static const char *event__name[] = {
37 [0] = "TOTAL",
38 [PERF_RECORD_MMAP] = "MMAP",
39 [PERF_RECORD_LOST] = "LOST",
40 [PERF_RECORD_COMM] = "COMM",
41 [PERF_RECORD_EXIT] = "EXIT",
42 [PERF_RECORD_THROTTLE] = "THROTTLE",
43 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
44 [PERF_RECORD_FORK] = "FORK",
45 [PERF_RECORD_READ] = "READ",
46 [PERF_RECORD_SAMPLE] = "SAMPLE",
47};
48
49unsigned long event__total[PERF_RECORD_MAX];
50
51void event__print_totals(void)
52{
53 int i;
54 for (i = 0; i < PERF_RECORD_MAX; ++i)
55 pr_info("%10s events: %10ld\n",
56 event__name[i], event__total[i]);
57}
58
59static int process_event(event_t *event, struct perf_session *session,
60 struct perf_event_ops *ops,
61 unsigned long offset, unsigned long head)
62{
63 trace_event(event);
64
65 if (event->header.type < PERF_RECORD_MAX) {
66 dump_printf("%p [%p]: PERF_RECORD_%s",
67 (void *)(offset + head),
68 (void *)(long)(event->header.size),
69 event__name[event->header.type]);
70 ++event__total[0];
71 ++event__total[event->header.type];
72 }
73
74 switch (event->header.type) {
75 case PERF_RECORD_SAMPLE:
76 return ops->process_sample_event(event, session);
77 case PERF_RECORD_MMAP:
78 return ops->process_mmap_event(event, session);
79 case PERF_RECORD_COMM:
80 return ops->process_comm_event(event, session);
81 case PERF_RECORD_FORK:
82 return ops->process_fork_event(event, session);
83 case PERF_RECORD_EXIT:
84 return ops->process_exit_event(event, session);
85 case PERF_RECORD_LOST:
86 return ops->process_lost_event(event, session);
87 case PERF_RECORD_READ:
88 return ops->process_read_event(event, session);
89 case PERF_RECORD_THROTTLE:
90 return ops->process_throttle_event(event, session);
91 case PERF_RECORD_UNTHROTTLE:
92 return ops->process_unthrottle_event(event, session);
93 default:
94 ops->total_unknown++;
95 return -1;
96 }
97}
98
99int perf_header__read_build_ids(int input, u64 offset, u64 size)
100{
101 struct build_id_event bev;
102 char filename[PATH_MAX];
103 u64 limit = offset + size;
104 int err = -1;
105
106 while (offset < limit) {
107 struct dso *dso;
108 ssize_t len;
109
110 if (read(input, &bev, sizeof(bev)) != sizeof(bev))
111 goto out;
112
113 len = bev.header.size - sizeof(bev);
114 if (read(input, filename, len) != len)
115 goto out;
116
117 dso = dsos__findnew(filename);
118 if (dso != NULL)
119 dso__set_build_id(dso, &bev.build_id);
120
121 offset += bev.header.size;
122 }
123 err = 0;
124out:
125 return err;
126}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (!thread || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;
	unsigned long head, shift;
	unsigned long offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	perf_event_ops__fill_defaults(ops);

	page_size = getpagesize();

	head = self->header.data_offset;
	self->sample_type = perf_header__sample_type(&self->header);

	err = -EINVAL;
	if (ops->sample_type_check && ops->sample_type_check(self) < 0)
		goto out_err;

	if (!ops->full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
		   MAP_SHARED, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%p [%p]: event: %d\n",
		    (void *)(offset + head),
		    (void *)(long)event->header.size,
		    event->header.type);

	if (!size || process_event(event, self, ops, offset, head) < 0) {

		dump_printf("%p [%p]: skipping unknown header type: %d\n",
			    (void *)(offset + head),
			    (void *)(long)(event->header.size),
			    event->header.type);

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	if (offset + head < self->size)
		goto more;

done:
	err = 0;
out_err:
	return err;
}
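
The traversal above never read(2)s the file: it maps a sliding window of mmap_window pages and, when the next event would cross the window's end, re-maps the window at the page boundary containing the cursor. A compact sketch of just that window arithmetic, with invented sizes (not the perf defaults):

/* Illustrates the sliding-window arithmetic: `offset` is where the
 * window starts in the file, `head` is the cursor inside the window.
 * The sizes are made up for the example. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;
	const unsigned long window = 8 * page_size;	/* pretend mmap window */
	unsigned long offset = 0, head;
	unsigned long next_event_size = 64;

	/* pretend the cursor has advanced deep into the window */
	head = window - 32;

	if (head + next_event_size >= window) {
		/* slide the window forward to the page containing `head` */
		unsigned long shift = page_size * (head / page_size);

		offset += shift;
		head -= shift;
	}
	assert(head < page_size);	/* cursor is now near the window start */
	printf("offset=%lu head=%lu\n", offset, head);
	return 0;
}

The recovery path for unparseable records leans on the same invariant the comment states: records are 8-byte aligned and at least 8 bytes long, so forcing head back onto an 8-byte boundary and stepping by 8 should eventually land on a real record header again.
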
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 28d520d5a1fb..0905600c3851 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -9,6 +9,7 @@
9#include "color.h" 9#include "color.h"
10#include "event.h" 10#include "event.h"
11#include "debug.h" 11#include "debug.h"
12#include "util.h"
12 13
13int verbose = 0; 14int verbose = 0;
14int dump_trace = 0; 15int dump_trace = 0;
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
index 06b73ee02c49..a88fefc0cc0a 100644
--- a/tools/perf/util/debugfs.c
+++ b/tools/perf/util/debugfs.c
@@ -106,16 +106,14 @@ int debugfs_valid_entry(const char *path)
 	return 0;
 }
 
-/* mount the debugfs somewhere */
+/* mount the debugfs somewhere if it's not mounted */
 
-int debugfs_mount(const char *mountpoint)
+char *debugfs_mount(const char *mountpoint)
 {
-	char mountcmd[128];
-
 	/* see if it's already mounted */
 	if (debugfs_find_mountpoint()) {
 		debugfs_premounted = 1;
-		return 0;
+		return debugfs_mountpoint;
 	}
 
 	/* if not mounted and no argument */
@@ -127,13 +125,14 @@ int debugfs_mount(const char *mountpoint)
 		mountpoint = "/sys/kernel/debug";
 	}
 
+	if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
+		return NULL;
+
 	/* save the mountpoint */
 	strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+	debugfs_found = 1;
 
-	/* mount it */
-	snprintf(mountcmd, sizeof(mountcmd),
-		 "/bin/mount -t debugfs debugfs %s", mountpoint);
-	return system(mountcmd);
+	return debugfs_mountpoint;
 }
 
 /* umount the debugfs */
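
The rewrite above swaps a system("/bin/mount ...") shell-out for a direct mount(2) call, which avoids PATH and quoting hazards and reports failure through errno instead of a shell exit code. A minimal sketch of the same call pattern (requires CAP_SYS_ADMIN; the mountpoint is just an example):

/* Sketch: mount debugfs via mount(2) instead of shelling out. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	const char *mountpoint = "/sys/kernel/debug";

	if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0) {
		fprintf(stderr, "mount %s: %s\n", mountpoint, strerror(errno));
		return 1;
	}
	printf("debugfs mounted at %s\n", mountpoint);
	return 0;
}
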
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
index 3cd14f9ae784..83a02879745f 100644
--- a/tools/perf/util/debugfs.h
+++ b/tools/perf/util/debugfs.h
@@ -15,7 +15,7 @@
 extern const char *debugfs_find_mountpoint(void);
 extern int debugfs_valid_mountpoint(const char *debugfs);
 extern int debugfs_valid_entry(const char *path);
-extern int debugfs_mount(const char *mountpoint);
+extern char *debugfs_mount(const char *mountpoint);
 extern int debugfs_umount(void);
 extern int debugfs_write(const char *entry, const char *value);
 extern int debugfs_read(const char *entry, char *buffer, size_t size);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 8a9e6baa3099..705ec63548b4 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -8,8 +8,7 @@
8#include "thread.h" 8#include "thread.h"
9 9
10static pid_t event__synthesize_comm(pid_t pid, int full, 10static pid_t event__synthesize_comm(pid_t pid, int full,
11 int (*process)(event_t *event, 11 event__handler_t process,
12 struct perf_session *session),
13 struct perf_session *session) 12 struct perf_session *session)
14{ 13{
15 event_t ev; 14 event_t ev;
@@ -91,8 +90,7 @@ out_failure:
 }
 
 static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
-					 int (*process)(event_t *event,
-							struct perf_session *session),
+					 event__handler_t process,
 					 struct perf_session *session)
 {
 	char filename[PATH_MAX];
@@ -112,7 +110,10 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
 	while (1) {
 		char bf[BUFSIZ], *pbf = bf;
 		event_t ev = {
-			.header = { .type = PERF_RECORD_MMAP },
+			.header = {
+				.type = PERF_RECORD_MMAP,
+				.misc = 0, /* Just like the kernel, see kernel/perf_event.c __perf_event_mmap */
+			},
 		};
 		int n;
 		size_t size;
@@ -156,9 +157,38 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
 	return 0;
 }
 
-int event__synthesize_thread(pid_t pid,
-			     int (*process)(event_t *event,
-					    struct perf_session *session),
+int event__synthesize_modules(event__handler_t process,
+			      struct perf_session *session)
+{
+	struct rb_node *nd;
+
+	for (nd = rb_first(&session->kmaps.maps[MAP__FUNCTION]);
+	     nd; nd = rb_next(nd)) {
+		event_t ev;
+		size_t size;
+		struct map *pos = rb_entry(nd, struct map, rb_node);
+
+		if (pos->dso->kernel)
+			continue;
+
+		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+		memset(&ev, 0, sizeof(ev));
+		ev.mmap.header.misc = 1; /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
+		ev.mmap.header.type = PERF_RECORD_MMAP;
+		ev.mmap.header.size = (sizeof(ev.mmap) -
+				       (sizeof(ev.mmap.filename) - size));
+		ev.mmap.start = pos->start;
+		ev.mmap.len = pos->end - pos->start;
+
+		memcpy(ev.mmap.filename, pos->dso->long_name,
+		       pos->dso->long_name_len + 1);
+		process(&ev, session);
+	}
+
+	return 0;
+}
+
+int event__synthesize_thread(pid_t pid, event__handler_t process,
 			     struct perf_session *session)
 {
 	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
@@ -167,8 +197,7 @@ int event__synthesize_thread(pid_t pid,
 	return event__synthesize_mmap_events(pid, tgid, process, session);
 }
 
-void event__synthesize_threads(int (*process)(event_t *event,
-					      struct perf_session *session),
+void event__synthesize_threads(event__handler_t process,
 			       struct perf_session *session)
 {
 	DIR *proc;
@@ -189,6 +218,59 @@ void event__synthesize_threads(int (*process)(event_t *event,
 	closedir(proc);
 }
 
+struct process_symbol_args {
+	const char *name;
+	u64	   start;
+};
+
+static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
+{
+	struct process_symbol_args *args = arg;
+
+	/*
+	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
+	 * an 'A' to the same address as "_stext".
+	 */
+	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
+	      type == 'A') || strcmp(name, args->name))
+		return 0;
+
+	args->start = start;
+	return 1;
+}
+
+int event__synthesize_kernel_mmap(event__handler_t process,
+				  struct perf_session *session,
+				  const char *symbol_name)
+{
+	size_t size;
+	event_t ev = {
+		.header = {
+			.type = PERF_RECORD_MMAP,
+			.misc = 1, /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
+		},
+	};
+	/*
+	 * We should get this from /sys/kernel/sections/.text, but till that is
+	 * available use this, and after it is use this as a fallback for older
+	 * kernels.
+	 */
+	struct process_symbol_args args = { .name = symbol_name, };
+
+	if (kallsyms__parse("/proc/kallsyms", &args, find_symbol_cb) <= 0)
+		return -ENOENT;
+
+	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
+			"[kernel.kallsyms.%s]", symbol_name) + 1;
+	size = ALIGN(size, sizeof(u64));
+	ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size));
+	ev.mmap.pgoff = args.start;
+	ev.mmap.start = session->vmlinux_maps[MAP__FUNCTION]->start;
+	ev.mmap.len = session->vmlinux_maps[MAP__FUNCTION]->end - ev.mmap.start;
+
+	return process(&ev, session);
+}
+
 static void thread__comm_adjust(struct thread *self)
 {
 	char *comm = self->comm;
@@ -240,22 +322,88 @@ int event__process_lost(event_t *self, struct perf_session *session)
 
 int event__process_mmap(event_t *self, struct perf_session *session)
 {
-	struct thread *thread = perf_session__findnew(session, self->mmap.pid);
-	struct map *map = map__new(&self->mmap, MAP__FUNCTION,
-				   session->cwd, session->cwdlen);
+	struct thread *thread;
+	struct map *map;
+
+	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+		    self->mmap.pid, self->mmap.tid, self->mmap.start,
+		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);
+
+	if (self->mmap.pid == 0) {
+		static const char kmmap_prefix[] = "[kernel.kallsyms.";
+
+		if (self->mmap.filename[0] == '/') {
+			char short_module_name[1024];
+			char *name = strrchr(self->mmap.filename, '/'), *dot;
+
+			if (name == NULL)
+				goto out_problem;
+
+			++name; /* skip / */
+			dot = strrchr(name, '.');
+			if (dot == NULL)
+				goto out_problem;
+
+			snprintf(short_module_name, sizeof(short_module_name),
+				 "[%.*s]", (int)(dot - name), name);
+			strxfrchar(short_module_name, '-', '_');
+
+			map = perf_session__new_module_map(session,
+							   self->mmap.start,
+							   self->mmap.filename);
+			if (map == NULL)
+				goto out_problem;
+
+			name = strdup(short_module_name);
+			if (name == NULL)
+				goto out_problem;
+
+			map->dso->short_name = name;
+			map->end = map->start + self->mmap.len;
+		} else if (memcmp(self->mmap.filename, kmmap_prefix,
+				  sizeof(kmmap_prefix) - 1) == 0) {
+			const char *symbol_name = (self->mmap.filename +
+						   sizeof(kmmap_prefix) - 1);
+			/*
+			 * Should be there already, from the build-id table in
+			 * the header.
+			 */
+			struct dso *kernel = __dsos__findnew(&dsos__kernel,
+							     "[kernel.kallsyms]");
+			if (kernel == NULL)
+				goto out_problem;
+
+			kernel->kernel = 1;
+			if (__perf_session__create_kernel_maps(session, kernel) < 0)
+				goto out_problem;
+
+			session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start;
+			session->vmlinux_maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
+			/*
+			 * Be a bit paranoid here, some perf.data file came with
+			 * a zero sized synthesized MMAP event for the kernel.
+			 */
+			if (session->vmlinux_maps[MAP__FUNCTION]->end == 0)
+				session->vmlinux_maps[MAP__FUNCTION]->end = ~0UL;
+
+			perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name,
+								 self->mmap.pgoff);
+		}
+		return 0;
+	}
 
-	dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n",
-		    self->mmap.pid, self->mmap.tid,
-		    (void *)(long)self->mmap.start,
-		    (void *)(long)self->mmap.len,
-		    (void *)(long)self->mmap.pgoff,
-		    self->mmap.filename);
+	thread = perf_session__findnew(session, self->mmap.pid);
+	map = map__new(&self->mmap, MAP__FUNCTION,
+		       session->cwd, session->cwdlen);
 
 	if (thread == NULL || map == NULL)
-		dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
-	else
-		thread__insert_map(thread, map);
+		goto out_problem;
 
+	thread__insert_map(thread, map);
+	return 0;
+
+out_problem:
+	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
 	return 0;
 }
 
@@ -284,11 +432,10 @@ int event__process_task(event_t *self, struct perf_session *session)
 	return 0;
 }
 
-void thread__find_addr_location(struct thread *self,
+void thread__find_addr_map(struct thread *self,
 				struct perf_session *session, u8 cpumode,
 				enum map_type type, u64 addr,
-				struct addr_location *al,
-				symbol_filter_t filter)
+				struct addr_location *al)
 {
 	struct map_groups *mg = &self->mg;
 
@@ -303,7 +450,6 @@ void thread__find_addr_location(struct thread *self,
 	else {
 		al->level = 'H';
 		al->map = NULL;
-		al->sym = NULL;
 		return;
 	}
 try_again:
@@ -322,11 +468,21 @@ try_again:
 		mg = &session->kmaps;
 		goto try_again;
 	}
-		al->sym = NULL;
-	} else {
+	} else
 		al->addr = al->map->map_ip(al->map, al->addr);
-		al->sym = map__find_symbol(al->map, session, al->addr, filter);
-	}
+}
+
+void thread__find_addr_location(struct thread *self,
+				struct perf_session *session, u8 cpumode,
+				enum map_type type, u64 addr,
+				struct addr_location *al,
+				symbol_filter_t filter)
+{
+	thread__find_addr_map(self, session, cpumode, type, addr, al);
+	if (al->map != NULL)
+		al->sym = map__find_symbol(al->map, al->addr, filter);
+	else
+		al->sym = NULL;
 }
 
 static void dso__calc_col_width(struct dso *self)
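
event__synthesize_kernel_mmap() above resolves its reference symbol by streaming /proc/kallsyms through a callback until one returns non-zero. The parser itself is not part of this hunk; a minimal sketch of such a callback-driven scan, assuming the usual "address type name" line format (an illustration, not the perf implementation):

/* Sketch of a kallsyms-style scan: feed each "addr type name" line to a
 * callback, stop when it returns non-zero. Note that without privilege
 * (or with kptr_restrict) the addresses may all read as zero. */
#include <stdio.h>
#include <string.h>

typedef int (*symbol_cb)(void *arg, const char *name, char type,
			 unsigned long long start);

static int kallsyms_scan(const char *path, void *arg, symbol_cb cb)
{
	char line[256];
	FILE *fp = fopen(path, "r");
	int hits = 0;

	if (fp == NULL)
		return -1;

	while (fgets(line, sizeof(line), fp)) {
		unsigned long long start;
		char type, name[128];

		if (sscanf(line, "%llx %c %127s", &start, &type, name) != 3)
			continue;
		if (cb(arg, name, type, start)) {
			hits = 1;
			break;
		}
	}
	fclose(fp);
	return hits;
}

static int find_text(void *arg, const char *name, char type,
		     unsigned long long start)
{
	(void)type;
	if (strcmp(name, "_text"))
		return 0;
	*(unsigned long long *)arg = start;
	return 1;
}

int main(void)
{
	unsigned long long addr = 0;

	if (kallsyms_scan("/proc/kallsyms", &addr, find_text) == 1)
		printf("_text at %#llx\n", addr);
	return 0;
}
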
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 690a96d0467c..50a7132887f5 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,10 +1,10 @@
 #ifndef __PERF_RECORD_H
 #define __PERF_RECORD_H
 
+#include <limits.h>
+
 #include "../perf.h"
-#include "util.h"
-#include <linux/list.h>
-#include <linux/rbtree.h>
+#include "map.h"
 
 /*
  * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
@@ -101,74 +101,19 @@ struct events_stats {
 
 void event__print_totals(void);
 
-enum map_type {
-	MAP__FUNCTION = 0,
-	MAP__VARIABLE,
-};
-
-#define MAP__NR_TYPES (MAP__VARIABLE + 1)
-
-struct map {
-	union {
-		struct rb_node	rb_node;
-		struct list_head node;
-	};
-	u64			start;
-	u64			end;
-	enum map_type		type;
-	u64			pgoff;
-	u64			(*map_ip)(struct map *, u64);
-	u64			(*unmap_ip)(struct map *, u64);
-	struct dso		*dso;
-};
-
-static inline u64 map__map_ip(struct map *map, u64 ip)
-{
-	return ip - map->start + map->pgoff;
-}
-
-static inline u64 map__unmap_ip(struct map *map, u64 ip)
-{
-	return ip + map->start - map->pgoff;
-}
-
-static inline u64 identity__map_ip(struct map *map __used, u64 ip)
-{
-	return ip;
-}
-
-struct symbol;
-
-typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
-
-void map__init(struct map *self, enum map_type type,
-	       u64 start, u64 end, u64 pgoff, struct dso *dso);
-struct map *map__new(struct mmap_event *event, enum map_type,
-		     char *cwd, int cwdlen);
-void map__delete(struct map *self);
-struct map *map__clone(struct map *self);
-int map__overlap(struct map *l, struct map *r);
-size_t map__fprintf(struct map *self, FILE *fp);
-
 struct perf_session;
 
-int map__load(struct map *self, struct perf_session *session,
-	      symbol_filter_t filter);
-struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
-				u64 addr, symbol_filter_t filter);
-struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
-					struct perf_session *session,
-					symbol_filter_t filter);
-void map__fixup_start(struct map *self);
-void map__fixup_end(struct map *self);
-
-int event__synthesize_thread(pid_t pid,
-			     int (*process)(event_t *event,
-					    struct perf_session *session),
+typedef int (*event__handler_t)(event_t *event, struct perf_session *session);
+
+int event__synthesize_thread(pid_t pid, event__handler_t process,
 			     struct perf_session *session);
-void event__synthesize_threads(int (*process)(event_t *event,
-					      struct perf_session *session),
+void event__synthesize_threads(event__handler_t process,
 			       struct perf_session *session);
+int event__synthesize_kernel_mmap(event__handler_t process,
+				  struct perf_session *session,
+				  const char *symbol_name);
+int event__synthesize_modules(event__handler_t process,
+			      struct perf_session *session);
 
 int event__process_comm(event_t *self, struct perf_session *session);
 int event__process_lost(event_t *self, struct perf_session *session);
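
The recurring change through event.c and event.h is mechanical: the spelled-out function-pointer parameter int (*process)(event_t *, struct perf_session *) is collapsed into the event__handler_t typedef, so every synthesizer shares one signature. The pattern in isolation, with generic names rather than the perf ones:

/* Generic version of the typedef refactor: one named callback type
 * instead of repeating the function-pointer syntax in each prototype. */
#include <stdio.h>

struct session { int nr_events; };
struct event { int type; };

typedef int (*handler_t)(struct event *ev, struct session *s);

static int count_events(struct event *ev, struct session *s)
{
	(void)ev;
	return ++s->nr_events;
}

/* before: int synthesize(int (*process)(struct event *, struct session *), ...) */
static int synthesize(handler_t process, struct session *s)
{
	struct event ev = { .type = 1 };

	return process(&ev, s);
}

int main(void)
{
	struct session s = { 0 };

	synthesize(count_events, &s);
	printf("%d event(s) handled\n", s.nr_events);
	return 0;
}
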
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 8a0bca55106f..6c9aa16ee51f 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1,8 +1,12 @@
+#define _FILE_OFFSET_BITS 64
+
 #include <sys/types.h>
+#include <byteswap.h>
 #include <unistd.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <linux/list.h>
+#include <linux/kernel.h>
 
 #include "util.h"
 #include "header.h"
@@ -105,24 +109,28 @@ struct perf_trace_event_type {
 static int event_count;
 static struct perf_trace_event_type *events;
 
-void perf_header__push_event(u64 id, const char *name)
+int perf_header__push_event(u64 id, const char *name)
 {
 	if (strlen(name) > MAX_EVENT_NAME)
 		pr_warning("Event %s will be truncated\n", name);
 
 	if (!events) {
 		events = malloc(sizeof(struct perf_trace_event_type));
-		if (!events)
-			die("nomem");
+		if (events == NULL)
+			return -ENOMEM;
 	} else {
-		events = realloc(events, (event_count + 1) * sizeof(struct perf_trace_event_type));
-		if (!events)
-			die("nomem");
+		struct perf_trace_event_type *nevents;
+
+		nevents = realloc(events, (event_count + 1) * sizeof(*events));
+		if (nevents == NULL)
+			return -ENOMEM;
+		events = nevents;
 	}
 	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
 	events[event_count].event_id = id;
 	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
 	event_count++;
+	return 0;
 }
127 135
128char *perf_header__find_event(u64 id) 136char *perf_header__find_event(u64 id)
@@ -169,31 +177,48 @@ static int do_write(int fd, const void *buf, size_t size)
 	return 0;
 }
 
-static int __dsos__write_buildid_table(struct list_head *head, int fd)
+#define NAME_ALIGN 64
+
+static int write_padded(int fd, const void *bf, size_t count,
+			size_t count_aligned)
 {
-#define NAME_ALIGN 64
-	struct dso *pos;
 	static const char zero_buf[NAME_ALIGN];
+	int err = do_write(fd, bf, count);
+
+	if (!err)
+		err = do_write(fd, zero_buf, count_aligned - count);
+
+	return err;
+}
 
-	list_for_each_entry(pos, head, node) {
+#define dsos__for_each_with_build_id(pos, head)	\
+	list_for_each_entry(pos, head, node)		\
+		if (!pos->has_build_id)			\
+			continue;			\
+		else
+
+static int __dsos__write_buildid_table(struct list_head *head, u16 misc, int fd)
+{
+	struct dso *pos;
+
+	dsos__for_each_with_build_id(pos, head) {
 		int err;
 		struct build_id_event b;
 		size_t len;
 
-		if (!pos->has_build_id)
+		if (!pos->hit)
 			continue;
 		len = pos->long_name_len + 1;
 		len = ALIGN(len, NAME_ALIGN);
 		memset(&b, 0, sizeof(b));
 		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+		b.header.misc = misc;
 		b.header.size = sizeof(b) + len;
 		err = do_write(fd, &b, sizeof(b));
 		if (err < 0)
 			return err;
-		err = do_write(fd, pos->long_name, pos->long_name_len + 1);
-		if (err < 0)
-			return err;
-		err = do_write(fd, zero_buf, len - pos->long_name_len - 1);
+		err = write_padded(fd, pos->long_name,
+				   pos->long_name_len + 1, len);
 		if (err < 0)
 			return err;
 	}
@@ -203,12 +228,143 @@ static int __dsos__write_buildid_table(struct list_head *head, int fd)
 
 static int dsos__write_buildid_table(int fd)
 {
-	int err = __dsos__write_buildid_table(&dsos__kernel, fd);
+	int err = __dsos__write_buildid_table(&dsos__kernel,
+					      PERF_RECORD_MISC_KERNEL, fd);
 	if (err == 0)
-		err = __dsos__write_buildid_table(&dsos__user, fd);
+		err = __dsos__write_buildid_table(&dsos__user,
+						  PERF_RECORD_MISC_USER, fd);
 	return err;
 }
 
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+			  const char *name, bool is_kallsyms)
+{
+	const size_t size = PATH_MAX;
+	char *filename = malloc(size),
+	     *linkname = malloc(size), *targetname;
+	int len, err = -1;
+
+	if (filename == NULL || linkname == NULL)
+		goto out_free;
+
+	len = snprintf(filename, size, "%s%s%s",
+		       debugdir, is_kallsyms ? "/" : "", name);
+	if (mkdir_p(filename, 0755))
+		goto out_free;
+
+	snprintf(filename + len, size - len, "/%s", sbuild_id);
+
+	if (access(filename, F_OK)) {
+		if (is_kallsyms) {
+			if (copyfile("/proc/kallsyms", filename))
+				goto out_free;
+		} else if (link(name, filename) && copyfile(name, filename))
+			goto out_free;
+	}
+
+	len = snprintf(linkname, size, "%s/.build-id/%.2s",
+		       debugdir, sbuild_id);
+
+	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+		goto out_free;
+
+	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+	targetname = filename + strlen(debugdir) - 5;
+	memcpy(targetname, "../..", 5);
+
+	if (symlink(targetname, linkname) == 0)
+		err = 0;
+out_free:
+	free(filename);
+	free(linkname);
+	return err;
+}
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+				 const char *name, const char *debugdir,
+				 bool is_kallsyms)
+{
+	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+	build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+	const size_t size = PATH_MAX;
+	char *filename = malloc(size),
+	     *linkname = malloc(size);
+	int err = -1;
+
+	if (filename == NULL || linkname == NULL)
+		goto out_free;
+
+	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+		 debugdir, sbuild_id, sbuild_id + 2);
+
+	if (access(linkname, F_OK))
+		goto out_free;
+
+	if (readlink(linkname, filename, size) < 0)
+		goto out_free;
+
+	if (unlink(linkname))
+		goto out_free;
+
+	/*
+	 * Since the link is relative, we must make it absolute:
+	 */
+	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+		 debugdir, sbuild_id, filename);
+
+	if (unlink(linkname))
+		goto out_free;
+
+	err = 0;
+out_free:
+	free(filename);
+	free(linkname);
+	return err;
+}
+
+static int dso__cache_build_id(struct dso *self, const char *debugdir)
+{
+	bool is_kallsyms = self->kernel && self->long_name[0] != '/';
+
+	return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
+				     self->long_name, debugdir, is_kallsyms);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+	struct dso *pos;
+	int err = 0;
+
+	dsos__for_each_with_build_id(pos, head)
+		if (dso__cache_build_id(pos, debugdir))
+			err = -1;
+
+	return err;
+}
+
+static int dsos__cache_build_ids(void)
+{
+	int err_kernel, err_user;
+	char debugdir[PATH_MAX];
+
+	snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
+		 DEBUG_CACHE_DIR);
+
+	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+		return -1;
+
+	err_kernel = __dsos__cache_build_ids(&dsos__kernel, debugdir);
+	err_user = __dsos__cache_build_ids(&dsos__user, debugdir);
+	return err_kernel || err_user ? -1 : 0;
+}
+
 static int perf_header__adds_write(struct perf_header *self, int fd)
 {
 	int nr_sections;
@@ -217,7 +373,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
 	u64 sec_start;
 	int idx = 0, err;
 
-	if (dsos__read_build_ids())
+	if (dsos__read_build_ids(true))
 		perf_header__set_feat(self, HEADER_BUILD_ID);
 
 	nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
@@ -257,7 +413,9 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
 			pr_debug("failed to write buildid table\n");
 			goto out_free;
 		}
-		buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset;
+		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
+				    buildid_sec->offset;
+		dsos__cache_build_ids();
 	}
 
 	lseek(fd, sec_start, SEEK_SET);
@@ -360,30 +518,43 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
 	return 0;
 }
 
-static void do_read(int fd, void *buf, size_t size)
+static int do_read(int fd, void *buf, size_t size)
 {
 	while (size) {
 		int ret = read(fd, buf, size);
 
-		if (ret < 0)
-			die("failed to read");
-		if (ret == 0)
-			die("failed to read: missing data");
+		if (ret <= 0)
+			return -1;
 
 		size -= ret;
 		buf += ret;
 	}
+
+	return 0;
+}
+
+static int perf_header__getbuffer64(struct perf_header *self,
+				    int fd, void *buf, size_t size)
+{
+	if (do_read(fd, buf, size))
+		return -1;
+
+	if (self->needs_swap)
+		mem_bswap_64(buf, size);
+
+	return 0;
 }
 
 int perf_header__process_sections(struct perf_header *self, int fd,
 				  int (*process)(struct perf_file_section *self,
+						 struct perf_header *ph,
 						 int feat, int fd))
 {
 	struct perf_file_section *feat_sec;
 	int nr_sections;
 	int sec_size;
 	int idx = 0;
-	int err = 0, feat = 1;
+	int err = -1, feat = 1;
 
 	nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
 	if (!nr_sections)
@@ -397,33 +568,45 @@ int perf_header__process_sections(struct perf_header *self, int fd,
 
 	lseek(fd, self->data_offset + self->data_size, SEEK_SET);
 
-	do_read(fd, feat_sec, sec_size);
+	if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
+		goto out_free;
 
+	err = 0;
 	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
 		if (perf_header__has_feat(self, feat)) {
 			struct perf_file_section *sec = &feat_sec[idx++];
 
-			err = process(sec, feat, fd);
+			err = process(sec, self, feat, fd);
 			if (err < 0)
 				break;
 		}
 		++feat;
 	}
-
+out_free:
 	free(feat_sec);
 	return err;
-};
+}
 
 int perf_file_header__read(struct perf_file_header *self,
 			   struct perf_header *ph, int fd)
 {
 	lseek(fd, 0, SEEK_SET);
-	do_read(fd, self, sizeof(*self));
 
-	if (self->magic != PERF_MAGIC ||
-	    self->attr_size != sizeof(struct perf_file_attr))
+	if (do_read(fd, self, sizeof(*self)) ||
+	    memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
 		return -1;
 
+	if (self->attr_size != sizeof(struct perf_file_attr)) {
+		u64 attr_size = bswap_64(self->attr_size);
+
+		if (attr_size != sizeof(struct perf_file_attr))
+			return -1;
+
+		mem_bswap_64(self, offsetof(struct perf_file_header,
+					    adds_features));
+		ph->needs_swap = true;
+	}
+
 	if (self->size != sizeof(*self)) {
 		/* Support the previous format */
 		if (self->size == offsetof(typeof(*self), adds_features))
@@ -433,19 +616,31 @@ int perf_file_header__read(struct perf_file_header *self,
 	}
 
 	memcpy(&ph->adds_features, &self->adds_features,
-	       sizeof(self->adds_features));
+	       sizeof(ph->adds_features));
+	/*
+	 * FIXME: hack that assumes that if we need swap the perf.data file
+	 * may be coming from an arch with a different word-size, ergo different
+	 * DEFINE_BITMAP format, investigate more later, but for now its mostly
+	 * safe to assume that we have a build-id section. Trace files probably
+	 * have several other issues in this realm anyway...
+	 */
+	if (ph->needs_swap) {
+		memset(&ph->adds_features, 0, sizeof(ph->adds_features));
+		perf_header__set_feat(ph, HEADER_BUILD_ID);
+	}
 
 	ph->event_offset = self->event_types.offset;
 	ph->event_size = self->event_types.size;
 	ph->data_offset = self->data.offset;
 	ph->data_size = self->data.size;
 	return 0;
 }
 
 static int perf_file_section__process(struct perf_file_section *self,
+				      struct perf_header *ph,
 				      int feat, int fd)
 {
-	if (lseek(fd, self->offset, SEEK_SET) < 0) {
+	if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
 		pr_debug("Failed to lseek to %Ld offset for feature %d, "
 			 "continuing...\n", self->offset, feat);
 		return 0;
@@ -457,7 +652,7 @@ static int perf_file_section__process(struct perf_file_section *self,
 		break;
 
 	case HEADER_BUILD_ID:
-		if (perf_header__read_build_ids(fd, self->offset, self->size))
+		if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
 			pr_debug("Failed to read buildids, continuing...\n");
 		break;
 	default:
@@ -469,7 +664,7 @@ static int perf_file_section__process(struct perf_file_section *self,
 
 int perf_header__read(struct perf_header *self, int fd)
 {
 	struct perf_file_header f_header;
 	struct perf_file_attr	f_attr;
 	u64			f_id;
 	int nr_attrs, nr_ids, i, j;
@@ -486,7 +681,9 @@ int perf_header__read(struct perf_header *self, int fd)
 		struct perf_header_attr *attr;
 		off_t tmp;
 
-		do_read(fd, &f_attr, sizeof(f_attr));
+		if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
+			goto out_errno;
+
 		tmp = lseek(fd, 0, SEEK_CUR);
 
 		attr = perf_header_attr__new(&f_attr.attr);
@@ -497,7 +694,8 @@ int perf_header__read(struct perf_header *self, int fd)
 		lseek(fd, f_attr.ids.offset, SEEK_SET);
 
 		for (j = 0; j < nr_ids; j++) {
-			do_read(fd, &f_id, sizeof(f_id));
+			if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
+				goto out_errno;
 
 			if (perf_header_attr__add_id(attr, f_id) < 0) {
 				perf_header_attr__delete(attr);
@@ -517,7 +715,9 @@ int perf_header__read(struct perf_header *self, int fd)
 		events = malloc(f_header.event_types.size);
 		if (events == NULL)
 			return -ENOMEM;
-		do_read(fd, events, f_header.event_types.size);
+		if (perf_header__getbuffer64(self, fd, events,
+					     f_header.event_types.size))
+			goto out_errno;
 		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
 	}
523 723
@@ -527,6 +727,8 @@ int perf_header__read(struct perf_header *self, int fd)
527 727
528 self->frozen = 1; 728 self->frozen = 1;
529 return 0; 729 return 0;
730out_errno:
731 return -errno;
530} 732}
531 733
532u64 perf_header__sample_type(struct perf_header *header) 734u64 perf_header__sample_type(struct perf_header *header)
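
The endianness handling introduced above deserves a worked example. The reader cannot know the writer's byte order up front, so it probes with a field of known value: if attr_size does not equal the expected struct size as read, it byte-swaps the field, and if the swapped value matches, the file came from the other endianness and every later 64-bit read goes through a swap. A self-contained sketch of that detection trick, with an invented expected size:

/* Sketch of endianness detection by probing a known-size field, as
 * perf_file_header__read() does with attr_size. The expected value
 * 0x68 is made up for the example. */
#include <stdio.h>
#include <stdint.h>
#include <byteswap.h>

int main(void)
{
	const uint64_t expected = 0x68;		/* "sizeof(file attr)" */
	uint64_t on_disk = bswap_64(expected);	/* pretend: foreign-endian file */
	int needs_swap = 0;

	if (on_disk != expected) {
		if (bswap_64(on_disk) != expected) {
			fprintf(stderr, "not a valid header\n");
			return 1;
		}
		needs_swap = 1;		/* swap every 64-bit field from now on */
	}
	printf("needs_swap=%d\n", needs_swap);
	return 0;
}
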
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d118d05d3abe..82a6af72d4cc 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -5,6 +5,7 @@
 #include <sys/types.h>
 #include <stdbool.h>
 #include "types.h"
+#include "event.h"
 
 #include <linux/bitmap.h>
 
@@ -52,6 +53,7 @@ struct perf_header {
 	u64			data_size;
 	u64			event_offset;
 	u64			event_size;
+	bool			needs_swap;
 	DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
 };
 
@@ -64,7 +66,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit);
 int perf_header__add_attr(struct perf_header *self,
 			  struct perf_header_attr *attr);
 
-void perf_header__push_event(u64 id, const char *name);
+int perf_header__push_event(u64 id, const char *name);
 char *perf_header__find_event(u64 id);
 
 struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr);
@@ -80,6 +82,11 @@ bool perf_header__has_feat(const struct perf_header *self, int feat);
 
 int perf_header__process_sections(struct perf_header *self, int fd,
 				  int (*process)(struct perf_file_section *self,
+						 struct perf_header *ph,
 						 int feat, int fd));
 
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+			  const char *name, bool is_kallsyms);
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+
 #endif /* __PERF_HEADER_H */
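
build_id_cache__add_s(), declared above, materializes a two-level on-disk cache: the object body is stored at <debugdir>/<name>/<sbuild_id>, and it is indexed by a relative symlink at <debugdir>/.build-id/<first two hex chars>/<remaining chars>. The targetname pointer trick in its body rewrites the head of the absolute file path into "../.." so the link stays valid if the cache directory is relocated. A worked example of the resulting layout, with an invented build-id:

/* Worked example of the build-id cache layout; paths are illustrative. */
#include <stdio.h>

int main(void)
{
	const char *debugdir = "/home/user/.debug";
	const char *name = "/bin/ls";
	const char *sbuild_id = "f3eb86a5c2e3f0a0b4cd76e56e1b21d1e118a2c4";
	char filename[256], linkname[256], target[256];

	/* where the file body lives */
	snprintf(filename, sizeof(filename), "%s%s/%s",
		 debugdir, name, sbuild_id);

	/* the index entry: .build-id/f3/eb86...a2c4 */
	snprintf(linkname, sizeof(linkname), "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	/* relative link target: the debugdir prefix replaced by "../.." */
	snprintf(target, sizeof(target), "../..%s/%s", name, sbuild_id);

	printf("file: %s\nlink: %s -> %s\n", filename, linkname, target);
	return 0;
}

From the link's directory, ../../ resolves back to the cache root, so the symlink points at the stored file without embedding an absolute path.
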
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
new file mode 100644
index 000000000000..201f57397997
--- /dev/null
+++ b/tools/perf/util/include/linux/hash.h
@@ -0,0 +1,5 @@
1#include "../../../../include/linux/hash.h"
2
3#ifndef PERF_HASH_H
4#define PERF_HASH_H
5#endif
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index 21c0274c02fa..f2611655ab51 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -101,5 +101,6 @@ simple_strtoul(const char *nptr, char **endptr, int base)
 	eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
 
 #endif
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index c4d55a0da2ea..e509cd59c67d 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -5,6 +5,11 @@
 #include <stdio.h>
 #include "debug.h"
 
+const char *map_type__name[MAP__NR_TYPES] = {
+	[MAP__FUNCTION] = "Functions",
+	[MAP__VARIABLE] = "Variables",
+};
+
 static inline int is_anon_memory(const char *filename)
 {
 	return strcmp(filename, "//anon") == 0;
@@ -68,8 +73,13 @@ struct map *map__new(struct mmap_event *event, enum map_type type,
 		map__init(self, type, event->start, event->start + event->len,
 			  event->pgoff, dso);
 
-		if (self->dso == vdso || anon)
+		if (anon) {
+set_identity:
 			self->map_ip = self->unmap_ip = identity__map_ip;
+		} else if (strcmp(filename, "[vdso]") == 0) {
+			dso__set_loaded(dso, self->type);
+			goto set_identity;
+		}
 	}
 	return self;
 out_delete:
@@ -104,8 +114,7 @@ void map__fixup_end(struct map *self)
 
 #define DSO__DELETED "(deleted)"
 
-int map__load(struct map *self, struct perf_session *session,
-	      symbol_filter_t filter)
+int map__load(struct map *self, symbol_filter_t filter)
 {
 	const char *name = self->dso->long_name;
 	int nr;
@@ -113,7 +122,7 @@ int map__load(struct map *self, struct perf_session *session,
 	if (dso__loaded(self->dso, self->type))
 		return 0;
 
-	nr = dso__load(self->dso, self, session, filter);
+	nr = dso__load(self->dso, self, filter);
 	if (nr < 0) {
 		if (self->dso->has_build_id) {
 			char sbuild_id[BUILD_ID_SIZE * 2 + 1];
@@ -144,24 +153,29 @@ int map__load(struct map *self, struct perf_session *session,
 
 		return -1;
 	}
+	/*
+	 * Only applies to the kernel, as its symtabs aren't relative like the
+	 * module ones.
+	 */
+	if (self->dso->kernel)
+		map__reloc_vmlinux(self);
 
 	return 0;
 }
 
-struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
-				u64 addr, symbol_filter_t filter)
+struct symbol *map__find_symbol(struct map *self, u64 addr,
+				symbol_filter_t filter)
 {
-	if (map__load(self, session, filter) < 0)
+	if (map__load(self, filter) < 0)
 		return NULL;
 
 	return dso__find_symbol(self->dso, self->type, addr);
 }
 
 struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
-					struct perf_session *session,
 					symbol_filter_t filter)
 {
-	if (map__load(self, session, filter) < 0)
+	if (map__load(self, filter) < 0)
 		return NULL;
 
 	if (!dso__sorted_by_name(self->dso, self->type))
@@ -201,3 +215,23 @@ size_t map__fprintf(struct map *self, FILE *fp)
 	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
 		       self->start, self->end, self->pgoff, self->dso->name);
 }
+
+/*
+ * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ */
+u64 map__rip_2objdump(struct map *map, u64 rip)
+{
+	u64 addr = map->dso->adjust_symbols ?
+			map->unmap_ip(map, rip) :	/* RIP -> IP */
+			rip;
+	return addr;
+}
+
+u64 map__objdump_2ip(struct map *map, u64 addr)
+{
+	u64 ip = map->dso->adjust_symbols ?
+			addr :
+			map->unmap_ip(map, addr);	/* RIP -> IP */
+	return ip;
+}
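
map__rip_2objdump() encodes a quirk worth spelling out: for ET_EXEC objects, objdump addresses code by absolute virtual address, while for ET_DYN shared objects it addresses by file-relative offset, so the translation only unmaps when adjust_symbols is set. A toy illustration of that branch, with an invented struct and addresses:

/* Toy illustration of the ET_EXEC vs ET_DYN address handed to objdump.
 * The struct and the numbers are invented for the example. */
#include <stdio.h>
#include <stdint.h>

struct toy_map {
	uint64_t start, pgoff;
	int adjust_symbols;	/* 1 for ET_EXEC-like objects */
};

static uint64_t unmap_ip(const struct toy_map *m, uint64_t rip)
{
	return rip + m->start - m->pgoff;
}

static uint64_t rip_2objdump(const struct toy_map *m, uint64_t rip)
{
	/* ET_EXEC: objdump wants the absolute IP; ET_DYN: the RIP as-is */
	return m->adjust_symbols ? unmap_ip(m, rip) : rip;
}

int main(void)
{
	struct toy_map exec = { 0x400000, 0, 1 };
	struct toy_map dyn = { 0x7f0000000000ULL, 0, 0 };

	printf("ET_EXEC: %#llx\n",
	       (unsigned long long)rip_2objdump(&exec, 0x1234));
	printf("ET_DYN:  %#llx\n",
	       (unsigned long long)rip_2objdump(&dyn, 0x1234));
	return 0;
}
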
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
new file mode 100644
index 000000000000..b756368076c6
--- /dev/null
+++ b/tools/perf/util/map.h
@@ -0,0 +1,94 @@
+#ifndef __PERF_MAP_H
+#define __PERF_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+enum map_type {
+	MAP__FUNCTION = 0,
+	MAP__VARIABLE,
+};
+
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
+extern const char *map_type__name[MAP__NR_TYPES];
+
+struct dso;
+struct ref_reloc_sym;
+struct map_groups;
+
+struct map {
+	union {
+		struct rb_node	rb_node;
+		struct list_head node;
+	};
+	u64			start;
+	u64			end;
+	enum map_type		type;
+	u64			pgoff;
+
+	/* ip -> dso rip */
+	u64			(*map_ip)(struct map *, u64);
+	/* dso rip -> ip */
+	u64			(*unmap_ip)(struct map *, u64);
+
+	struct dso		*dso;
+};
+
+struct kmap {
+	struct ref_reloc_sym	*ref_reloc_sym;
+	struct map_groups	*kmaps;
+};
+
+static inline struct kmap *map__kmap(struct map *self)
+{
+	return (struct kmap *)(self + 1);
+}
+
+static inline u64 map__map_ip(struct map *map, u64 ip)
+{
+	return ip - map->start + map->pgoff;
+}
+
+static inline u64 map__unmap_ip(struct map *map, u64 ip)
+{
+	return ip + map->start - map->pgoff;
+}
+
+static inline u64 identity__map_ip(struct map *map __used, u64 ip)
+{
+	return ip;
+}
+
+
+/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
+u64 map__rip_2objdump(struct map *map, u64 rip);
+u64 map__objdump_2ip(struct map *map, u64 addr);
+
+struct symbol;
+struct mmap_event;
+
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
+
+void map__init(struct map *self, enum map_type type,
+	       u64 start, u64 end, u64 pgoff, struct dso *dso);
+struct map *map__new(struct mmap_event *event, enum map_type,
+		     char *cwd, int cwdlen);
+void map__delete(struct map *self);
+struct map *map__clone(struct map *self);
+int map__overlap(struct map *l, struct map *r);
+size_t map__fprintf(struct map *self, FILE *fp);
+
+int map__load(struct map *self, symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *self,
+				u64 addr, symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+					symbol_filter_t filter);
+void map__fixup_start(struct map *self);
+void map__fixup_end(struct map *self);
+
+void map__reloc_vmlinux(struct map *self);
+
+#endif /* __PERF_MAP_H */
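
The map_ip()/unmap_ip() pair defined in this new header is plain linear address translation: a runtime address inside the mapping corresponds to ip - start + pgoff within the DSO, and back again. A worked example with invented numbers:

/* Worked example of the map_ip/unmap_ip arithmetic with made-up values. */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* a mapping loaded at 0x7f0000001000 with file offset 0x1000 */
	uint64_t start = 0x7f0000001000ULL, pgoff = 0x1000ULL;
	uint64_t runtime_ip = 0x7f0000001abcULL;

	uint64_t dso_addr = runtime_ip - start + pgoff;	/* map__map_ip   */
	uint64_t back = dso_addr + start - pgoff;	/* map__unmap_ip */

	assert(back == runtime_ip);
	printf("runtime %#llx -> dso %#llx\n",
	       (unsigned long long)runtime_ip, (unsigned long long)dso_addr);
	return 0;
}
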
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index e5bc0fb016b2..05d0c5c2030c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -450,7 +450,8 @@ parse_single_tracepoint_event(char *sys_name,
 /* sys + ':' + event + ':' + flags*/
 #define MAX_EVOPT_LEN	(MAX_EVENT_LENGTH * 2 + 2 + 128)
 static enum event_result
-parse_subsystem_tracepoint_event(char *sys_name, char *flags)
+parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
+				char *flags)
 {
 	char evt_path[MAXPATHLEN];
 	struct dirent *evt_ent;
@@ -474,6 +475,9 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
 		    || !strcmp(evt_ent->d_name, "filter"))
 			continue;
 
+		if (!strglobmatch(evt_ent->d_name, evt_exp))
+			continue;
+
 		len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
 			       evt_ent->d_name, flags ? ":" : "",
 			       flags ?: "");
@@ -522,9 +526,10 @@ static enum event_result parse_tracepoint_event(const char **strp,
 	if (evt_length >= MAX_EVENT_LENGTH)
 		return EVT_FAILED;
 
-	if (!strcmp(evt_name, "*")) {
+	if (strpbrk(evt_name, "*?")) {
 		*strp = evt_name + evt_length;
-		return parse_subsystem_tracepoint_event(sys_name, flags);
+		return parse_multiple_tracepoint_event(sys_name, evt_name,
+						       flags);
 	} else
 		return parse_single_tracepoint_event(sys_name, evt_name,
 						     evt_length, flags,
@@ -753,11 +758,11 @@ modifier:
 	return ret;
 }
 
-static void store_event_type(const char *orgname)
+static int store_event_type(const char *orgname)
 {
 	char filename[PATH_MAX], *c;
 	FILE *file;
-	int id;
+	int id, n;
 
 	sprintf(filename, "%s/", debugfs_path);
 	strncat(filename, orgname, strlen(orgname));
@@ -769,11 +774,14 @@ static void store_event_type(const char *orgname)
 
 	file = fopen(filename, "r");
 	if (!file)
-		return;
-	if (fscanf(file, "%i", &id) < 1)
-		die("cannot store event ID");
+		return 0;
+	n = fscanf(file, "%i", &id);
 	fclose(file);
-	perf_header__push_event(id, orgname);
+	if (n < 1) {
+		pr_err("cannot store event ID\n");
+		return -EINVAL;
+	}
+	return perf_header__push_event(id, orgname);
 }
 
 int parse_events(const struct option *opt __used, const char *str, int unset __used)
@@ -782,7 +790,8 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
 	enum event_result ret;
 
 	if (strchr(str, ':'))
-		store_event_type(str);
+		if (store_event_type(str) < 0)
+			return -1;
 
 	for (;;) {
 		if (nr_counters == MAX_COUNTERS)
@@ -835,11 +844,12 @@ int parse_filter(const struct option *opt __used, const char *str,
 }
 
 static const char * const event_type_descriptors[] = {
-	"",
 	"Hardware event",
 	"Software event",
 	"Tracepoint event",
 	"Hardware cache event",
+	"Raw hardware event descriptor",
+	"Hardware breakpoint",
 };
 
 /*
@@ -872,7 +882,7 @@ static void print_tracepoint_events(void)
 			snprintf(evt_path, MAXPATHLEN, "%s:%s",
 				 sys_dirent.d_name, evt_dirent.d_name);
 			printf("  %-42s [%s]\n", evt_path,
-				event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
+				event_type_descriptors[PERF_TYPE_TRACEPOINT]);
 		}
 		closedir(evt_dir);
 	}
@@ -892,9 +902,7 @@ void print_events(void)
892 printf("List of pre-defined events (to be used in -e):\n"); 902 printf("List of pre-defined events (to be used in -e):\n");
893 903
894 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { 904 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
895 type = syms->type + 1; 905 type = syms->type;
896 if (type >= ARRAY_SIZE(event_type_descriptors))
897 type = 0;
898 906
899 if (type != prev_type) 907 if (type != prev_type)
900 printf("\n"); 908 printf("\n");
@@ -919,17 +927,19 @@ void print_events(void)
 			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
 				printf("  %-42s [%s]\n",
 				       event_cache_name(type, op, i),
-				       event_type_descriptors[4]);
+				       event_type_descriptors[PERF_TYPE_HW_CACHE]);
 			}
 		}
 	}
 
 	printf("\n");
-	printf("  %-42s [raw hardware event descriptor]\n",
-	       "rNNN");
+	printf("  %-42s [%s]\n",
+	       "rNNN", event_type_descriptors[PERF_TYPE_RAW]);
 	printf("\n");
 
-	printf("  %-42s [hardware breakpoint]\n", "mem:<addr>[:access]");
+	printf("  %-42s [%s]\n",
+	       "mem:<addr>[:access]",
+	       event_type_descriptors[PERF_TYPE_BREAKPOINT]);
 	printf("\n");
 
 	print_tracepoint_events();
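
With the change above, an event spec such as sched:sched_* or 'kmem:?alloc' selects every matching tracepoint rather than only the literal "*" wildcard. A small sketch of glob matching over a list of names, using fnmatch(3) in place of perf's own strglobmatch(), which this hunk calls but does not define:

/* Sketch: select tracepoint-like names with a glob, via fnmatch(3). */
#include <fnmatch.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *events[] = {
		"sched_switch", "sched_wakeup", "kmalloc", "kfree",
	};
	const char *pattern = "sched_*";
	size_t i;

	/* only expand when the pattern really contains a wildcard */
	if (strpbrk(pattern, "*?") == NULL)
		return 0;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		if (fnmatch(pattern, events[i], 0) == 0)
			printf("matched: %s\n", events[i]);
	return 0;
}
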
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index fde17b090a47..a21b6bf055ac 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -37,6 +37,8 @@
37#include "string.h" 37#include "string.h"
38#include "strlist.h" 38#include "strlist.h"
39#include "debug.h" 39#include "debug.h"
40#include "cache.h"
41#include "color.h"
40#include "parse-events.h" /* For debugfs_path */ 42#include "parse-events.h" /* For debugfs_path */
41#include "probe-event.h" 43#include "probe-event.h"
42 44
@@ -62,6 +64,42 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
 	return ret;
 }
 
+void parse_line_range_desc(const char *arg, struct line_range *lr)
+{
+	const char *ptr;
+	char *tmp;
+	/*
+	 * <Syntax>
+	 * SRC:SLN[+NUM|-ELN]
+	 * FUNC[:SLN[+NUM|-ELN]]
+	 */
+	ptr = strchr(arg, ':');
+	if (ptr) {
+		lr->start = (unsigned int)strtoul(ptr + 1, &tmp, 0);
+		if (*tmp == '+')
+			lr->end = lr->start + (unsigned int)strtoul(tmp + 1,
+								    &tmp, 0);
+		else if (*tmp == '-')
+			lr->end = (unsigned int)strtoul(tmp + 1, &tmp, 0);
+		else
+			lr->end = 0;
+		pr_debug("Line range is %u to %u\n", lr->start, lr->end);
+		if (lr->end && lr->start > lr->end)
+			semantic_error("Start line must be smaller"
+				       " than end line.");
+		if (*tmp != '\0')
+			semantic_error("Tailing with invalid character '%d'.",
+				       *tmp);
+		tmp = strndup(arg, (ptr - arg));
+	} else
+		tmp = strdup(arg);
+
+	if (strchr(tmp, '.'))
+		lr->file = tmp;
+	else
+		lr->function = tmp;
+}
+
 /* Check the name is good for event/group */
 static bool check_event_name(const char *name)
 {
@@ -370,7 +408,7 @@ static int open_kprobe_events(int flags, int mode)
 	if (ret < 0) {
 		if (errno == ENOENT)
 			die("kprobe_events file does not exist -"
-			    " please rebuild with CONFIG_KPROBE_TRACER.");
+			    " please rebuild with CONFIG_KPROBE_EVENT.");
 		else
 			die("Could not open kprobe_events file: %s",
 			    strerror(errno));
@@ -457,7 +495,9 @@ void show_perf_probe_events(void)
 	struct strlist *rawlist;
 	struct str_node *ent;
 
+	setup_pager();
 	memset(&pp, 0, sizeof(pp));
+
 	fd = open_kprobe_events(O_RDONLY, 0);
 	rawlist = get_trace_kprobe_event_rawlist(fd);
 	close(fd);
@@ -678,3 +718,66 @@ void del_trace_kprobe_events(struct strlist *dellist)
 	close(fd);
 }
 
+#define LINEBUF_SIZE 256
+
+static void show_one_line(FILE *fp, unsigned int l, bool skip, bool show_num)
+{
+	char buf[LINEBUF_SIZE];
+	const char *color = PERF_COLOR_BLUE;
+
+	if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+		goto error;
+	if (!skip) {
+		if (show_num)
+			fprintf(stdout, "%7u %s", l, buf);
+		else
+			color_fprintf(stdout, color, " %s", buf);
+	}
+
+	while (strlen(buf) == LINEBUF_SIZE - 1 &&
+	       buf[LINEBUF_SIZE - 2] != '\n') {
+		if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+			goto error;
+		if (!skip) {
+			if (show_num)
+				fprintf(stdout, "%s", buf);
+			else
+				color_fprintf(stdout, color, "%s", buf);
+		}
+	}
+	return;
+error:
+	if (feof(fp))
+		die("Source file is shorter than expected.");
+	else
+		die("File read error: %s", strerror(errno));
+}
+
+void show_line_range(struct line_range *lr)
+{
+	unsigned int l = 1;
+	struct line_node *ln;
+	FILE *fp;
+
+	setup_pager();
+
+	if (lr->function)
+		fprintf(stdout, "<%s:%d>\n", lr->function,
+			lr->start - lr->offset);
+	else
+		fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
+
+	fp = fopen(lr->path, "r");
+	if (fp == NULL)
+		die("Failed to open %s: %s", lr->path, strerror(errno));
+	/* Skip to starting line number */
+	while (l < lr->start)
+		show_one_line(fp, l++, true, false);
+
+	list_for_each_entry(ln, &lr->line_list, list) {
+		while (ln->line > l)
+			show_one_line(fp, (l++) - lr->offset, false, false);
+		show_one_line(fp, (l++) - lr->offset, false, true);
+	}
+	fclose(fp);
+}
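
parse_line_range_desc(), added earlier in this file, accepts FILE:START[+NUM|-END] or FUNC[:START[+NUM|-END]], telling files and functions apart by the presence of a dot. Under that grammar, "sched.c:100+5" selects sched.c lines 100 through 105, "sched.c:100-110" selects 100 through 110, and "schedule" selects the whole function. A minimal re-implementation of just the numeric suffix parsing:

/* Minimal sketch of the START[+NUM|-END] suffix parsing used above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *spec = "100+5";	/* as in "sched.c:100+5" */
	char *tmp;
	unsigned int start, end;

	start = (unsigned int)strtoul(spec, &tmp, 0);
	if (*tmp == '+')
		end = start + (unsigned int)strtoul(tmp + 1, &tmp, 0);
	else if (*tmp == '-')
		end = (unsigned int)strtoul(tmp + 1, &tmp, 0);
	else
		end = 0;		/* 0 means "to the end of the scope" */

	printf("lines %u..%u\n", start, end);	/* prints: lines 100..105 */
	return 0;
}
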
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 7f1d499118c0..711287d4baea 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -5,6 +5,7 @@
5#include "probe-finder.h" 5#include "probe-finder.h"
6#include "strlist.h" 6#include "strlist.h"
7 7
8extern void parse_line_range_desc(const char *arg, struct line_range *lr);
8extern void parse_perf_probe_event(const char *str, struct probe_point *pp, 9extern void parse_perf_probe_event(const char *str, struct probe_point *pp,
9 bool *need_dwarf); 10 bool *need_dwarf);
10extern int synthesize_perf_probe_point(struct probe_point *pp); 11extern int synthesize_perf_probe_point(struct probe_point *pp);
@@ -15,6 +16,7 @@ extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes,
15 bool force_add); 16 bool force_add);
16extern void del_trace_kprobe_events(struct strlist *dellist); 17extern void del_trace_kprobe_events(struct strlist *dellist);
17extern void show_perf_probe_events(void); 18extern void show_perf_probe_events(void);
19extern void show_line_range(struct line_range *lr);
18 20
19/* Maximum index number of event-name postfix */ 21/* Maximum index number of event-name postfix */
20#define MAX_EVENT_INDEX 1024 22#define MAX_EVENT_INDEX 1024
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 4b852c0d16a5..1b2124d12f68 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -140,6 +140,31 @@ static Dwarf_Unsigned cu_find_fileno(Dwarf_Die cu_die, const char *fname)
140 return found; 140 return found;
141} 141}
142 142
143static int cu_get_filename(Dwarf_Die cu_die, Dwarf_Unsigned fno, char **buf)
144{
145 Dwarf_Signed cnt, i;
146 char **srcs;
147 int ret = 0;
148
149 if (!buf || !fno)
150 return -EINVAL;
151
152 ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error);
153 if (ret == DW_DLV_OK) {
154 if ((Dwarf_Unsigned)cnt > fno - 1) {
155 *buf = strdup(srcs[fno - 1]);
156 ret = 0;
157 pr_debug("found filename: %s\n", *buf);
158 } else
159 ret = -ENOENT;
160 for (i = 0; i < cnt; i++)
161 dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING);
162 dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST);
163 } else
164 ret = -EINVAL;
165 return ret;
166}
167
143/* Compare diename and tname */ 168/* Compare diename and tname */
144static int die_compare_name(Dwarf_Die dw_die, const char *tname) 169static int die_compare_name(Dwarf_Die dw_die, const char *tname)
145{ 170{
@@ -402,11 +427,11 @@ static void show_location(Dwarf_Loc *loc, struct probe_finder *pf)
402 } else if (op == DW_OP_regx) { 427 } else if (op == DW_OP_regx) {
403 regn = loc->lr_number; 428 regn = loc->lr_number;
404 } else 429 } else
405 die("Dwarf_OP %d is not supported.\n", op); 430 die("Dwarf_OP %d is not supported.", op);
406 431
407 regs = get_arch_regstr(regn); 432 regs = get_arch_regstr(regn);
408 if (!regs) 433 if (!regs)
409 die("%lld exceeds max register number.\n", regn); 434 die("%lld exceeds max register number.", regn);
410 435
411 if (deref) 436 if (deref)
412 ret = snprintf(pf->buf, pf->len, 437 ret = snprintf(pf->buf, pf->len,
@@ -438,7 +463,7 @@ static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf)
438 return ; 463 return ;
439error: 464error:
440 die("Failed to find the location of %s at this address.\n" 465 die("Failed to find the location of %s at this address.\n"
441 " Perhaps, it has been optimized out.\n", pf->var); 466 " Perhaps, it has been optimized out.", pf->var);
442} 467}
443 468
444static int variable_callback(struct die_link *dlink, void *data) 469static int variable_callback(struct die_link *dlink, void *data)
@@ -476,7 +501,7 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf)
476 /* Search child die for local variables and parameters. */ 501 /* Search child die for local variables and parameters. */
477 ret = search_die_from_children(sp_die, variable_callback, pf); 502 ret = search_die_from_children(sp_die, variable_callback, pf);
478 if (!ret) 503 if (!ret)
479 die("Failed to find '%s' in this function.\n", pf->var); 504 die("Failed to find '%s' in this function.", pf->var);
480} 505}
481 506
482/* Get a frame base on the address */ 507/* Get a frame base on the address */
@@ -567,7 +592,7 @@ static int probeaddr_callback(struct die_link *dlink, void *data)
567} 592}
568 593
569/* Find probe point from its line number */ 594/* Find probe point from its line number */
570static void find_by_line(struct probe_finder *pf) 595static void find_probe_point_by_line(struct probe_finder *pf)
571{ 596{
572 Dwarf_Signed cnt, i, clm; 597 Dwarf_Signed cnt, i, clm;
573 Dwarf_Line *lines; 598 Dwarf_Line *lines;
@@ -602,7 +627,7 @@ static void find_by_line(struct probe_finder *pf)
602 ret = search_die_from_children(pf->cu_die, 627 ret = search_die_from_children(pf->cu_die,
603 probeaddr_callback, pf); 628 probeaddr_callback, pf);
604 if (ret == 0) 629 if (ret == 0)
605 die("Probe point is not found in subprograms.\n"); 630 die("Probe point is not found in subprograms.");
606 /* Continuing, because target line might be inlined. */ 631 /* Continuing, because target line might be inlined. */
607 } 632 }
608 dwarf_srclines_dealloc(__dw_debug, lines, cnt); 633 dwarf_srclines_dealloc(__dw_debug, lines, cnt);
@@ -626,7 +651,7 @@ static int probefunc_callback(struct die_link *dlink, void *data)
626 pf->fno = die_get_decl_file(dlink->die); 651 pf->fno = die_get_decl_file(dlink->die);
627 pf->lno = die_get_decl_line(dlink->die) 652 pf->lno = die_get_decl_line(dlink->die)
628 + pp->line; 653 + pp->line;
629 find_by_line(pf); 654 find_probe_point_by_line(pf);
630 return 1; 655 return 1;
631 } 656 }
632 if (die_inlined_subprogram(dlink->die)) { 657 if (die_inlined_subprogram(dlink->die)) {
@@ -661,7 +686,7 @@ static int probefunc_callback(struct die_link *dlink, void *data)
661 !die_inlined_subprogram(lk->die)) 686 !die_inlined_subprogram(lk->die))
662 goto found; 687 goto found;
663 } 688 }
664 die("Failed to find real subprogram.\n"); 689 die("Failed to find real subprogram.");
665found: 690found:
666 /* Get offset from subprogram */ 691 /* Get offset from subprogram */
667 ret = die_within_subprogram(lk->die, pf->addr, &offs); 692 ret = die_within_subprogram(lk->die, pf->addr, &offs);
@@ -673,7 +698,7 @@ found:
673 return 0; 698 return 0;
674} 699}
675 700
676static void find_by_func(struct probe_finder *pf) 701static void find_probe_point_by_func(struct probe_finder *pf)
677{ 702{
678 search_die_from_children(pf->cu_die, probefunc_callback, pf); 703 search_die_from_children(pf->cu_die, probefunc_callback, pf);
679} 704}
@@ -714,10 +739,10 @@ int find_probepoint(int fd, struct probe_point *pp)
714 if (ret == DW_DLV_NO_ENTRY) 739 if (ret == DW_DLV_NO_ENTRY)
715 pf.cu_base = 0; 740 pf.cu_base = 0;
716 if (pp->function) 741 if (pp->function)
717 find_by_func(&pf); 742 find_probe_point_by_func(&pf);
718 else { 743 else {
719 pf.lno = pp->line; 744 pf.lno = pp->line;
720 find_by_line(&pf); 745 find_probe_point_by_line(&pf);
721 } 746 }
722 } 747 }
723 dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE); 748 dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE);
@@ -728,3 +753,159 @@ int find_probepoint(int fd, struct probe_point *pp)
728 return pp->found; 753 return pp->found;
729} 754}
730 755
756
757static void line_range_add_line(struct line_range *lr, unsigned int line)
758{
759 struct line_node *ln;
760 struct list_head *p;
761
762 /* Search in reverse, since the new line is usually the largest */
763 list_for_each_entry_reverse(ln, &lr->line_list, list) {
764 if (ln->line < line) {
765 p = &ln->list;
766 goto found;
767 } else if (ln->line == line) /* Already exists */
768 return;
769 }
770 /* List is empty, or the smallest entry */
771 p = &lr->line_list;
772found:
773 pr_debug("add a line %u\n", line);
774 ln = zalloc(sizeof(struct line_node));
775 DIE_IF(ln == NULL);
776 ln->line = line;
777 INIT_LIST_HEAD(&ln->list);
778 list_add(&ln->list, p);
779}
780
781/* Find line range from its line number */
782static void find_line_range_by_line(struct line_finder *lf)
783{
784 Dwarf_Signed cnt, i;
785 Dwarf_Line *lines;
786 Dwarf_Unsigned lineno = 0;
787 Dwarf_Unsigned fno;
788 Dwarf_Addr addr;
789 int ret;
790
791 ret = dwarf_srclines(lf->cu_die, &lines, &cnt, &__dw_error);
792 DIE_IF(ret != DW_DLV_OK);
793
794 for (i = 0; i < cnt; i++) {
795 ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error);
796 DIE_IF(ret != DW_DLV_OK);
797 if (fno != lf->fno)
798 continue;
799
800 ret = dwarf_lineno(lines[i], &lineno, &__dw_error);
801 DIE_IF(ret != DW_DLV_OK);
802 if (lf->lno_s > lineno || lf->lno_e < lineno)
803 continue;
804
805 /* Keep only lines inside the function address range */
806 if (lf->addr_s && lf->addr_e) {
807 ret = dwarf_lineaddr(lines[i], &addr, &__dw_error);
808 DIE_IF(ret != DW_DLV_OK);
809 if (lf->addr_s > addr || lf->addr_e <= addr)
810 continue;
811 }
812 line_range_add_line(lf->lr, (unsigned int)lineno);
813 }
814 dwarf_srclines_dealloc(__dw_debug, lines, cnt);
815 if (!list_empty(&lf->lr->line_list))
816 lf->found = 1;
817}
818
819/* Search function from function name */
820static int linefunc_callback(struct die_link *dlink, void *data)
821{
822 struct line_finder *lf = (struct line_finder *)data;
823 struct line_range *lr = lf->lr;
824 Dwarf_Half tag;
825 int ret;
826
827 ret = dwarf_tag(dlink->die, &tag, &__dw_error);
828 DIE_IF(ret == DW_DLV_ERROR);
829 if (tag == DW_TAG_subprogram &&
830 die_compare_name(dlink->die, lr->function) == 0) {
831 /* Get the address range of this function */
832 ret = dwarf_highpc(dlink->die, &lf->addr_e, &__dw_error);
833 if (ret == DW_DLV_OK)
834 ret = dwarf_lowpc(dlink->die, &lf->addr_s, &__dw_error);
835 DIE_IF(ret == DW_DLV_ERROR);
836 if (ret == DW_DLV_NO_ENTRY) {
837 lf->addr_s = 0;
838 lf->addr_e = 0;
839 }
840
841 lf->fno = die_get_decl_file(dlink->die);
842 lr->offset = die_get_decl_line(dlink->die);
843 lf->lno_s = lr->offset + lr->start;
844 if (!lr->end)
845 lf->lno_e = (Dwarf_Unsigned)-1;
846 else
847 lf->lno_e = lr->offset + lr->end;
848 lr->start = lf->lno_s;
849 lr->end = lf->lno_e;
850 find_line_range_by_line(lf);
851 /* If we found the target function, stop searching. */
852 lf->found = 1;
853 return 1;
854 }
855 return 0;
856}
857
858static void find_line_range_by_func(struct line_finder *lf)
859{
860 search_die_from_children(lf->cu_die, linefunc_callback, lf);
861}
862
863int find_line_range(int fd, struct line_range *lr)
864{
865 Dwarf_Half addr_size = 0;
866 Dwarf_Unsigned next_cuh = 0;
867 int ret;
868 struct line_finder lf = {.lr = lr};
869
870 ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error);
871 if (ret != DW_DLV_OK)
872 return -ENOENT;
873
874 while (!lf.found) {
875 /* Search CU (Compilation Unit) */
876 ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL,
877 &addr_size, &next_cuh, &__dw_error);
878 DIE_IF(ret == DW_DLV_ERROR);
879 if (ret == DW_DLV_NO_ENTRY)
880 break;
881
882 /* Get the DIE (Debugging Information Entry) of this CU */
883 ret = dwarf_siblingof(__dw_debug, 0, &lf.cu_die, &__dw_error);
884 DIE_IF(ret != DW_DLV_OK);
885
886 /* Check if target file is included. */
887 if (lr->file)
888 lf.fno = cu_find_fileno(lf.cu_die, lr->file);
889
890 if (!lr->file || lf.fno) {
891 if (lr->function)
892 find_line_range_by_func(&lf);
893 else {
894 lf.lno_s = lr->start;
895 if (!lr->end)
896 lf.lno_e = (Dwarf_Unsigned)-1;
897 else
898 lf.lno_e = lr->end;
899 find_line_range_by_line(&lf);
900 }
901 /* Get the real file path */
902 if (lf.found)
903 cu_get_filename(lf.cu_die, lf.fno, &lr->path);
904 }
905 dwarf_dealloc(__dw_debug, lf.cu_die, DW_DLA_DIE);
906 }
907 ret = dwarf_finish(__dw_debug, &__dw_error);
908 DIE_IF(ret != DW_DLV_OK);
909 return lf.found;
910}
911
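
line_range_add_line() above keeps lr->line_list sorted and duplicate-free, walking the list tail-first because DWARF line records mostly arrive in ascending order. The same invariant with a plain singly linked list, walking head-first for brevity (a sketch, not the kernel-list version the patch uses):

#include <stdio.h>
#include <stdlib.h>

struct lnode {
	unsigned int line;
	struct lnode *next;
};

/* Insert 'line' into the ascending list at *head; ignore duplicates. */
static void lnode_add(struct lnode **head, unsigned int line)
{
	struct lnode **p = head, *ln;

	while (*p && (*p)->line < line)
		p = &(*p)->next;
	if (*p && (*p)->line == line)	/* already present */
		return;
	ln = calloc(1, sizeof(*ln));
	if (!ln)
		abort();
	ln->line = line;
	ln->next = *p;
	*p = ln;
}

int main(void)
{
	struct lnode *head = NULL, *ln;

	lnode_add(&head, 3);
	lnode_add(&head, 1);
	lnode_add(&head, 3);
	for (ln = head; ln; ln = ln->next)
		printf("%u\n", ln->line);	/* prints 1 then 3 */
	return 0;
}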
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index a4086aaddb73..972b386116f1 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -1,6 +1,8 @@
1#ifndef _PROBE_FINDER_H 1#ifndef _PROBE_FINDER_H
2#define _PROBE_FINDER_H 2#define _PROBE_FINDER_H
3 3
4#include "util.h"
5
4#define MAX_PATH_LEN 256 6#define MAX_PATH_LEN 256
5#define MAX_PROBE_BUFFER 1024 7#define MAX_PROBE_BUFFER 1024
6#define MAX_PROBES 128 8#define MAX_PROBES 128
@@ -32,8 +34,26 @@ struct probe_point {
32 char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/ 34 char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/
33}; 35};
34 36
37/* Line number container */
38struct line_node {
39 struct list_head list;
40 unsigned int line;
41};
42
43/* Line range */
44struct line_range {
45 char *file; /* File name */
46 char *function; /* Function name */
47 unsigned int start; /* Start line number */
48 unsigned int end; /* End line number */
49 unsigned int offset; /* Start line offset */
50 char *path; /* Real path name */
51 struct list_head line_list; /* Visible lines */
52};
53
35#ifndef NO_LIBDWARF 54#ifndef NO_LIBDWARF
36extern int find_probepoint(int fd, struct probe_point *pp); 55extern int find_probepoint(int fd, struct probe_point *pp);
56extern int find_line_range(int fd, struct line_range *lr);
37 57
38/* Workaround for undefined _MIPS_SZLONG bug in libdwarf.h: */ 58/* Workaround for undefined _MIPS_SZLONG bug in libdwarf.h: */
39#ifndef _MIPS_SZLONG 59#ifndef _MIPS_SZLONG
@@ -60,6 +80,19 @@ struct probe_finder {
60 char *buf; /* Current output buffer */ 80 char *buf; /* Current output buffer */
61 int len; /* Length of output buffer */ 81 int len; /* Length of output buffer */
62}; 82};
83
84struct line_finder {
85 struct line_range *lr; /* Target line range */
86
87 Dwarf_Unsigned fno; /* File number */
88 Dwarf_Unsigned lno_s; /* Start line number */
89 Dwarf_Unsigned lno_e; /* End line number */
90 Dwarf_Addr addr_s; /* Start address */
91 Dwarf_Addr addr_e; /* End address */
92 Dwarf_Die cu_die; /* Current CU */
93 int found;
94};
95
63#endif /* NO_LIBDWARF */ 96#endif /* NO_LIBDWARF */
64 97
65#endif /*_PROBE_FINDER_H */ 98#endif /*_PROBE_FINDER_H */
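
Putting the new declarations together: a caller fills a struct line_range, passes an fd on the debuginfo file to find_line_range(), and prints any hits with show_line_range(). A hypothetical fragment (the source file, range and "vmlinux" path are made up, and error handling is trimmed):

#include <fcntl.h>
#include "probe-event.h"
#include "probe-finder.h"

static void show_sched_lines(void)
{
	struct line_range lr = {
		.file  = "kernel/sched.c",	/* illustrative */
		.start = 100,
		.end   = 120,
	};
	int fd;

	INIT_LIST_HEAD(&lr.line_list);
	fd = open("vmlinux", O_RDONLY);		/* path illustrative */
	if (fd >= 0 && find_line_range(fd, &lr) > 0)
		show_line_range(&lr);
}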
diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 6d6d76b8a21e..5376378e0cfc 100644
--- a/tools/perf/util/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -25,10 +25,16 @@
25#include <ctype.h> 25#include <ctype.h>
26#include <errno.h> 26#include <errno.h>
27 27
28#include "../perf.h" 28#include "../../perf.h"
29#include "util.h" 29#include "../util.h"
30#include "trace-event.h" 30#include "../trace-event.h"
31#include "trace-event-perl.h" 31
32#include <EXTERN.h>
33#include <perl.h>
34
35void boot_Perf__Trace__Context(pTHX_ CV *cv);
36void boot_DynaLoader(pTHX_ CV *cv);
37typedef PerlInterpreter * INTERP;
32 38
33void xs_init(pTHX); 39void xs_init(pTHX);
34 40
@@ -49,7 +55,7 @@ INTERP my_perl;
49 55
50struct event *events[FTRACE_MAX_EVENT]; 56struct event *events[FTRACE_MAX_EVENT];
51 57
52static struct scripting_context *scripting_context; 58extern struct scripting_context *scripting_context;
53 59
54static char *cur_field_name; 60static char *cur_field_name;
55static int zero_flag_atom; 61static int zero_flag_atom;
@@ -239,33 +245,6 @@ static inline struct event *find_cache_event(int type)
239 return event; 245 return event;
240} 246}
241 247
242int common_pc(struct scripting_context *context)
243{
244 int pc;
245
246 pc = parse_common_pc(context->event_data);
247
248 return pc;
249}
250
251int common_flags(struct scripting_context *context)
252{
253 int flags;
254
255 flags = parse_common_flags(context->event_data);
256
257 return flags;
258}
259
260int common_lock_depth(struct scripting_context *context)
261{
262 int lock_depth;
263
264 lock_depth = parse_common_lock_depth(context->event_data);
265
266 return lock_depth;
267}
268
269static void perl_process_event(int cpu, void *data, 248static void perl_process_event(int cpu, void *data,
270 int size __unused, 249 int size __unused,
271 unsigned long long nsecs, char *comm) 250 unsigned long long nsecs, char *comm)
@@ -587,75 +566,3 @@ struct scripting_ops perl_scripting_ops = {
587 .process_event = perl_process_event, 566 .process_event = perl_process_event,
588 .generate_script = perl_generate_script, 567 .generate_script = perl_generate_script,
589}; 568};
590
591static void print_unsupported_msg(void)
592{
593 fprintf(stderr, "Perl scripting not supported."
594 " Install libperl and rebuild perf to enable it.\n"
595 "For example:\n # apt-get install libperl-dev (ubuntu)"
596 "\n # yum install perl-ExtUtils-Embed (Fedora)"
597 "\n etc.\n");
598}
599
600static int perl_start_script_unsupported(const char *script __unused,
601 int argc __unused,
602 const char **argv __unused)
603{
604 print_unsupported_msg();
605
606 return -1;
607}
608
609static int perl_stop_script_unsupported(void)
610{
611 return 0;
612}
613
614static void perl_process_event_unsupported(int cpu __unused,
615 void *data __unused,
616 int size __unused,
617 unsigned long long nsecs __unused,
618 char *comm __unused)
619{
620}
621
622static int perl_generate_script_unsupported(const char *outfile __unused)
623{
624 print_unsupported_msg();
625
626 return -1;
627}
628
629struct scripting_ops perl_scripting_unsupported_ops = {
630 .name = "Perl",
631 .start_script = perl_start_script_unsupported,
632 .stop_script = perl_stop_script_unsupported,
633 .process_event = perl_process_event_unsupported,
634 .generate_script = perl_generate_script_unsupported,
635};
636
637static void register_perl_scripting(struct scripting_ops *scripting_ops)
638{
639 int err;
640 err = script_spec_register("Perl", scripting_ops);
641 if (err)
642 die("error registering Perl script extension");
643
644 err = script_spec_register("pl", scripting_ops);
645 if (err)
646 die("error registering pl script extension");
647
648 scripting_context = malloc(sizeof(struct scripting_context));
649}
650
651#ifdef NO_LIBPERL
652void setup_perl_scripting(void)
653{
654 register_perl_scripting(&perl_scripting_unsupported_ops);
655}
656#else
657void setup_perl_scripting(void)
658{
659 register_perl_scripting(&perl_scripting_ops);
660}
661#endif
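
The move to scripting-engines/ keeps the build-time fallback this file established: when the interpreter library is absent, an ops table of stubs is registered instead of the real one, so 'perf trace' degrades gracefully rather than failing to build. The shape of that pattern, reduced to a standalone sketch (struct and function names here are invented):

#include <stdio.h>

struct ops_sketch {
	const char *name;
	int (*start_script)(const char *script);
};

static int start_script(const char *script)
{
	printf("running %s\n", script);
	return 0;
}

static int start_script_unsupported(const char *script)
{
	fprintf(stderr, "%s: scripting support not compiled in\n", script);
	return -1;
}

static struct ops_sketch real_ops = {
	.name		= "Perl",
	.start_script	= start_script,
};

static struct ops_sketch unsupported_ops = {
	.name		= "Perl",
	.start_script	= start_script_unsupported,
};

#ifdef NO_LIBPERL
static struct ops_sketch *ops = &unsupported_ops;
#else
static struct ops_sketch *ops = &real_ops;
#endif

int main(void)
{
	return ops->start_script("rw-by-pid.pl");
}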
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
new file mode 100644
index 000000000000..33a414bbba3e
--- /dev/null
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -0,0 +1,573 @@
1/*
2 * trace-event-python. Feed trace events to an embedded Python interpreter.
3 *
4 * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21
22#include <Python.h>
23
24#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
27#include <ctype.h>
28#include <errno.h>
29
30#include "../../perf.h"
31#include "../util.h"
32#include "../trace-event.h"
33
34PyMODINIT_FUNC initperf_trace_context(void);
35
36#define FTRACE_MAX_EVENT \
37 ((1 << (sizeof(unsigned short) * 8)) - 1)
38
39struct event *events[FTRACE_MAX_EVENT];
40
41#define MAX_FIELDS 64
42#define N_COMMON_FIELDS 7
43
44extern struct scripting_context *scripting_context;
45
46static char *cur_field_name;
47static int zero_flag_atom;
48
49static PyObject *main_module, *main_dict;
50
51static void handler_call_die(const char *handler_name)
52{
53 PyErr_Print();
54 Py_FatalError("problem in Python trace event handler");
55}
56
57static void define_value(enum print_arg_type field_type,
58 const char *ev_name,
59 const char *field_name,
60 const char *field_value,
61 const char *field_str)
62{
63 const char *handler_name = "define_flag_value";
64 PyObject *handler, *t, *retval;
65 unsigned long long value;
66 unsigned n = 0;
67
68 if (field_type == PRINT_SYMBOL)
69 handler_name = "define_symbolic_value";
70
71 t = PyTuple_New(4);
72 if (!t)
73 Py_FatalError("couldn't create Python tuple");
74
75 value = eval_flag(field_value);
76
77 PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
78 PyTuple_SetItem(t, n++, PyString_FromString(field_name));
79 PyTuple_SetItem(t, n++, PyInt_FromLong(value));
80 PyTuple_SetItem(t, n++, PyString_FromString(field_str));
81
82 handler = PyDict_GetItemString(main_dict, handler_name);
83 if (handler && PyCallable_Check(handler)) {
84 retval = PyObject_CallObject(handler, t);
85 if (retval == NULL)
86 handler_call_die(handler_name);
87 }
88
89 Py_DECREF(t);
90}
91
92static void define_values(enum print_arg_type field_type,
93 struct print_flag_sym *field,
94 const char *ev_name,
95 const char *field_name)
96{
97 define_value(field_type, ev_name, field_name, field->value,
98 field->str);
99
100 if (field->next)
101 define_values(field_type, field->next, ev_name, field_name);
102}
103
104static void define_field(enum print_arg_type field_type,
105 const char *ev_name,
106 const char *field_name,
107 const char *delim)
108{
109 const char *handler_name = "define_flag_field";
110 PyObject *handler, *t, *retval;
111 unsigned n = 0;
112
113 if (field_type == PRINT_SYMBOL)
114 handler_name = "define_symbolic_field";
115
116 if (field_type == PRINT_FLAGS)
117 t = PyTuple_New(3);
118 else
119 t = PyTuple_New(2);
120 if (!t)
121 Py_FatalError("couldn't create Python tuple");
122
123 PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
124 PyTuple_SetItem(t, n++, PyString_FromString(field_name));
125 if (field_type == PRINT_FLAGS)
126 PyTuple_SetItem(t, n++, PyString_FromString(delim));
127
128 handler = PyDict_GetItemString(main_dict, handler_name);
129 if (handler && PyCallable_Check(handler)) {
130 retval = PyObject_CallObject(handler, t);
131 if (retval == NULL)
132 handler_call_die(handler_name);
133 }
134
135 Py_DECREF(t);
136}
137
138static void define_event_symbols(struct event *event,
139 const char *ev_name,
140 struct print_arg *args)
141{
142 switch (args->type) {
143 case PRINT_NULL:
144 break;
145 case PRINT_ATOM:
146 define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
147 args->atom.atom);
148 zero_flag_atom = 0;
149 break;
150 case PRINT_FIELD:
151 if (cur_field_name)
152 free(cur_field_name);
153 cur_field_name = strdup(args->field.name);
154 break;
155 case PRINT_FLAGS:
156 define_event_symbols(event, ev_name, args->flags.field);
157 define_field(PRINT_FLAGS, ev_name, cur_field_name,
158 args->flags.delim);
159 define_values(PRINT_FLAGS, args->flags.flags, ev_name,
160 cur_field_name);
161 break;
162 case PRINT_SYMBOL:
163 define_event_symbols(event, ev_name, args->symbol.field);
164 define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
165 define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
166 cur_field_name);
167 break;
168 case PRINT_STRING:
169 break;
170 case PRINT_TYPE:
171 define_event_symbols(event, ev_name, args->typecast.item);
172 break;
173 case PRINT_OP:
174 if (strcmp(args->op.op, ":") == 0)
175 zero_flag_atom = 1;
176 define_event_symbols(event, ev_name, args->op.left);
177 define_event_symbols(event, ev_name, args->op.right);
178 break;
179 default:
180 /* we should warn... */
181 return;
182 }
183
184 if (args->next)
185 define_event_symbols(event, ev_name, args->next);
186}
187
188static inline struct event *find_cache_event(int type)
189{
190 static char ev_name[256];
191 struct event *event;
192
193 if (events[type])
194 return events[type];
195
196 events[type] = event = trace_find_event(type);
197 if (!event)
198 return NULL;
199
200 sprintf(ev_name, "%s__%s", event->system, event->name);
201
202 define_event_symbols(event, ev_name, event->print_fmt.args);
203
204 return event;
205}
206
207static void python_process_event(int cpu, void *data,
208 int size __unused,
209 unsigned long long nsecs, char *comm)
210{
211 PyObject *handler, *retval, *context, *t;
212 static char handler_name[256];
213 struct format_field *field;
214 unsigned long long val;
215 unsigned long s, ns;
216 struct event *event;
217 unsigned n = 0;
218 int type;
219 int pid;
220
221 t = PyTuple_New(MAX_FIELDS);
222 if (!t)
223 Py_FatalError("couldn't create Python tuple");
224
225 type = trace_parse_common_type(data);
226
227 event = find_cache_event(type);
228 if (!event)
229 die("ugh! no event found for type %d", type);
230
231 pid = trace_parse_common_pid(data);
232
233 sprintf(handler_name, "%s__%s", event->system, event->name);
234
235 s = nsecs / NSECS_PER_SEC;
236 ns = nsecs - s * NSECS_PER_SEC;
237
238 scripting_context->event_data = data;
239
240 context = PyCObject_FromVoidPtr(scripting_context, NULL);
241
242 PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
243 PyTuple_SetItem(t, n++,
244 PyCObject_FromVoidPtr(scripting_context, NULL));
245 PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
246 PyTuple_SetItem(t, n++, PyInt_FromLong(s));
247 PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
248 PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
249 PyTuple_SetItem(t, n++, PyString_FromString(comm));
250
251 for (field = event->format.fields; field; field = field->next) {
252 if (field->flags & FIELD_IS_STRING) {
253 int offset;
254 if (field->flags & FIELD_IS_DYNAMIC) {
255 offset = *(int *)(data + field->offset);
256 offset &= 0xffff;
257 } else
258 offset = field->offset;
259 PyTuple_SetItem(t, n++,
260 PyString_FromString((char *)data + offset));
261 } else { /* FIELD_IS_NUMERIC */
262 val = read_size(data + field->offset, field->size);
263 if (field->flags & FIELD_IS_SIGNED) {
264 PyTuple_SetItem(t, n++, PyInt_FromLong(val));
265 } else {
266 PyTuple_SetItem(t, n++, PyInt_FromLong(val));
267 }
268 }
269 }
270
271 if (_PyTuple_Resize(&t, n) == -1)
272 Py_FatalError("error resizing Python tuple");
273
274 handler = PyDict_GetItemString(main_dict, handler_name);
275 if (handler && PyCallable_Check(handler)) {
276 retval = PyObject_CallObject(handler, t);
277 if (retval == NULL)
278 handler_call_die(handler_name);
279 } else {
280 handler = PyDict_GetItemString(main_dict, "trace_unhandled");
281 if (handler && PyCallable_Check(handler)) {
282 if (_PyTuple_Resize(&t, N_COMMON_FIELDS) == -1)
283 Py_FatalError("error resizing Python tuple");
284
285 retval = PyObject_CallObject(handler, t);
286 if (retval == NULL)
287 handler_call_die("trace_unhandled");
288 }
289 }
290
291 Py_DECREF(t);
292}
293
294static int run_start_sub(void)
295{
296 PyObject *handler, *retval;
297 int err = 0;
298
299 main_module = PyImport_AddModule("__main__");
300 if (main_module == NULL)
301 return -1;
302 Py_INCREF(main_module);
303
304 main_dict = PyModule_GetDict(main_module);
305 if (main_dict == NULL) {
306 err = -1;
307 goto error;
308 }
309 Py_INCREF(main_dict);
310
311 handler = PyDict_GetItemString(main_dict, "trace_begin");
312 if (handler == NULL || !PyCallable_Check(handler))
313 goto out;
314
315 retval = PyObject_CallObject(handler, NULL);
316 if (retval == NULL)
317 handler_call_die("trace_begin");
318
319 Py_DECREF(retval);
320 return err;
321error:
322 Py_XDECREF(main_dict);
323 Py_XDECREF(main_module);
324out:
325 return err;
326}
327
328/*
329 * Start trace script
330 */
331static int python_start_script(const char *script, int argc, const char **argv)
332{
333 const char **command_line;
334 char buf[PATH_MAX];
335 int i, err = 0;
336 FILE *fp;
337
338 command_line = malloc((argc + 1) * sizeof(const char *));
339 command_line[0] = script;
340 for (i = 1; i < argc + 1; i++)
341 command_line[i] = argv[i - 1];
342
343 Py_Initialize();
344
345 initperf_trace_context();
346
347 PySys_SetArgv(argc + 1, (char **)command_line);
348
349 fp = fopen(script, "r");
350 if (!fp) {
351 sprintf(buf, "Can't open Python script \"%s\"", script);
352 perror(buf);
353 err = -1;
354 goto error;
355 }
356
357 err = PyRun_SimpleFile(fp, script);
358 if (err) {
359 fprintf(stderr, "Error running Python script %s\n", script);
360 goto error;
361 }
362
363 err = run_start_sub();
364 if (err) {
365 fprintf(stderr, "Error starting Python script %s\n", script);
366 goto error;
367 }
368
369 free(command_line);
370 fprintf(stderr, "perf trace started with Python script %s\n\n",
371 script);
372
373 return err;
374error:
375 Py_Finalize();
376 free(command_line);
377
378 return err;
379}
380
381/*
382 * Stop trace script
383 */
384static int python_stop_script(void)
385{
386 PyObject *handler, *retval;
387 int err = 0;
388
389 handler = PyDict_GetItemString(main_dict, "trace_end");
390 if (handler == NULL || !PyCallable_Check(handler))
391 goto out;
392
393 retval = PyObject_CallObject(handler, NULL);
394 if (retval == NULL)
395 handler_call_die("trace_end");
396 else
397 Py_DECREF(retval);
398out:
399 Py_XDECREF(main_dict);
400 Py_XDECREF(main_module);
401 Py_Finalize();
402
403 fprintf(stderr, "\nperf trace Python script stopped\n");
404
405 return err;
406}
407
408static int python_generate_script(const char *outfile)
409{
410 struct event *event = NULL;
411 struct format_field *f;
412 char fname[PATH_MAX];
413 int not_first, count;
414 FILE *ofp;
415
416 sprintf(fname, "%s.py", outfile);
417 ofp = fopen(fname, "w");
418 if (ofp == NULL) {
419 fprintf(stderr, "couldn't open %s\n", fname);
420 return -1;
421 }
422 fprintf(ofp, "# perf trace event handlers, "
423 "generated by perf trace -g python\n");
424
425 fprintf(ofp, "# Licensed under the terms of the GNU GPL"
426 " License version 2\n\n");
427
428 fprintf(ofp, "# The common_* event handler fields are the most useful "
429 "fields common to\n");
430
431 fprintf(ofp, "# all events. They don't necessarily correspond to "
432 "the 'common_*' fields\n");
433
434 fprintf(ofp, "# in the format files. Those fields not available as "
435 "handler params can\n");
436
437 fprintf(ofp, "# be retrieved using Python functions of the form "
438 "common_*(context).\n");
439
440 fprintf(ofp, "# See the perf-trace-python Documentation for the list "
441 "of available functions.\n\n");
442
443 fprintf(ofp, "import os\n");
444 fprintf(ofp, "import sys\n\n");
445
446 fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
447 fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
448 fprintf(ofp, "\nfrom perf_trace_context import *\n");
449 fprintf(ofp, "from Core import *\n\n\n");
450
451 fprintf(ofp, "def trace_begin():\n");
452 fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
453
454 fprintf(ofp, "def trace_end():\n");
455 fprintf(ofp, "\tprint \"in trace_end\"\n\n");
456
457 while ((event = trace_find_next_event(event))) {
458 fprintf(ofp, "def %s__%s(", event->system, event->name);
459 fprintf(ofp, "event_name, ");
460 fprintf(ofp, "context, ");
461 fprintf(ofp, "common_cpu,\n");
462 fprintf(ofp, "\tcommon_secs, ");
463 fprintf(ofp, "common_nsecs, ");
464 fprintf(ofp, "common_pid, ");
465 fprintf(ofp, "common_comm,\n\t");
466
467 not_first = 0;
468 count = 0;
469
470 for (f = event->format.fields; f; f = f->next) {
471 if (not_first++)
472 fprintf(ofp, ", ");
473 if (++count % 5 == 0)
474 fprintf(ofp, "\n\t");
475
476 fprintf(ofp, "%s", f->name);
477 }
478 fprintf(ofp, "):\n");
479
480 fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
481 "common_secs, common_nsecs,\n\t\t\t"
482 "common_pid, common_comm)\n\n");
483
484 fprintf(ofp, "\t\tprint \"");
485
486 not_first = 0;
487 count = 0;
488
489 for (f = event->format.fields; f; f = f->next) {
490 if (not_first++)
491 fprintf(ofp, ", ");
492 if (count && count % 3 == 0) {
493 fprintf(ofp, "\" \\\n\t\t\"");
494 }
495 count++;
496
497 fprintf(ofp, "%s=", f->name);
498 if (f->flags & FIELD_IS_STRING ||
499 f->flags & FIELD_IS_FLAG ||
500 f->flags & FIELD_IS_SYMBOLIC)
501 fprintf(ofp, "%%s");
502 else if (f->flags & FIELD_IS_SIGNED)
503 fprintf(ofp, "%%d");
504 else
505 fprintf(ofp, "%%u");
506 }
507
508 fprintf(ofp, "\\n\" %% \\\n\t\t(");
509
510 not_first = 0;
511 count = 0;
512
513 for (f = event->format.fields; f; f = f->next) {
514 if (not_first++)
515 fprintf(ofp, ", ");
516
517 if (++count % 5 == 0)
518 fprintf(ofp, "\n\t\t");
519
520 if (f->flags & FIELD_IS_FLAG) {
521 if ((count - 1) % 5 != 0) {
522 fprintf(ofp, "\n\t\t");
523 count = 4;
524 }
525 fprintf(ofp, "flag_str(\"");
526 fprintf(ofp, "%s__%s\", ", event->system,
527 event->name);
528 fprintf(ofp, "\"%s\", %s)", f->name,
529 f->name);
530 } else if (f->flags & FIELD_IS_SYMBOLIC) {
531 if ((count - 1) % 5 != 0) {
532 fprintf(ofp, "\n\t\t");
533 count = 4;
534 }
535 fprintf(ofp, "symbol_str(\"");
536 fprintf(ofp, "%s__%s\", ", event->system,
537 event->name);
538 fprintf(ofp, "\"%s\", %s)", f->name,
539 f->name);
540 } else
541 fprintf(ofp, "%s", f->name);
542 }
543
544 fprintf(ofp, "),\n\n");
545 }
546
547 fprintf(ofp, "def trace_unhandled(event_name, context, "
548 "common_cpu, common_secs, common_nsecs,\n\t\t"
549 "common_pid, common_comm):\n");
550
551 fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
552 "common_secs, common_nsecs,\n\t\tcommon_pid, "
553 "common_comm)\n\n");
554
555 fprintf(ofp, "def print_header("
556 "event_name, cpu, secs, nsecs, pid, comm):\n"
557 "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
558 "(event_name, cpu, secs, nsecs, pid, comm),\n");
559
560 fclose(ofp);
561
562 fprintf(stderr, "generated Python script: %s\n", fname);
563
564 return 0;
565}
566
567struct scripting_ops python_scripting_ops = {
568 .name = "Python",
569 .start_script = python_start_script,
570 .stop_script = python_stop_script,
571 .process_event = python_process_event,
572 .generate_script = python_generate_script,
573};
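
The embedding steps python_start_script() and python_process_event() rely on are few: initialize the interpreter, fetch __main__'s dict, look a handler up by name, check it is callable, call it. A minimal standalone program exercising the same Python 2 C API calls (the build line is an assumption about the local setup):

/* cc embed.c $(python2-config --cflags --ldflags) */
#include <Python.h>

int main(void)
{
	PyObject *dict, *handler, *retval;

	Py_Initialize();
	PyRun_SimpleString("def trace_begin():\n"
			   "    print 'in trace_begin'\n");
	/* Both lookups below return borrowed references. */
	dict = PyModule_GetDict(PyImport_AddModule("__main__"));
	handler = PyDict_GetItemString(dict, "trace_begin");
	if (handler && PyCallable_Check(handler)) {
		retval = PyObject_CallObject(handler, NULL);
		if (retval == NULL)
			Py_FatalError("trace_begin failed");
		Py_DECREF(retval);
	}
	Py_Finalize();
	return 0;
}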
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ce3a6c8abe76..0de7258e70a5 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1,5 +1,8 @@
1#define _FILE_OFFSET_BITS 64
2
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2 4
5#include <byteswap.h>
3#include <unistd.h> 6#include <unistd.h>
4#include <sys/types.h> 7#include <sys/types.h>
5 8
@@ -49,6 +52,11 @@ out_close:
49 return -1; 52 return -1;
50} 53}
51 54
55static inline int perf_session__create_kernel_maps(struct perf_session *self)
56{
57 return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps);
58}
59
52struct perf_session *perf_session__new(const char *filename, int mode, bool force) 60struct perf_session *perf_session__new(const char *filename, int mode, bool force)
53{ 61{
54 size_t len = filename ? strlen(filename) + 1 : 0; 62 size_t len = filename ? strlen(filename) + 1 : 0;
@@ -66,13 +74,22 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
66 self->mmap_window = 32; 74 self->mmap_window = 32;
67 self->cwd = NULL; 75 self->cwd = NULL;
68 self->cwdlen = 0; 76 self->cwdlen = 0;
77 self->unknown_events = 0;
69 map_groups__init(&self->kmaps); 78 map_groups__init(&self->kmaps);
70 79
71 if (perf_session__create_kernel_maps(self) < 0) 80 if (mode == O_RDONLY) {
72 goto out_delete; 81 if (perf_session__open(self, force) < 0)
82 goto out_delete;
83 } else if (mode == O_WRONLY) {
84 /*
85 * In O_RDONLY mode this will be performed when reading the
86 * kernel MMAP event, in event__process_mmap().
87 */
88 if (perf_session__create_kernel_maps(self) < 0)
89 goto out_delete;
90 }
73 91
74 if (mode == O_RDONLY && perf_session__open(self, force) < 0) 92 self->sample_type = perf_header__sample_type(&self->header);
75 goto out_delete;
76out: 93out:
77 return self; 94 return self;
78out_free: 95out_free:
@@ -148,3 +165,409 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
148 165
149 return syms; 166 return syms;
150} 167}
168
169static int process_event_stub(event_t *event __used,
170 struct perf_session *session __used)
171{
172 dump_printf(": unhandled!\n");
173 return 0;
174}
175
176static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
177{
178 if (handler->sample == NULL)
179 handler->sample = process_event_stub;
180 if (handler->mmap == NULL)
181 handler->mmap = process_event_stub;
182 if (handler->comm == NULL)
183 handler->comm = process_event_stub;
184 if (handler->fork == NULL)
185 handler->fork = process_event_stub;
186 if (handler->exit == NULL)
187 handler->exit = process_event_stub;
188 if (handler->lost == NULL)
189 handler->lost = process_event_stub;
190 if (handler->read == NULL)
191 handler->read = process_event_stub;
192 if (handler->throttle == NULL)
193 handler->throttle = process_event_stub;
194 if (handler->unthrottle == NULL)
195 handler->unthrottle = process_event_stub;
196}
197
198static const char *event__name[] = {
199 [0] = "TOTAL",
200 [PERF_RECORD_MMAP] = "MMAP",
201 [PERF_RECORD_LOST] = "LOST",
202 [PERF_RECORD_COMM] = "COMM",
203 [PERF_RECORD_EXIT] = "EXIT",
204 [PERF_RECORD_THROTTLE] = "THROTTLE",
205 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
206 [PERF_RECORD_FORK] = "FORK",
207 [PERF_RECORD_READ] = "READ",
208 [PERF_RECORD_SAMPLE] = "SAMPLE",
209};
210
211unsigned long event__total[PERF_RECORD_MAX];
212
213void event__print_totals(void)
214{
215 int i;
216 for (i = 0; i < PERF_RECORD_MAX; ++i)
217 pr_info("%10s events: %10ld\n",
218 event__name[i], event__total[i]);
219}
220
221void mem_bswap_64(void *src, int byte_size)
222{
223 u64 *m = src;
224
225 while (byte_size > 0) {
226 *m = bswap_64(*m);
227 byte_size -= sizeof(u64);
228 ++m;
229 }
230}
231
232static void event__all64_swap(event_t *self)
233{
234 struct perf_event_header *hdr = &self->header;
235 mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
236}
237
238static void event__comm_swap(event_t *self)
239{
240 self->comm.pid = bswap_32(self->comm.pid);
241 self->comm.tid = bswap_32(self->comm.tid);
242}
243
244static void event__mmap_swap(event_t *self)
245{
246 self->mmap.pid = bswap_32(self->mmap.pid);
247 self->mmap.tid = bswap_32(self->mmap.tid);
248 self->mmap.start = bswap_64(self->mmap.start);
249 self->mmap.len = bswap_64(self->mmap.len);
250 self->mmap.pgoff = bswap_64(self->mmap.pgoff);
251}
252
253static void event__task_swap(event_t *self)
254{
255 self->fork.pid = bswap_32(self->fork.pid);
256 self->fork.tid = bswap_32(self->fork.tid);
257 self->fork.ppid = bswap_32(self->fork.ppid);
258 self->fork.ptid = bswap_32(self->fork.ptid);
259 self->fork.time = bswap_64(self->fork.time);
260}
261
262static void event__read_swap(event_t *self)
263{
264 self->read.pid = bswap_32(self->read.pid);
265 self->read.tid = bswap_32(self->read.tid);
266 self->read.value = bswap_64(self->read.value);
267 self->read.time_enabled = bswap_64(self->read.time_enabled);
268 self->read.time_running = bswap_64(self->read.time_running);
269 self->read.id = bswap_64(self->read.id);
270}
271
272typedef void (*event__swap_op)(event_t *self);
273
274static event__swap_op event__swap_ops[] = {
275 [PERF_RECORD_MMAP] = event__mmap_swap,
276 [PERF_RECORD_COMM] = event__comm_swap,
277 [PERF_RECORD_FORK] = event__task_swap,
278 [PERF_RECORD_EXIT] = event__task_swap,
279 [PERF_RECORD_LOST] = event__all64_swap,
280 [PERF_RECORD_READ] = event__read_swap,
281 [PERF_RECORD_SAMPLE] = event__all64_swap,
282 [PERF_RECORD_MAX] = NULL,
283};
284
285static int perf_session__process_event(struct perf_session *self,
286 event_t *event,
287 struct perf_event_ops *ops,
288 u64 offset, u64 head)
289{
290 trace_event(event);
291
292 if (event->header.type < PERF_RECORD_MAX) {
293 dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
294 offset + head, event->header.size,
295 event__name[event->header.type]);
296 ++event__total[0];
297 ++event__total[event->header.type];
298 }
299
300 if (self->header.needs_swap && event->header.type < PERF_RECORD_MAX && event__swap_ops[event->header.type])
301 event__swap_ops[event->header.type](event);
302
303 switch (event->header.type) {
304 case PERF_RECORD_SAMPLE:
305 return ops->sample(event, self);
306 case PERF_RECORD_MMAP:
307 return ops->mmap(event, self);
308 case PERF_RECORD_COMM:
309 return ops->comm(event, self);
310 case PERF_RECORD_FORK:
311 return ops->fork(event, self);
312 case PERF_RECORD_EXIT:
313 return ops->exit(event, self);
314 case PERF_RECORD_LOST:
315 return ops->lost(event, self);
316 case PERF_RECORD_READ:
317 return ops->read(event, self);
318 case PERF_RECORD_THROTTLE:
319 return ops->throttle(event, self);
320 case PERF_RECORD_UNTHROTTLE:
321 return ops->unthrottle(event, self);
322 default:
323 self->unknown_events++;
324 return -1;
325 }
326}
327
328void perf_event_header__bswap(struct perf_event_header *self)
329{
330 self->type = bswap_32(self->type);
331 self->misc = bswap_16(self->misc);
332 self->size = bswap_16(self->size);
333}
334
335int perf_header__read_build_ids(struct perf_header *self,
336 int input, u64 offset, u64 size)
337{
338 struct build_id_event bev;
339 char filename[PATH_MAX];
340 u64 limit = offset + size;
341 int err = -1;
342
343 while (offset < limit) {
344 struct dso *dso;
345 ssize_t len;
346 struct list_head *head = &dsos__user;
347
348 if (read(input, &bev, sizeof(bev)) != sizeof(bev))
349 goto out;
350
351 if (self->needs_swap)
352 perf_event_header__bswap(&bev.header);
353
354 len = bev.header.size - sizeof(bev);
355 if (read(input, filename, len) != len)
356 goto out;
357
358 if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
359 head = &dsos__kernel;
360
361 dso = __dsos__findnew(head, filename);
362 if (dso != NULL) {
363 dso__set_build_id(dso, &bev.build_id);
364 if (head == &dsos__kernel && filename[0] == '[')
365 dso->kernel = 1;
366 }
367
368 offset += bev.header.size;
369 }
370 err = 0;
371out:
372 return err;
373}
374
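
perf_header__read_build_ids() above walks a byte range of variable-length records: a fixed header whose size field covers the whole record, then a filename of header.size minus the fixed part. The walk on its own, with an invented record layout (perf's real on-disk layout is struct build_id_event):

#include <stdint.h>
#include <unistd.h>

struct rec_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;		/* total record size, header included */
};

/* Consume records from fd until 'limit'; -1 on short or corrupt read. */
static int walk_records(int fd, uint64_t offset, uint64_t limit)
{
	while (offset < limit) {
		struct rec_header h;
		char payload[4096];
		ssize_t len;

		if (read(fd, &h, sizeof(h)) != sizeof(h))
			return -1;
		len = (ssize_t)h.size - (ssize_t)sizeof(h);
		if (len < 0 || (size_t)len > sizeof(payload) ||
		    read(fd, payload, len) != len)
			return -1;
		/* ... consume payload ... */
		offset += h.size;
	}
	return 0;
}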
375static struct thread *perf_session__register_idle_thread(struct perf_session *self)
376{
377 struct thread *thread = perf_session__findnew(self, 0);
378
379 if (thread == NULL || thread__set_comm(thread, "swapper")) {
380 pr_err("problem inserting idle task.\n");
381 thread = NULL;
382 }
383
384 return thread;
385}
386
387int __perf_session__process_events(struct perf_session *self,
388 u64 data_offset, u64 data_size,
389 u64 file_size, struct perf_event_ops *ops)
390{
391 int err, mmap_prot, mmap_flags;
392 u64 head, shift;
393 u64 offset = 0;
394 size_t page_size;
395 event_t *event;
396 uint32_t size;
397 char *buf;
398
399 perf_event_ops__fill_defaults(ops);
400
401 page_size = sysconf(_SC_PAGESIZE);
402
403 head = data_offset;
404 shift = page_size * (head / page_size);
405 offset += shift;
406 head -= shift;
407
408 mmap_prot = PROT_READ;
409 mmap_flags = MAP_SHARED;
410
411 if (self->header.needs_swap) {
412 mmap_prot |= PROT_WRITE;
413 mmap_flags = MAP_PRIVATE;
414 }
415remap:
416 buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
417 mmap_flags, self->fd, offset);
418 if (buf == MAP_FAILED) {
419 pr_err("failed to mmap file\n");
420 err = -errno;
421 goto out_err;
422 }
423
424more:
425 event = (event_t *)(buf + head);
426
427 if (self->header.needs_swap)
428 perf_event_header__bswap(&event->header);
429 size = event->header.size;
430 if (size == 0)
431 size = 8;
432
433 if (head + event->header.size >= page_size * self->mmap_window) {
434 int munmap_ret;
435
436 shift = page_size * (head / page_size);
437
438 munmap_ret = munmap(buf, page_size * self->mmap_window);
439 assert(munmap_ret == 0);
440
441 offset += shift;
442 head -= shift;
443 goto remap;
444 }
445
446 size = event->header.size;
447
448 dump_printf("\n%#Lx [%#x]: event: %d\n",
449 offset + head, event->header.size, event->header.type);
450
451 if (size == 0 ||
452 perf_session__process_event(self, event, ops, offset, head) < 0) {
453 dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
454 offset + head, event->header.size,
455 event->header.type);
456 /*
457 * assume we lost track of the stream, check alignment, and
458 * increment a single u64 in the hope to catch on again 'soon'.
459 */
460 if (unlikely(head & 7))
461 head &= ~7ULL;
462
463 size = 8;
464 }
465
466 head += size;
467
468 if (offset + head >= data_offset + data_size)
469 goto done;
470
471 if (offset + head < file_size)
472 goto more;
473done:
474 err = 0;
475out_err:
476 return err;
477}
478
479int perf_session__process_events(struct perf_session *self,
480 struct perf_event_ops *ops)
481{
482 int err;
483
484 if (perf_session__register_idle_thread(self) == NULL)
485 return -ENOMEM;
486
487 if (!symbol_conf.full_paths) {
488 char bf[PATH_MAX];
489
490 if (getcwd(bf, sizeof(bf)) == NULL) {
491 err = -errno;
492out_getcwd_err:
493 pr_err("failed to get the current directory\n");
494 goto out_err;
495 }
496 self->cwd = strdup(bf);
497 if (self->cwd == NULL) {
498 err = -ENOMEM;
499 goto out_getcwd_err;
500 }
501 self->cwdlen = strlen(self->cwd);
502 }
503
504 err = __perf_session__process_events(self, self->header.data_offset,
505 self->header.data_size,
506 self->size, ops);
507out_err:
508 return err;
509}
510
511bool perf_session__has_traces(struct perf_session *self, const char *msg)
512{
513 if (!(self->sample_type & PERF_SAMPLE_RAW)) {
514 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
515 return false;
516 }
517
518 return true;
519}
520
521int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
522 const char *symbol_name,
523 u64 addr)
524{
525 char *bracket;
526 enum map_type i;
527
528 self->ref_reloc_sym.name = strdup(symbol_name);
529 if (self->ref_reloc_sym.name == NULL)
530 return -ENOMEM;
531
532 bracket = strchr(self->ref_reloc_sym.name, ']');
533 if (bracket)
534 *bracket = '\0';
535
536 self->ref_reloc_sym.addr = addr;
537
538 for (i = 0; i < MAP__NR_TYPES; ++i) {
539 struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
540 kmap->ref_reloc_sym = &self->ref_reloc_sym;
541 }
542
543 return 0;
544}
545
546static u64 map__reloc_map_ip(struct map *map, u64 ip)
547{
548 return ip + (s64)map->pgoff;
549}
550
551static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
552{
553 return ip - (s64)map->pgoff;
554}
555
556void map__reloc_vmlinux(struct map *self)
557{
558 struct kmap *kmap = map__kmap(self);
559 s64 reloc;
560
561 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
562 return;
563
564 reloc = (kmap->ref_reloc_sym->unrelocated_addr -
565 kmap->ref_reloc_sym->addr);
566
567 if (!reloc)
568 return;
569
570 self->map_ip = map__reloc_map_ip;
571 self->unmap_ip = map__reloc_unmap_ip;
572 self->pgoff = reloc;
573}
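
A cross-endian perf.data file (header.needs_swap) has every record byte-swapped before dispatch; sample payloads are all 64-bit fields, so event__all64_swap() just swaps each u64 in place. The helper in isolation, with a quick self-check:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

static void mem_bswap_64(void *src, int byte_size)
{
	uint64_t *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(uint64_t);
		++m;
	}
}

int main(void)
{
	uint64_t v[2] = { 0x0102030405060708ULL, 1 };

	mem_bswap_64(v, sizeof(v));
	/* prints: 807060504030201 100000000000000 */
	printf("%llx %llx\n", (unsigned long long)v[0],
	       (unsigned long long)v[1]);
	return 0;
}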
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 32eaa1bada06..31950fcd8a4d 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -3,13 +3,13 @@
3 3
4#include "event.h" 4#include "event.h"
5#include "header.h" 5#include "header.h"
6#include "symbol.h"
6#include "thread.h" 7#include "thread.h"
7#include <linux/rbtree.h> 8#include <linux/rbtree.h>
8#include "../../../include/linux/perf_event.h" 9#include "../../../include/linux/perf_event.h"
9 10
10struct ip_callchain; 11struct ip_callchain;
11struct thread; 12struct thread;
12struct symbol;
13 13
14struct perf_session { 14struct perf_session {
15 struct perf_header header; 15 struct perf_header header;
@@ -18,10 +18,13 @@ struct perf_session {
18 struct map_groups kmaps; 18 struct map_groups kmaps;
19 struct rb_root threads; 19 struct rb_root threads;
20 struct thread *last_match; 20 struct thread *last_match;
21 struct map *vmlinux_maps[MAP__NR_TYPES];
21 struct events_stats events_stats; 22 struct events_stats events_stats;
22 unsigned long event_total[PERF_RECORD_MAX]; 23 unsigned long event_total[PERF_RECORD_MAX];
24 unsigned long unknown_events;
23 struct rb_root hists; 25 struct rb_root hists;
24 u64 sample_type; 26 u64 sample_type;
27 struct ref_reloc_sym ref_reloc_sym;
25 int fd; 28 int fd;
26 int cwdlen; 29 int cwdlen;
27 char *cwd; 30 char *cwd;
@@ -31,23 +34,25 @@ struct perf_session {
31typedef int (*event_op)(event_t *self, struct perf_session *session); 34typedef int (*event_op)(event_t *self, struct perf_session *session);
32 35
33struct perf_event_ops { 36struct perf_event_ops {
34 event_op process_sample_event; 37 event_op sample,
35 event_op process_mmap_event; 38 mmap,
36 event_op process_comm_event; 39 comm,
37 event_op process_fork_event; 40 fork,
38 event_op process_exit_event; 41 exit,
39 event_op process_lost_event; 42 lost,
40 event_op process_read_event; 43 read,
41 event_op process_throttle_event; 44 throttle,
42 event_op process_unthrottle_event; 45 unthrottle;
43 int (*sample_type_check)(struct perf_session *session);
44 unsigned long total_unknown;
45 bool full_paths;
46}; 46};
47 47
48struct perf_session *perf_session__new(const char *filename, int mode, bool force); 48struct perf_session *perf_session__new(const char *filename, int mode, bool force);
49void perf_session__delete(struct perf_session *self); 49void perf_session__delete(struct perf_session *self);
50 50
51void perf_event_header__bswap(struct perf_event_header *self);
52
53int __perf_session__process_events(struct perf_session *self,
54 u64 data_offset, u64 data_size, u64 size,
55 struct perf_event_ops *ops);
51int perf_session__process_events(struct perf_session *self, 56int perf_session__process_events(struct perf_session *self,
52 struct perf_event_ops *event_ops); 57 struct perf_event_ops *event_ops);
53 58
@@ -56,6 +61,28 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
56 struct ip_callchain *chain, 61 struct ip_callchain *chain,
57 struct symbol **parent); 62 struct symbol **parent);
58 63
59int perf_header__read_build_ids(int input, u64 offset, u64 file_size); 64bool perf_session__has_traces(struct perf_session *self, const char *msg);
65
66int perf_header__read_build_ids(struct perf_header *self, int input,
67 u64 offset, u64 file_size);
68
69int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
70 const char *symbol_name,
71 u64 addr);
72
73void mem_bswap_64(void *src, int byte_size);
74
75static inline int __perf_session__create_kernel_maps(struct perf_session *self,
76 struct dso *kernel)
77{
78 return __map_groups__create_kernel_maps(&self->kmaps,
79 self->vmlinux_maps, kernel);
80}
60 81
82static inline struct map *
83 perf_session__new_module_map(struct perf_session *self,
84 u64 start, const char *filename)
85{
86 return map_groups__new_module(&self->kmaps, start, filename);
87}
61#endif /* __PERF_SESSION_H */ 88#endif /* __PERF_SESSION_H */
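
With the callbacks renamed (process_sample_event becomes .sample, and so on), a builtin now fills in only the handlers it needs and lets perf_event_ops__fill_defaults() in session.c stub the rest. A hypothetical caller (the handler body is invented):

static int process_sample_event(event_t *event, struct perf_session *session)
{
	/* ... resolve the sample, update the tool's state ... */
	return 0;
}

static struct perf_event_ops event_ops = {
	.sample	= process_sample_event,
	/* .mmap, .comm, .fork, ... left NULL get logging stubs */
};

	/* ... */
	err = perf_session__process_events(session, &event_ops);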
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 5352d7dccc61..c397d4f6f748 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -227,16 +227,73 @@ fail:
227 return NULL; 227 return NULL;
228} 228}
229 229
230/* Glob expression pattern matching */ 230/* Character class matching */
231static bool __match_charclass(const char *pat, char c, const char **npat)
232{
233 bool complement = false, ret = true;
234
235 if (*pat == '!') {
236 complement = true;
237 pat++;
238 }
239 if (*pat++ == c) /* First character is special */
240 goto end;
241
242 while (*pat && *pat != ']') { /* Matching */
243 if (*pat == '-' && *(pat + 1) != ']') { /* Range */
244 if (*(pat - 1) <= c && c <= *(pat + 1))
245 goto end;
246 if (*(pat - 1) > *(pat + 1))
247 goto error;
248 pat += 2;
249 } else if (*pat++ == c)
250 goto end;
251 }
252 if (!*pat)
253 goto error;
254 ret = false;
255
256end:
257 while (*pat && *pat != ']') /* Search for the closing ']' */
258 pat++;
259 if (!*pat)
260 goto error;
261 *npat = pat + 1;
262 return complement ? !ret : ret;
263
264error:
265 return false;
266}
267
268/**
269 * strglobmatch - glob expression pattern matching
270 * @str: the target string to match
271 * @pat: the pattern string to match
272 *
273 * This returns true if @str matches @pat. @pat can include wildcards
274 * ('*','?') and character classes ([CHARS]; complementation and ranges are
275 * also supported). The escape character ('\') makes the following special
276 * character match as a normal character.
277 *
278 * Note: if @pat syntax is broken, this always returns false.
279 */
231bool strglobmatch(const char *str, const char *pat) 280bool strglobmatch(const char *str, const char *pat)
232{ 281{
233 while (*str && *pat && *pat != '*') { 282 while (*str && *pat && *pat != '*') {
234 if (*pat == '?') { 283 if (*pat == '?') { /* Matches any single character */
235 str++; 284 str++;
236 pat++; 285 pat++;
237 } else 286 continue;
238 if (*str++ != *pat++) 287 } else if (*pat == '[') /* Character classes/Ranges */
288 if (__match_charclass(pat + 1, *str, &pat)) {
289 str++;
290 continue;
291 } else
239 return false; 292 return false;
293 else if (*pat == '\\') /* Escaped char match as normal char */
294 pat++;
295 if (*str++ != *pat++)
296 return false;
240 } 297 }
241 /* Check wild card */ 298 /* Check wild card */
242 if (*pat == '*') { 299 if (*pat == '*') {
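
The effect of the new character-class support is easiest to see as test cases. A hypothetical harness linked against perf's string.c (the extern mirrors util/string.h):

#include <assert.h>
#include <stdbool.h>

extern bool strglobmatch(const char *str, const char *pat);

int main(void)
{
	assert(strglobmatch("sched_switch", "sched_*"));
	assert(strglobmatch("irq9", "irq[0-9]"));	/* range */
	assert(strglobmatch("x", "[!a-w]"));		/* complement */
	assert(strglobmatch("a*b", "a\\*b"));		/* escaped '*' */
	assert(!strglobmatch("abc", "a[x-z]c"));
	assert(!strglobmatch("abc", "a[x-"));		/* broken pattern */
	return 0;
}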
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index ab92763edb03..323c0aea0a91 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,6 +1,5 @@
1#include "util.h" 1#include "util.h"
2#include "../perf.h" 2#include "../perf.h"
3#include "session.h"
4#include "sort.h" 3#include "sort.h"
5#include "string.h" 4#include "string.h"
6#include "symbol.h" 5#include "symbol.h"
@@ -22,6 +21,7 @@
22enum dso_origin { 21enum dso_origin {
23 DSO__ORIG_KERNEL = 0, 22 DSO__ORIG_KERNEL = 0,
24 DSO__ORIG_JAVA_JIT, 23 DSO__ORIG_JAVA_JIT,
24 DSO__ORIG_BUILD_ID_CACHE,
25 DSO__ORIG_FEDORA, 25 DSO__ORIG_FEDORA,
26 DSO__ORIG_UBUNTU, 26 DSO__ORIG_UBUNTU,
27 DSO__ORIG_BUILDID, 27 DSO__ORIG_BUILDID,
@@ -33,7 +33,7 @@ enum dso_origin {
33static void dsos__add(struct list_head *head, struct dso *dso); 33static void dsos__add(struct list_head *head, struct dso *dso);
34static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 34static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
35static int dso__load_kernel_sym(struct dso *self, struct map *map, 35static int dso__load_kernel_sym(struct dso *self, struct map *map,
36 struct perf_session *session, symbol_filter_t filter); 36 symbol_filter_t filter);
37static int vmlinux_path__nr_entries; 37static int vmlinux_path__nr_entries;
38static char **vmlinux_path; 38static char **vmlinux_path;
39 39
@@ -53,17 +53,12 @@ bool dso__sorted_by_name(const struct dso *self, enum map_type type)
53 return self->sorted_by_name & (1 << type); 53 return self->sorted_by_name & (1 << type);
54} 54}
55 55
56static void dso__set_loaded(struct dso *self, enum map_type type)
57{
58 self->loaded |= (1 << type);
59}
60
61static void dso__set_sorted_by_name(struct dso *self, enum map_type type) 56static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
62{ 57{
63 self->sorted_by_name |= (1 << type); 58 self->sorted_by_name |= (1 << type);
64} 59}
65 60
66static bool symbol_type__is_a(char symbol_type, enum map_type map_type) 61bool symbol_type__is_a(char symbol_type, enum map_type map_type)
67{ 62{
68 switch (map_type) { 63 switch (map_type) {
69 case MAP__FUNCTION: 64 case MAP__FUNCTION:
@@ -142,14 +137,14 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name)
142 self->start = start; 137 self->start = start;
143 self->end = len ? start + len - 1 : start; 138 self->end = len ? start + len - 1 : start;
144 139
145 pr_debug3("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); 140 pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
146 141
147 memcpy(self->name, name, namelen); 142 memcpy(self->name, name, namelen);
148 143
149 return self; 144 return self;
150} 145}
151 146
152static void symbol__delete(struct symbol *self) 147void symbol__delete(struct symbol *self)
153{ 148{
154 free(((void *)self) - symbol_conf.priv_size); 149 free(((void *)self) - symbol_conf.priv_size);
155} 150}
@@ -160,7 +155,7 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp)
160 self->start, self->end, self->name); 155 self->start, self->end, self->name);
161} 156}
162 157
163static void dso__set_long_name(struct dso *self, char *name) 158void dso__set_long_name(struct dso *self, char *name)
164{ 159{
165 if (name == NULL) 160 if (name == NULL)
166 return; 161 return;
@@ -175,7 +170,7 @@ static void dso__set_basename(struct dso *self)
175 170
176struct dso *dso__new(const char *name) 171struct dso *dso__new(const char *name)
177{ 172{
178 struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); 173 struct dso *self = zalloc(sizeof(*self) + strlen(name) + 1);
179 174
180 if (self != NULL) { 175 if (self != NULL) {
181 int i; 176 int i;
@@ -344,10 +339,10 @@ void dso__sort_by_name(struct dso *self, enum map_type type)
344 &self->symbols[type]); 339 &self->symbols[type]);
345} 340}
346 341
347int build_id__sprintf(u8 *self, int len, char *bf) 342int build_id__sprintf(const u8 *self, int len, char *bf)
348{ 343{
349 char *bid = bf; 344 char *bid = bf;
350 u8 *raw = self; 345 const u8 *raw = self;
351 int i; 346 int i;
352 347
353 for (i = 0; i < len; ++i) { 348 for (i = 0; i < len; ++i) {
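
build_id__sprintf(), now taking a const buffer, renders the raw build-id bytes as lowercase hex. The same logic standalone (the caller supplies a buffer of 2 * len + 1 bytes, as in perf):

#include <stdio.h>

static int build_id_hex(const unsigned char *id, int len, char *bf)
{
	char *p = bf;
	int i;

	for (i = 0; i < len; ++i) {
		sprintf(p, "%02x", id[i]);
		p += 2;
	}
	return p - bf;	/* characters written */
}

int main(void)
{
	unsigned char id[3] = { 0xde, 0xad, 0x01 };
	char buf[sizeof(id) * 2 + 1];

	build_id_hex(id, sizeof(id), buf);
	printf("%s\n", buf);	/* dead01 */
	return 0;
}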
@@ -372,6 +367,10 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
372 struct rb_node *nd; 367 struct rb_node *nd;
373 size_t ret = fprintf(fp, "dso: %s (", self->short_name); 368 size_t ret = fprintf(fp, "dso: %s (", self->short_name);
374 369
370 if (self->short_name != self->long_name)
371 ret += fprintf(fp, "%s, ", self->long_name);
372 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
373 self->loaded ? "" : "NOT ");
375 ret += dso__fprintf_buildid(self, fp); 374 ret += dso__fprintf_buildid(self, fp);
376 ret += fprintf(fp, ")\n"); 375 ret += fprintf(fp, ")\n");
377 for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { 376 for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
@@ -382,24 +381,20 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
382 return ret; 381 return ret;
383} 382}
384 383
385/* 384int kallsyms__parse(const char *filename, void *arg,
386 * Loads the function entries in /proc/kallsyms into kernel_map->dso, 385 int (*process_symbol)(void *arg, const char *name,
387 * so that we can in the next step set the symbol ->end address and then 386 char type, u64 start))
388 * call kernel_maps__split_kallsyms.
389 */
390static int dso__load_all_kallsyms(struct dso *self, struct map *map)
391{ 387{
392 char *line = NULL; 388 char *line = NULL;
393 size_t n; 389 size_t n;
394 struct rb_root *root = &self->symbols[map->type]; 390 int err = 0;
395 FILE *file = fopen("/proc/kallsyms", "r"); 391 FILE *file = fopen(filename, "r");
396 392
397 if (file == NULL) 393 if (file == NULL)
398 goto out_failure; 394 goto out_failure;
399 395
400 while (!feof(file)) { 396 while (!feof(file)) {
401 u64 start; 397 u64 start;
402 struct symbol *sym;
403 int line_len, len; 398 int line_len, len;
404 char symbol_type; 399 char symbol_type;
405 char *symbol_name; 400 char *symbol_name;
@@ -420,43 +415,72 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map)
420 continue; 415 continue;
421 416
422 symbol_type = toupper(line[len]); 417 symbol_type = toupper(line[len]);
423 if (!symbol_type__is_a(symbol_type, map->type))
424 continue;
425
426 symbol_name = line + len + 2; 418 symbol_name = line + len + 2;
427 /*
428 * Will fix up the end later, when we have all symbols sorted.
429 */
430 sym = symbol__new(start, 0, symbol_name);
431 419
432 if (sym == NULL) 420 err = process_symbol(arg, symbol_name, symbol_type, start);
433 goto out_delete_line; 421 if (err)
434 /* 422 break;
435 * We will pass the symbols to the filter later, in
436 * map__split_kallsyms, when we have split the maps per module
437 */
438 symbols__insert(root, sym);
439 } 423 }
440 424
441 free(line); 425 free(line);
442 fclose(file); 426 fclose(file);
427 return err;
443 428
444 return 0;
445
446out_delete_line:
447 free(line);
448out_failure: 429out_failure:
449 return -1; 430 return -1;
450} 431}
451 432
433struct process_kallsyms_args {
434 struct map *map;
435 struct dso *dso;
436};
437
438static int map__process_kallsym_symbol(void *arg, const char *name,
439 char type, u64 start)
440{
441 struct symbol *sym;
442 struct process_kallsyms_args *a = arg;
443 struct rb_root *root = &a->dso->symbols[a->map->type];
444
445 if (!symbol_type__is_a(type, a->map->type))
446 return 0;
447
448 /*
449 * Will fix up the end later, when we have all symbols sorted.
450 */
451 sym = symbol__new(start, 0, name);
452
453 if (sym == NULL)
454 return -ENOMEM;
455 /*
456 * We will pass the symbols to the filter later, in
457 * map__split_kallsyms, when we have split the maps per module
458 */
459 symbols__insert(root, sym);
460 return 0;
461}
462
463/*
464 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 465 * so that in the next step we can set the symbol ->end addresses and
 466 * then call dso__split_kallsyms().
467 */
468static int dso__load_all_kallsyms(struct dso *self, const char *filename,
469 struct map *map)
470{
471 struct process_kallsyms_args args = { .map = map, .dso = self, };
472 return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
473}
474
452/* 475/*
453 * Split the symbols into maps, making sure there are no overlaps, i.e. the 476 * Split the symbols into maps, making sure there are no overlaps, i.e. the
454 * kernel range is broken in several maps, named [kernel].N, as we don't have 477 * kernel range is broken in several maps, named [kernel].N, as we don't have
 455 * the original ELF section names that vmlinux has. 478
456 */ 479 */
457static int dso__split_kallsyms(struct dso *self, struct map *map, 480static int dso__split_kallsyms(struct dso *self, struct map *map,
458 struct perf_session *session, symbol_filter_t filter) 481 symbol_filter_t filter)
459{ 482{
483 struct map_groups *kmaps = map__kmap(map)->kmaps;
460 struct map *curr_map = map; 484 struct map *curr_map = map;
461 struct symbol *pos; 485 struct symbol *pos;
462 int count = 0; 486 int count = 0;
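The hunk above splits the old monolithic loader in two: kallsyms__parse() owns the file and line handling and hands each parsed symbol to a caller-supplied callback. A minimal sketch of a hypothetical caller (count_text_syms and nr_kernel_text_syms are illustrative names, not part of this patch):

    /* Hypothetical consumer of the new callback-based parser. */
    #include <ctype.h>
    #include "symbol.h"     /* kallsyms__parse(), u64 */

    static int count_text_syms(void *arg, const char *name, char type, u64 start)
    {
            int *count = arg;

            (void)name; (void)start;
            if (toupper(type) == 'T')   /* text symbols only */
                    ++(*count);
            return 0;                   /* non-zero would stop the walk */
    }

    static int nr_kernel_text_syms(void)
    {
            int nr = 0;

            if (kallsyms__parse("/proc/kallsyms", &nr, count_text_syms) < 0)
                    return -1;
            return nr;
    }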
@@ -477,13 +501,17 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
477 501
478 *module++ = '\0'; 502 *module++ = '\0';
479 503
480 if (strcmp(self->name, module)) { 504 if (strcmp(curr_map->dso->short_name, module)) {
481 curr_map = map_groups__find_by_name(&session->kmaps, map->type, module); 505 curr_map = map_groups__find_by_name(kmaps, map->type, module);
482 if (curr_map == NULL) { 506 if (curr_map == NULL) {
483 pr_debug("/proc/{kallsyms,modules} " 507 pr_debug("/proc/{kallsyms,modules} "
484 "inconsistency!\n"); 508 "inconsistency while looking "
509 "for \"%s\" module!\n", module);
485 return -1; 510 return -1;
486 } 511 }
512
513 if (curr_map->dso->loaded)
514 goto discard_symbol;
487 } 515 }
488 /* 516 /*
489 * So that we look just like we get from .ko files, 517 * So that we look just like we get from .ko files,
@@ -503,13 +531,13 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
503 return -1; 531 return -1;
504 532
505 curr_map = map__new2(pos->start, dso, map->type); 533 curr_map = map__new2(pos->start, dso, map->type);
506 if (map == NULL) { 534 if (curr_map == NULL) {
507 dso__delete(dso); 535 dso__delete(dso);
508 return -1; 536 return -1;
509 } 537 }
510 538
511 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 539 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
512 map_groups__insert(&session->kmaps, curr_map); 540 map_groups__insert(kmaps, curr_map);
513 ++kernel_range; 541 ++kernel_range;
514 } 542 }
515 543
@@ -528,17 +556,16 @@ discard_symbol: rb_erase(&pos->rb_node, root);
528 return count; 556 return count;
529} 557}
530 558
531 559int dso__load_kallsyms(struct dso *self, const char *filename,
532static int dso__load_kallsyms(struct dso *self, struct map *map, 560 struct map *map, symbol_filter_t filter)
533 struct perf_session *session, symbol_filter_t filter)
534{ 561{
535 if (dso__load_all_kallsyms(self, map) < 0) 562 if (dso__load_all_kallsyms(self, filename, map) < 0)
536 return -1; 563 return -1;
537 564
538 symbols__fixup_end(&self->symbols[map->type]); 565 symbols__fixup_end(&self->symbols[map->type]);
539 self->origin = DSO__ORIG_KERNEL; 566 self->origin = DSO__ORIG_KERNEL;
540 567
541 return dso__split_kallsyms(self, map, session, filter); 568 return dso__split_kallsyms(self, map, filter);
542} 569}
543 570
544static int dso__load_perf_map(struct dso *self, struct map *map, 571static int dso__load_perf_map(struct dso *self, struct map *map,
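dso__load_kallsyms() above still calls symbols__fixup_end() because kallsyms records only start addresses. A sketch of that fixup, assuming it walks the sorted rb-tree the way the rest of this file does: each sizeless symbol is made to end one byte before its successor starts.

    static void symbols__fixup_end_sketch(struct rb_root *self)
    {
            struct rb_node *nd, *prevnd = rb_first(self);
            struct symbol *curr, *prev;

            if (prevnd == NULL)
                    return;

            curr = rb_entry(prevnd, struct symbol, rb_node);

            for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
                    prev = curr;
                    curr = rb_entry(nd, struct symbol, rb_node);
                    if (prev->end == prev->start)
                            prev->end = curr->start - 1;
            }
    }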
@@ -864,10 +891,10 @@ static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type
864 } 891 }
865} 892}
866 893
867static int dso__load_sym(struct dso *self, struct map *map, 894static int dso__load_sym(struct dso *self, struct map *map, const char *name,
868 struct perf_session *session, const char *name, int fd, 895 int fd, symbol_filter_t filter, int kmodule)
869 symbol_filter_t filter, int kernel, int kmodule)
870{ 896{
897 struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
871 struct map *curr_map = map; 898 struct map *curr_map = map;
872 struct dso *curr_dso = self; 899 struct dso *curr_dso = self;
873 size_t dso_name_len = strlen(self->short_name); 900 size_t dso_name_len = strlen(self->short_name);
@@ -924,7 +951,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
924 nr_syms = shdr.sh_size / shdr.sh_entsize; 951 nr_syms = shdr.sh_size / shdr.sh_entsize;
925 952
926 memset(&sym, 0, sizeof(sym)); 953 memset(&sym, 0, sizeof(sym));
927 if (!kernel) { 954 if (!self->kernel) {
928 self->adjust_symbols = (ehdr.e_type == ET_EXEC || 955 self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
929 elf_section_by_name(elf, &ehdr, &shdr, 956 elf_section_by_name(elf, &ehdr, &shdr,
930 ".gnu.prelink_undo", 957 ".gnu.prelink_undo",
@@ -933,11 +960,15 @@ static int dso__load_sym(struct dso *self, struct map *map,
933 960
934 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { 961 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
935 struct symbol *f; 962 struct symbol *f;
936 const char *elf_name; 963 const char *elf_name = elf_sym__name(&sym, symstrs);
937 char *demangled = NULL; 964 char *demangled = NULL;
938 int is_label = elf_sym__is_label(&sym); 965 int is_label = elf_sym__is_label(&sym);
939 const char *section_name; 966 const char *section_name;
940 967
968 if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
969 strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
970 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
971
941 if (!is_label && !elf_sym__is_a(&sym, map->type)) 972 if (!is_label && !elf_sym__is_a(&sym, map->type))
942 continue; 973 continue;
943 974
@@ -950,10 +981,9 @@ static int dso__load_sym(struct dso *self, struct map *map,
950 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) 981 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
951 continue; 982 continue;
952 983
953 elf_name = elf_sym__name(&sym, symstrs);
954 section_name = elf_sec__name(&shdr, secstrs); 984 section_name = elf_sec__name(&shdr, secstrs);
955 985
956 if (kernel || kmodule) { 986 if (self->kernel || kmodule) {
957 char dso_name[PATH_MAX]; 987 char dso_name[PATH_MAX];
958 988
959 if (strcmp(section_name, 989 if (strcmp(section_name,
@@ -969,7 +999,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
969 snprintf(dso_name, sizeof(dso_name), 999 snprintf(dso_name, sizeof(dso_name),
970 "%s%s", self->short_name, section_name); 1000 "%s%s", self->short_name, section_name);
971 1001
972 curr_map = map_groups__find_by_name(&session->kmaps, map->type, dso_name); 1002 curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
973 if (curr_map == NULL) { 1003 if (curr_map == NULL) {
974 u64 start = sym.st_value; 1004 u64 start = sym.st_value;
975 1005
@@ -980,7 +1010,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
980 if (curr_dso == NULL) 1010 if (curr_dso == NULL)
981 goto out_elf_end; 1011 goto out_elf_end;
982 curr_map = map__new2(start, curr_dso, 1012 curr_map = map__new2(start, curr_dso,
983 MAP__FUNCTION); 1013 map->type);
984 if (curr_map == NULL) { 1014 if (curr_map == NULL) {
985 dso__delete(curr_dso); 1015 dso__delete(curr_dso);
986 goto out_elf_end; 1016 goto out_elf_end;
@@ -988,8 +1018,9 @@ static int dso__load_sym(struct dso *self, struct map *map,
988 curr_map->map_ip = identity__map_ip; 1018 curr_map->map_ip = identity__map_ip;
989 curr_map->unmap_ip = identity__map_ip; 1019 curr_map->unmap_ip = identity__map_ip;
990 curr_dso->origin = DSO__ORIG_KERNEL; 1020 curr_dso->origin = DSO__ORIG_KERNEL;
991 map_groups__insert(&session->kmaps, curr_map); 1021 map_groups__insert(kmap->kmaps, curr_map);
992 dsos__add(&dsos__kernel, curr_dso); 1022 dsos__add(&dsos__kernel, curr_dso);
1023 dso__set_loaded(curr_dso, map->type);
993 } else 1024 } else
994 curr_dso = curr_map->dso; 1025 curr_dso = curr_map->dso;
995 1026
@@ -997,9 +1028,10 @@ static int dso__load_sym(struct dso *self, struct map *map,
997 } 1028 }
998 1029
999 if (curr_dso->adjust_symbols) { 1030 if (curr_dso->adjust_symbols) {
1000 pr_debug2("adjusting symbol: st_value: %Lx sh_addr: " 1031 pr_debug4("%s: adjusting symbol: st_value: %#Lx "
1001 "%Lx sh_offset: %Lx\n", (u64)sym.st_value, 1032 "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
1002 (u64)shdr.sh_addr, (u64)shdr.sh_offset); 1033 (u64)sym.st_value, (u64)shdr.sh_addr,
1034 (u64)shdr.sh_offset);
1003 sym.st_value -= shdr.sh_addr - shdr.sh_offset; 1035 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1004 } 1036 }
1005 /* 1037 /*
@@ -1027,8 +1059,16 @@ new_symbol:
1027 /* 1059 /*
1028 * For misannotated, zeroed, ASM function sizes. 1060 * For misannotated, zeroed, ASM function sizes.
1029 */ 1061 */
1030 if (nr > 0) 1062 if (nr > 0) {
1031 symbols__fixup_end(&self->symbols[map->type]); 1063 symbols__fixup_end(&self->symbols[map->type]);
1064 if (kmap) {
1065 /*
1066 * We need to fixup this here too because we create new
1067 * maps here, for things like vsyscall sections.
1068 */
1069 __map_groups__fixup_end(kmap->kmaps, map->type);
1070 }
1071 }
1032 err = nr; 1072 err = nr;
1033out_elf_end: 1073out_elf_end:
1034 elf_end(elf); 1074 elf_end(elf);
@@ -1041,25 +1081,28 @@ static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
1041 return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; 1081 return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
1042} 1082}
1043 1083
1044static bool __dsos__read_build_ids(struct list_head *head) 1084static bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1045{ 1085{
1046 bool have_build_id = false; 1086 bool have_build_id = false;
1047 struct dso *pos; 1087 struct dso *pos;
1048 1088
1049 list_for_each_entry(pos, head, node) 1089 list_for_each_entry(pos, head, node) {
1090 if (with_hits && !pos->hit)
1091 continue;
1050 if (filename__read_build_id(pos->long_name, pos->build_id, 1092 if (filename__read_build_id(pos->long_name, pos->build_id,
1051 sizeof(pos->build_id)) > 0) { 1093 sizeof(pos->build_id)) > 0) {
1052 have_build_id = true; 1094 have_build_id = true;
1053 pos->has_build_id = true; 1095 pos->has_build_id = true;
1054 } 1096 }
1097 }
1055 1098
1056 return have_build_id; 1099 return have_build_id;
1057} 1100}
1058 1101
1059bool dsos__read_build_ids(void) 1102bool dsos__read_build_ids(bool with_hits)
1060{ 1103{
1061 bool kbuildids = __dsos__read_build_ids(&dsos__kernel), 1104 bool kbuildids = __dsos__read_build_ids(&dsos__kernel, with_hits),
1062 ubuildids = __dsos__read_build_ids(&dsos__user); 1105 ubuildids = __dsos__read_build_ids(&dsos__user, with_hits);
1063 return kbuildids || ubuildids; 1106 return kbuildids || ubuildids;
1064} 1107}
1065 1108
@@ -1191,6 +1234,7 @@ char dso__symtab_origin(const struct dso *self)
1191 static const char origin[] = { 1234 static const char origin[] = {
1192 [DSO__ORIG_KERNEL] = 'k', 1235 [DSO__ORIG_KERNEL] = 'k',
1193 [DSO__ORIG_JAVA_JIT] = 'j', 1236 [DSO__ORIG_JAVA_JIT] = 'j',
1237 [DSO__ORIG_BUILD_ID_CACHE] = 'B',
1194 [DSO__ORIG_FEDORA] = 'f', 1238 [DSO__ORIG_FEDORA] = 'f',
1195 [DSO__ORIG_UBUNTU] = 'u', 1239 [DSO__ORIG_UBUNTU] = 'u',
1196 [DSO__ORIG_BUILDID] = 'b', 1240 [DSO__ORIG_BUILDID] = 'b',
@@ -1203,19 +1247,19 @@ char dso__symtab_origin(const struct dso *self)
1203 return origin[self->origin]; 1247 return origin[self->origin];
1204} 1248}
1205 1249
1206int dso__load(struct dso *self, struct map *map, struct perf_session *session, 1250int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
1207 symbol_filter_t filter)
1208{ 1251{
1209 int size = PATH_MAX; 1252 int size = PATH_MAX;
1210 char *name; 1253 char *name;
1211 u8 build_id[BUILD_ID_SIZE]; 1254 u8 build_id[BUILD_ID_SIZE];
1255 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
1212 int ret = -1; 1256 int ret = -1;
1213 int fd; 1257 int fd;
1214 1258
1215 dso__set_loaded(self, map->type); 1259 dso__set_loaded(self, map->type);
1216 1260
1217 if (self->kernel) 1261 if (self->kernel)
1218 return dso__load_kernel_sym(self, map, session, filter); 1262 return dso__load_kernel_sym(self, map, filter);
1219 1263
1220 name = malloc(size); 1264 name = malloc(size);
1221 if (!name) 1265 if (!name)
@@ -1230,8 +1274,16 @@ int dso__load(struct dso *self, struct map *map, struct perf_session *session,
1230 return ret; 1274 return ret;
1231 } 1275 }
1232 1276
1233 self->origin = DSO__ORIG_FEDORA - 1; 1277 self->origin = DSO__ORIG_BUILD_ID_CACHE;
1234 1278
1279 if (self->has_build_id) {
1280 build_id__sprintf(self->build_id, sizeof(self->build_id),
1281 build_id_hex);
1282 snprintf(name, size, "%s/%s/.build-id/%.2s/%s",
1283 getenv("HOME"), DEBUG_CACHE_DIR,
1284 build_id_hex, build_id_hex + 2);
1285 goto open_file;
1286 }
1235more: 1287more:
1236 do { 1288 do {
1237 self->origin++; 1289 self->origin++;
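The build-id cache probe added above composes $HOME/.debug/.build-id/<first two hex digits>/<remaining digits>: the two-character fan-out keeps any one directory small, the same scheme git uses for loose objects. An illustration with a made-up build-id:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #define DEBUG_CACHE_DIR ".debug"    /* as defined in symbol.h below */

    /* Made-up 20-byte build-id, rendered as 40 hex digits. */
    static const char hex[] = "4f0e6e9b2f2b0a1c9d8e7f6a5b4c3d2e1f0a9b8c";

    static void cache_path(char *bf, size_t size)
    {
            snprintf(bf, size, "%s/%s/.build-id/%.2s/%s",
                     getenv("HOME"), DEBUG_CACHE_DIR, hex, hex + 2);
            /* -> e.g. /home/acme/.debug/.build-id/4f/0e6e9b...9b8c */
    }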
@@ -1247,8 +1299,6 @@ more:
1247 case DSO__ORIG_BUILDID: 1299 case DSO__ORIG_BUILDID:
1248 if (filename__read_build_id(self->long_name, build_id, 1300 if (filename__read_build_id(self->long_name, build_id,
1249 sizeof(build_id))) { 1301 sizeof(build_id))) {
1250 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
1251
1252 build_id__sprintf(build_id, sizeof(build_id), 1302 build_id__sprintf(build_id, sizeof(build_id),
1253 build_id_hex); 1303 build_id_hex);
1254 snprintf(name, size, 1304 snprintf(name, size,
@@ -1276,11 +1326,11 @@ compare_build_id:
1276 if (!dso__build_id_equal(self, build_id)) 1326 if (!dso__build_id_equal(self, build_id))
1277 goto more; 1327 goto more;
1278 } 1328 }
1279 1329open_file:
1280 fd = open(name, O_RDONLY); 1330 fd = open(name, O_RDONLY);
1281 } while (fd < 0); 1331 } while (fd < 0);
1282 1332
1283 ret = dso__load_sym(self, map, NULL, name, fd, filter, 0, 0); 1333 ret = dso__load_sym(self, map, name, fd, filter, 0);
1284 close(fd); 1334 close(fd);
1285 1335
1286 /* 1336 /*
@@ -1309,14 +1359,34 @@ struct map *map_groups__find_by_name(struct map_groups *self,
1309 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { 1359 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
1310 struct map *map = rb_entry(nd, struct map, rb_node); 1360 struct map *map = rb_entry(nd, struct map, rb_node);
1311 1361
1312 if (map->dso && strcmp(map->dso->name, name) == 0) 1362 if (map->dso && strcmp(map->dso->short_name, name) == 0)
1313 return map; 1363 return map;
1314 } 1364 }
1315 1365
1316 return NULL; 1366 return NULL;
1317} 1367}
1318 1368
1319static int perf_session__set_modules_path_dir(struct perf_session *self, char *dirname) 1369static int dso__kernel_module_get_build_id(struct dso *self)
1370{
1371 char filename[PATH_MAX];
1372 /*
1373 * kernel module short names are of the form "[module]" and
1374 * we need just "module" here.
1375 */
1376 const char *name = self->short_name + 1;
1377
1378 snprintf(filename, sizeof(filename),
1379 "/sys/module/%.*s/notes/.note.gnu.build-id",
 1380 (int)strlen(name) - 1, name);
1381
1382 if (sysfs__read_build_id(filename, self->build_id,
1383 sizeof(self->build_id)) == 0)
1384 self->has_build_id = true;
1385
1386 return 0;
1387}
1388
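dso__kernel_module_get_build_id() reads the note file the kernel exports for every loaded module. For reference, a sketch of the same lookup for an assumed module name (ext4 is illustrative):

    #include "symbol.h"     /* sysfs__read_build_id(), BUILD_ID_SIZE, u8 */

    static int ext4_build_id(u8 *bf, size_t size)
    {
            /* The note file is a raw ELF note; sysfs__read_build_id() is
             * assumed to skip the header and copy the descriptor bytes. */
            return sysfs__read_build_id(
                    "/sys/module/ext4/notes/.note.gnu.build-id",
                    bf, size);  /* 0 on success */
    }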
1389static int map_groups__set_modules_path_dir(struct map_groups *self, char *dirname)
1320{ 1390{
1321 struct dirent *dent; 1391 struct dirent *dent;
1322 DIR *dir = opendir(dirname); 1392 DIR *dir = opendir(dirname);
@@ -1336,7 +1406,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d
1336 1406
1337 snprintf(path, sizeof(path), "%s/%s", 1407 snprintf(path, sizeof(path), "%s/%s",
1338 dirname, dent->d_name); 1408 dirname, dent->d_name);
1339 if (perf_session__set_modules_path_dir(self, path) < 0) 1409 if (map_groups__set_modules_path_dir(self, path) < 0)
1340 goto failure; 1410 goto failure;
1341 } else { 1411 } else {
1342 char *dot = strrchr(dent->d_name, '.'), 1412 char *dot = strrchr(dent->d_name, '.'),
@@ -1350,7 +1420,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d
1350 (int)(dot - dent->d_name), dent->d_name); 1420 (int)(dot - dent->d_name), dent->d_name);
1351 1421
1352 strxfrchar(dso_name, '-', '_'); 1422 strxfrchar(dso_name, '-', '_');
1353 map = map_groups__find_by_name(&self->kmaps, MAP__FUNCTION, dso_name); 1423 map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name);
1354 if (map == NULL) 1424 if (map == NULL)
1355 continue; 1425 continue;
1356 1426
@@ -1361,6 +1431,7 @@ static int perf_session__set_modules_path_dir(struct perf_session *self, char *d
1361 if (long_name == NULL) 1431 if (long_name == NULL)
1362 goto failure; 1432 goto failure;
1363 dso__set_long_name(map->dso, long_name); 1433 dso__set_long_name(map->dso, long_name);
1434 dso__kernel_module_get_build_id(map->dso);
1364 } 1435 }
1365 } 1436 }
1366 1437
@@ -1370,7 +1441,7 @@ failure:
1370 return -1; 1441 return -1;
1371} 1442}
1372 1443
1373static int perf_session__set_modules_path(struct perf_session *self) 1444static int map_groups__set_modules_path(struct map_groups *self)
1374{ 1445{
1375 struct utsname uts; 1446 struct utsname uts;
1376 char modules_path[PATH_MAX]; 1447 char modules_path[PATH_MAX];
@@ -1381,7 +1452,7 @@ static int perf_session__set_modules_path(struct perf_session *self)
1381 snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", 1452 snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel",
1382 uts.release); 1453 uts.release);
1383 1454
1384 return perf_session__set_modules_path_dir(self, modules_path); 1455 return map_groups__set_modules_path_dir(self, modules_path);
1385} 1456}
1386 1457
1387/* 1458/*
@@ -1391,8 +1462,8 @@ static int perf_session__set_modules_path(struct perf_session *self)
1391 */ 1462 */
1392static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) 1463static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
1393{ 1464{
1394 struct map *self = malloc(sizeof(*self)); 1465 struct map *self = zalloc(sizeof(*self) +
1395 1466 (dso->kernel ? sizeof(struct kmap) : 0));
1396 if (self != NULL) { 1467 if (self != NULL) {
1397 /* 1468 /*
1398 * ->end will be filled after we load all the symbols 1469 * ->end will be filled after we load all the symbols
@@ -1403,7 +1474,25 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
1403 return self; 1474 return self;
1404} 1475}
1405 1476
1406static int perf_session__create_module_maps(struct perf_session *self) 1477struct map *map_groups__new_module(struct map_groups *self, u64 start,
1478 const char *filename)
1479{
1480 struct map *map;
1481 struct dso *dso = __dsos__findnew(&dsos__kernel, filename);
1482
1483 if (dso == NULL)
1484 return NULL;
1485
1486 map = map__new2(start, dso, MAP__FUNCTION);
1487 if (map == NULL)
1488 return NULL;
1489
1490 dso->origin = DSO__ORIG_KMODULE;
1491 map_groups__insert(self, map);
1492 return map;
1493}
1494
1495static int map_groups__create_modules(struct map_groups *self)
1407{ 1496{
1408 char *line = NULL; 1497 char *line = NULL;
1409 size_t n; 1498 size_t n;
@@ -1416,7 +1505,6 @@ static int perf_session__create_module_maps(struct perf_session *self)
1416 while (!feof(file)) { 1505 while (!feof(file)) {
1417 char name[PATH_MAX]; 1506 char name[PATH_MAX];
1418 u64 start; 1507 u64 start;
1419 struct dso *dso;
1420 char *sep; 1508 char *sep;
1421 int line_len; 1509 int line_len;
1422 1510
@@ -1442,32 +1530,16 @@ static int perf_session__create_module_maps(struct perf_session *self)
1442 *sep = '\0'; 1530 *sep = '\0';
1443 1531
1444 snprintf(name, sizeof(name), "[%s]", line); 1532 snprintf(name, sizeof(name), "[%s]", line);
1445 dso = dso__new(name); 1533 map = map_groups__new_module(self, start, name);
1446 1534 if (map == NULL)
1447 if (dso == NULL)
1448 goto out_delete_line;
1449
1450 map = map__new2(start, dso, MAP__FUNCTION);
1451 if (map == NULL) {
1452 dso__delete(dso);
1453 goto out_delete_line; 1535 goto out_delete_line;
1454 } 1536 dso__kernel_module_get_build_id(map->dso);
1455
1456 snprintf(name, sizeof(name),
1457 "/sys/module/%s/notes/.note.gnu.build-id", line);
1458 if (sysfs__read_build_id(name, dso->build_id,
1459 sizeof(dso->build_id)) == 0)
1460 dso->has_build_id = true;
1461
1462 dso->origin = DSO__ORIG_KMODULE;
1463 map_groups__insert(&self->kmaps, map);
1464 dsos__add(&dsos__kernel, dso);
1465 } 1537 }
1466 1538
1467 free(line); 1539 free(line);
1468 fclose(file); 1540 fclose(file);
1469 1541
1470 return perf_session__set_modules_path(self); 1542 return map_groups__set_modules_path(self);
1471 1543
1472out_delete_line: 1544out_delete_line:
1473 free(line); 1545 free(line);
@@ -1476,7 +1548,6 @@ out_failure:
1476} 1548}
1477 1549
1478static int dso__load_vmlinux(struct dso *self, struct map *map, 1550static int dso__load_vmlinux(struct dso *self, struct map *map,
1479 struct perf_session *session,
1480 const char *vmlinux, symbol_filter_t filter) 1551 const char *vmlinux, symbol_filter_t filter)
1481{ 1552{
1482 int err = -1, fd; 1553 int err = -1, fd;
@@ -1510,51 +1581,124 @@ static int dso__load_vmlinux(struct dso *self, struct map *map,
1510 return -1; 1581 return -1;
1511 1582
1512 dso__set_loaded(self, map->type); 1583 dso__set_loaded(self, map->type);
1513 err = dso__load_sym(self, map, session, self->long_name, fd, filter, 1, 0); 1584 err = dso__load_sym(self, map, vmlinux, fd, filter, 0);
1514 close(fd); 1585 close(fd);
1515 1586
1587 if (err > 0)
1588 pr_debug("Using %s for symbols\n", vmlinux);
1589
1590 return err;
1591}
1592
1593int dso__load_vmlinux_path(struct dso *self, struct map *map,
1594 symbol_filter_t filter)
1595{
1596 int i, err = 0;
1597
1598 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1599 vmlinux_path__nr_entries);
1600
1601 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1602 err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
1603 if (err > 0) {
1604 dso__set_long_name(self, strdup(vmlinux_path[i]));
1605 break;
1606 }
1607 }
1608
1516 return err; 1609 return err;
1517} 1610}
1518 1611
1519static int dso__load_kernel_sym(struct dso *self, struct map *map, 1612static int dso__load_kernel_sym(struct dso *self, struct map *map,
1520 struct perf_session *session, symbol_filter_t filter) 1613 symbol_filter_t filter)
1521{ 1614{
1522 int err; 1615 int err;
1523 bool is_kallsyms; 1616 const char *kallsyms_filename = NULL;
1617 char *kallsyms_allocated_filename = NULL;
1618 /*
1619 * Step 1: if the user specified a vmlinux filename, use it and only
1620 * it, reporting errors to the user if it cannot be used.
1621 *
1622 * For instance, try to analyse an ARM perf.data file _without_ a
1623 * build-id, or if the user specifies the wrong path to the right
 1624 * vmlinux file, obviously we can't fall back to another vmlinux (an
 1625 * x86_64 one, on the machine where analysis is being performed, say),
1626 * or worse, /proc/kallsyms.
1627 *
1628 * If the specified file _has_ a build-id and there is a build-id
1629 * section in the perf.data file, we will still do the expected
1630 * validation in dso__load_vmlinux and will bail out if they don't
1631 * match.
1632 */
1633 if (symbol_conf.vmlinux_name != NULL) {
1634 err = dso__load_vmlinux(self, map,
1635 symbol_conf.vmlinux_name, filter);
1636 goto out_try_fixup;
1637 }
1524 1638
1525 if (vmlinux_path != NULL) { 1639 if (vmlinux_path != NULL) {
1526 int i; 1640 err = dso__load_vmlinux_path(self, map, filter);
1527 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1641 if (err > 0)
1528 vmlinux_path__nr_entries); 1642 goto out_fixup;
1529 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1643 }
1530 err = dso__load_vmlinux(self, map, session, 1644
1531 vmlinux_path[i], filter); 1645 /*
 1532 if (err > 0) { 1646 * If the kernel DSO was created when processing the build-id header
 1533 pr_debug("Using %s for symbols\n", 1647 * table, we have a build-id, so check whether it matches the running
 1534 vmlinux_path[i]); 1648 * kernel and, if so, use /proc/kallsyms.
1535 dso__set_long_name(self, 1649 */
1536 strdup(vmlinux_path[i])); 1650 if (self->has_build_id) {
1537 goto out_fixup; 1651 u8 kallsyms_build_id[BUILD_ID_SIZE];
1652 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1653
1654 if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
1655 sizeof(kallsyms_build_id)) == 0) {
1656 if (dso__build_id_equal(self, kallsyms_build_id)) {
1657 kallsyms_filename = "/proc/kallsyms";
1658 goto do_kallsyms;
1538 } 1659 }
1539 } 1660 }
1540 } 1661 /*
 1662 * Now see if we have it in the build-id cache under
1663 * $HOME/.debug/[kernel.kallsyms].
1664 */
1665 build_id__sprintf(self->build_id, sizeof(self->build_id),
1666 sbuild_id);
1541 1667
1542 is_kallsyms = self->long_name[0] == '['; 1668 if (asprintf(&kallsyms_allocated_filename,
1543 if (is_kallsyms) 1669 "%s/.debug/[kernel.kallsyms]/%s",
1544 goto do_kallsyms; 1670 getenv("HOME"), sbuild_id) == -1) {
1671 pr_err("Not enough memory for kallsyms file lookup\n");
1672 return -1;
1673 }
1545 1674
1546 err = dso__load_vmlinux(self, map, session, self->long_name, filter); 1675 kallsyms_filename = kallsyms_allocated_filename;
1547 if (err <= 0) { 1676
1548 pr_info("The file %s cannot be used, " 1677 if (access(kallsyms_filename, F_OK)) {
1549 "trying to use /proc/kallsyms...", self->long_name); 1678 pr_err("No kallsyms or vmlinux with build-id %s "
1550do_kallsyms: 1679 "was found\n", sbuild_id);
1551 err = dso__load_kallsyms(self, map, session, filter); 1680 free(kallsyms_allocated_filename);
1552 if (err > 0 && !is_kallsyms) 1681 return -1;
1553 dso__set_long_name(self, strdup("[kernel.kallsyms]")); 1682 }
1683 } else {
1684 /*
1685 * Last resort, if we don't have a build-id and couldn't find
1686 * any vmlinux file, try the running kernel kallsyms table.
1687 */
1688 kallsyms_filename = "/proc/kallsyms";
1554 } 1689 }
1555 1690
1691do_kallsyms:
1692 err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
1693 if (err > 0)
1694 pr_debug("Using %s for symbols\n", kallsyms_filename);
1695 free(kallsyms_allocated_filename);
1696
1697out_try_fixup:
1556 if (err > 0) { 1698 if (err > 0) {
1557out_fixup: 1699out_fixup:
1700 if (kallsyms_filename != NULL)
1701 dso__set_long_name(self, strdup("[kernel.kallsyms]"));
1558 map__fixup_start(map); 1702 map__fixup_start(map);
1559 map__fixup_end(map); 1703 map__fixup_end(map);
1560 } 1704 }
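Taken together, the rewritten dso__load_kernel_sym() tries symbol sources in a fixed order. A summary of the ladder the code above implements:

    /*
     * 1. symbol_conf.vmlinux_name set: use it and nothing else;
     * 2. otherwise walk the vmlinux_path search list;
     * 3. DSO has a build-id matching /sys/kernel/notes: /proc/kallsyms;
     * 4. build-id that doesn't match the running kernel: the cached copy
     *    under $HOME/.debug/[kernel.kallsyms]/<build-id>, or fail;
     * 5. no build-id at all: fall back to /proc/kallsyms.
     */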
@@ -1564,7 +1708,6 @@ out_fixup:
1564 1708
1565LIST_HEAD(dsos__user); 1709LIST_HEAD(dsos__user);
1566LIST_HEAD(dsos__kernel); 1710LIST_HEAD(dsos__kernel);
1567struct dso *vdso;
1568 1711
1569static void dsos__add(struct list_head *head, struct dso *dso) 1712static void dsos__add(struct list_head *head, struct dso *dso)
1570{ 1713{
@@ -1576,19 +1719,19 @@ static struct dso *dsos__find(struct list_head *head, const char *name)
1576 struct dso *pos; 1719 struct dso *pos;
1577 1720
1578 list_for_each_entry(pos, head, node) 1721 list_for_each_entry(pos, head, node)
1579 if (strcmp(pos->name, name) == 0) 1722 if (strcmp(pos->long_name, name) == 0)
1580 return pos; 1723 return pos;
1581 return NULL; 1724 return NULL;
1582} 1725}
1583 1726
1584struct dso *dsos__findnew(const char *name) 1727struct dso *__dsos__findnew(struct list_head *head, const char *name)
1585{ 1728{
1586 struct dso *dso = dsos__find(&dsos__user, name); 1729 struct dso *dso = dsos__find(head, name);
1587 1730
1588 if (!dso) { 1731 if (!dso) {
1589 dso = dso__new(name); 1732 dso = dso__new(name);
1590 if (dso != NULL) { 1733 if (dso != NULL) {
1591 dsos__add(&dsos__user, dso); 1734 dsos__add(head, dso);
1592 dso__set_basename(dso); 1735 dso__set_basename(dso);
1593 } 1736 }
1594 } 1737 }
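map_groups__new_module() earlier in this diff funnels module DSOs through __dsos__findnew() on the kernel list, so repeated loads of the same module share one dso. A usage sketch (the module name is illustrative):

    static struct dso *ext4_dso(void)
    {
            /* Find-or-create; "[ext4]" follows the bracketed short-name
             * convention /proc/modules entries get above. */
            return __dsos__findnew(&dsos__kernel, "[ext4]");
    }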
@@ -1613,75 +1756,78 @@ void dsos__fprintf(FILE *fp)
1613 __dsos__fprintf(&dsos__user, fp); 1756 __dsos__fprintf(&dsos__user, fp);
1614} 1757}
1615 1758
1616static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp) 1759static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1760 bool with_hits)
1617{ 1761{
1618 struct dso *pos; 1762 struct dso *pos;
1619 size_t ret = 0; 1763 size_t ret = 0;
1620 1764
1621 list_for_each_entry(pos, head, node) { 1765 list_for_each_entry(pos, head, node) {
1766 if (with_hits && !pos->hit)
1767 continue;
1622 ret += dso__fprintf_buildid(pos, fp); 1768 ret += dso__fprintf_buildid(pos, fp);
1623 ret += fprintf(fp, " %s\n", pos->long_name); 1769 ret += fprintf(fp, " %s\n", pos->long_name);
1624 } 1770 }
1625 return ret; 1771 return ret;
1626} 1772}
1627 1773
1628size_t dsos__fprintf_buildid(FILE *fp) 1774size_t dsos__fprintf_buildid(FILE *fp, bool with_hits)
1629{ 1775{
1630 return (__dsos__fprintf_buildid(&dsos__kernel, fp) + 1776 return (__dsos__fprintf_buildid(&dsos__kernel, fp, with_hits) +
1631 __dsos__fprintf_buildid(&dsos__user, fp)); 1777 __dsos__fprintf_buildid(&dsos__user, fp, with_hits));
1632} 1778}
1633 1779
1634static struct dso *dsos__create_kernel( const char *vmlinux) 1780struct dso *dso__new_kernel(const char *name)
1635{ 1781{
1636 struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]"); 1782 struct dso *self = dso__new(name ?: "[kernel.kallsyms]");
1637 1783
1638 if (kernel == NULL) 1784 if (self != NULL) {
1639 return NULL; 1785 self->short_name = "[kernel]";
1786 self->kernel = 1;
1787 }
1640 1788
1641 kernel->short_name = "[kernel]"; 1789 return self;
1642 kernel->kernel = 1; 1790}
1643 1791
1644 vdso = dso__new("[vdso]"); 1792void dso__read_running_kernel_build_id(struct dso *self)
1645 if (vdso == NULL) 1793{
1646 goto out_delete_kernel_dso; 1794 if (sysfs__read_build_id("/sys/kernel/notes", self->build_id,
1647 dso__set_loaded(vdso, MAP__FUNCTION); 1795 sizeof(self->build_id)) == 0)
1796 self->has_build_id = true;
1797}
1648 1798
1649 if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id, 1799static struct dso *dsos__create_kernel(const char *vmlinux)
1650 sizeof(kernel->build_id)) == 0) 1800{
1651 kernel->has_build_id = true; 1801 struct dso *kernel = dso__new_kernel(vmlinux);
1652 1802
1653 dsos__add(&dsos__kernel, kernel); 1803 if (kernel != NULL) {
1654 dsos__add(&dsos__user, vdso); 1804 dso__read_running_kernel_build_id(kernel);
1805 dsos__add(&dsos__kernel, kernel);
1806 }
1655 1807
1656 return kernel; 1808 return kernel;
1657
1658out_delete_kernel_dso:
1659 dso__delete(kernel);
1660 return NULL;
1661} 1809}
1662 1810
1663static int map_groups__create_kernel_maps(struct map_groups *self, const char *vmlinux) 1811int __map_groups__create_kernel_maps(struct map_groups *self,
1812 struct map *vmlinux_maps[MAP__NR_TYPES],
1813 struct dso *kernel)
1664{ 1814{
1665 struct map *functions, *variables; 1815 enum map_type type;
1666 struct dso *kernel = dsos__create_kernel(vmlinux);
1667 1816
1668 if (kernel == NULL) 1817 for (type = 0; type < MAP__NR_TYPES; ++type) {
1669 return -1; 1818 struct kmap *kmap;
1670 1819
1671 functions = map__new2(0, kernel, MAP__FUNCTION); 1820 vmlinux_maps[type] = map__new2(0, kernel, type);
1672 if (functions == NULL) 1821 if (vmlinux_maps[type] == NULL)
1673 return -1; 1822 return -1;
1674 1823
1675 variables = map__new2(0, kernel, MAP__VARIABLE); 1824 vmlinux_maps[type]->map_ip =
1676 if (variables == NULL) { 1825 vmlinux_maps[type]->unmap_ip = identity__map_ip;
1677 map__delete(functions);
1678 return -1;
1679 }
1680 1826
1681 functions->map_ip = functions->unmap_ip = 1827 kmap = map__kmap(vmlinux_maps[type]);
1682 variables->map_ip = variables->unmap_ip = identity__map_ip; 1828 kmap->kmaps = self;
1683 map_groups__insert(self, functions); 1829 map_groups__insert(self, vmlinux_maps[type]);
1684 map_groups__insert(self, variables); 1830 }
1685 1831
1686 return 0; 1832 return 0;
1687} 1833}
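The map__kmap() calls above work because map__new2() (earlier in this diff) over-allocates kernel maps by sizeof(struct kmap). Under that assumption the accessor is plain pointer arithmetic; a sketch of what it presumably looks like in map.h:

    /* Sketch: the kmap area lives immediately after the map it annotates. */
    static inline struct kmap *map__kmap(struct map *self)
    {
            return (struct kmap *)(self + 1);
    }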
@@ -1791,19 +1937,22 @@ out_free_comm_list:
1791 return -1; 1937 return -1;
1792} 1938}
1793 1939
1794int perf_session__create_kernel_maps(struct perf_session *self) 1940int map_groups__create_kernel_maps(struct map_groups *self,
1941 struct map *vmlinux_maps[MAP__NR_TYPES])
1795{ 1942{
1796 if (map_groups__create_kernel_maps(&self->kmaps, 1943 struct dso *kernel = dsos__create_kernel(symbol_conf.vmlinux_name);
1797 symbol_conf.vmlinux_name) < 0) 1944
1945 if (kernel == NULL)
1946 return -1;
1947
1948 if (__map_groups__create_kernel_maps(self, vmlinux_maps, kernel) < 0)
1798 return -1; 1949 return -1;
1799 1950
1800 if (symbol_conf.use_modules && 1951 if (symbol_conf.use_modules && map_groups__create_modules(self) < 0)
1801 perf_session__create_module_maps(self) < 0) 1952 pr_debug("Problems creating module maps, continuing anyway...\n");
1802 pr_debug("Failed to load list of modules for session %s, "
1803 "continuing...\n", self->filename);
1804 /* 1953 /*
1805 * Now that we have all the maps created, just set the ->end of them: 1954 * Now that we have all the maps created, just set the ->end of them:
1806 */ 1955 */
1807 map_groups__fixup_end(&self->kmaps); 1956 map_groups__fixup_end(self);
1808 return 0; 1957 return 0;
1809} 1958}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 8aded2356f79..280dadd32a08 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -8,6 +8,8 @@
8#include <linux/rbtree.h> 8#include <linux/rbtree.h>
9#include "event.h" 9#include "event.h"
10 10
11#define DEBUG_CACHE_DIR ".debug"
12
11#ifdef HAVE_CPLUS_DEMANGLE 13#ifdef HAVE_CPLUS_DEMANGLE
12extern char *cplus_demangle(const char *, int); 14extern char *cplus_demangle(const char *, int);
13 15
@@ -49,6 +51,8 @@ struct symbol {
49 char name[0]; 51 char name[0];
50}; 52};
51 53
54void symbol__delete(struct symbol *self);
55
52struct strlist; 56struct strlist;
53 57
54struct symbol_conf { 58struct symbol_conf {
@@ -58,7 +62,8 @@ struct symbol_conf {
58 sort_by_name, 62 sort_by_name,
59 show_nr_samples, 63 show_nr_samples,
60 use_callchain, 64 use_callchain,
61 exclude_other; 65 exclude_other,
66 full_paths;
62 const char *vmlinux_name, 67 const char *vmlinux_name,
63 *field_sep; 68 *field_sep;
64 char *dso_list_str, 69 char *dso_list_str,
@@ -77,6 +82,12 @@ static inline void *symbol__priv(struct symbol *self)
77 return ((void *)self) - symbol_conf.priv_size; 82 return ((void *)self) - symbol_conf.priv_size;
78} 83}
79 84
85struct ref_reloc_sym {
86 const char *name;
87 u64 addr;
88 u64 unrelocated_addr;
89};
90
80struct addr_location { 91struct addr_location {
81 struct thread *thread; 92 struct thread *thread;
82 struct map *map; 93 struct map *map;
@@ -94,6 +105,7 @@ struct dso {
94 u8 slen_calculated:1; 105 u8 slen_calculated:1;
95 u8 has_build_id:1; 106 u8 has_build_id:1;
96 u8 kernel:1; 107 u8 kernel:1;
108 u8 hit:1;
97 unsigned char origin; 109 unsigned char origin;
98 u8 sorted_by_name; 110 u8 sorted_by_name;
99 u8 loaded; 111 u8 loaded;
@@ -105,37 +117,55 @@ struct dso {
105}; 117};
106 118
107struct dso *dso__new(const char *name); 119struct dso *dso__new(const char *name);
120struct dso *dso__new_kernel(const char *name);
108void dso__delete(struct dso *self); 121void dso__delete(struct dso *self);
109 122
110bool dso__loaded(const struct dso *self, enum map_type type); 123bool dso__loaded(const struct dso *self, enum map_type type);
111bool dso__sorted_by_name(const struct dso *self, enum map_type type); 124bool dso__sorted_by_name(const struct dso *self, enum map_type type);
112 125
126static inline void dso__set_loaded(struct dso *self, enum map_type type)
127{
128 self->loaded |= (1 << type);
129}
130
113void dso__sort_by_name(struct dso *self, enum map_type type); 131void dso__sort_by_name(struct dso *self, enum map_type type);
114 132
115struct perf_session; 133extern struct list_head dsos__user, dsos__kernel;
134
135struct dso *__dsos__findnew(struct list_head *head, const char *name);
136
137static inline struct dso *dsos__findnew(const char *name)
138{
139 return __dsos__findnew(&dsos__user, name);
140}
116 141
117struct dso *dsos__findnew(const char *name); 142int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
118int dso__load(struct dso *self, struct map *map, struct perf_session *session, 143int dso__load_vmlinux_path(struct dso *self, struct map *map,
119 symbol_filter_t filter); 144 symbol_filter_t filter);
145int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,
146 symbol_filter_t filter);
120void dsos__fprintf(FILE *fp); 147void dsos__fprintf(FILE *fp);
121size_t dsos__fprintf_buildid(FILE *fp); 148size_t dsos__fprintf_buildid(FILE *fp, bool with_hits);
122 149
123size_t dso__fprintf_buildid(struct dso *self, FILE *fp); 150size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
124size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); 151size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
125char dso__symtab_origin(const struct dso *self); 152char dso__symtab_origin(const struct dso *self);
153void dso__set_long_name(struct dso *self, char *name);
126void dso__set_build_id(struct dso *self, void *build_id); 154void dso__set_build_id(struct dso *self, void *build_id);
155void dso__read_running_kernel_build_id(struct dso *self);
127struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); 156struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
128struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, 157struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
129 const char *name); 158 const char *name);
130 159
131int filename__read_build_id(const char *filename, void *bf, size_t size); 160int filename__read_build_id(const char *filename, void *bf, size_t size);
132int sysfs__read_build_id(const char *filename, void *bf, size_t size); 161int sysfs__read_build_id(const char *filename, void *bf, size_t size);
133bool dsos__read_build_ids(void); 162bool dsos__read_build_ids(bool with_hits);
134int build_id__sprintf(u8 *self, int len, char *bf); 163int build_id__sprintf(const u8 *self, int len, char *bf);
164int kallsyms__parse(const char *filename, void *arg,
165 int (*process_symbol)(void *arg, const char *name,
166 char type, u64 start));
135 167
136int symbol__init(void); 168int symbol__init(void);
137int perf_session__create_kernel_maps(struct perf_session *self); 169bool symbol_type__is_a(char symbol_type, enum map_type map_type);
138 170
139extern struct list_head dsos__user, dsos__kernel;
140extern struct dso *vdso;
141#endif /* __PERF_SYMBOL */ 171#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 4a08dcf50b68..21b92162282b 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -31,12 +31,41 @@ static struct thread *thread__new(pid_t pid)
31 return self; 31 return self;
32} 32}
33 33
34static void map_groups__flush(struct map_groups *self)
35{
36 int type;
37
38 for (type = 0; type < MAP__NR_TYPES; type++) {
39 struct rb_root *root = &self->maps[type];
40 struct rb_node *next = rb_first(root);
41
42 while (next) {
43 struct map *pos = rb_entry(next, struct map, rb_node);
44 next = rb_next(&pos->rb_node);
45 rb_erase(&pos->rb_node, root);
46 /*
47 * We may have references to this map, for
48 * instance in some hist_entry instances, so
49 * just move them to a separate list.
50 */
51 list_add_tail(&pos->node, &self->removed_maps[pos->type]);
52 }
53 }
54}
55
34int thread__set_comm(struct thread *self, const char *comm) 56int thread__set_comm(struct thread *self, const char *comm)
35{ 57{
58 int err;
59
36 if (self->comm) 60 if (self->comm)
37 free(self->comm); 61 free(self->comm);
38 self->comm = strdup(comm); 62 self->comm = strdup(comm);
39 return self->comm ? 0 : -ENOMEM; 63 err = self->comm == NULL ? -ENOMEM : 0;
64 if (!err) {
65 self->comm_set = true;
66 map_groups__flush(&self->mg);
67 }
68 return err;
40} 69}
41 70
42int thread__comm_len(struct thread *self) 71int thread__comm_len(struct thread *self)
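thread__set_comm() now flushes the thread's map groups because a comm change normally means exec(2): the old address space is gone, yet hist entries may still reference the old maps, so they are parked on removed_maps rather than freed. The lifecycle, sketched as comments:

    /*
     * PERF_RECORD_COMM arrives at exec time:
     *   thread__set_comm(t, "newprog")
     *     -> map_groups__flush(&t->mg)   old maps -> removed_maps[type]
     * Subsequent PERF_RECORD_MMAP events repopulate t->mg for the new
     * image, while existing hist_entry references into the old maps
     * remain valid.
     */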
@@ -50,11 +79,6 @@ int thread__comm_len(struct thread *self)
50 return self->comm_len; 79 return self->comm_len;
51} 80}
52 81
53static const char *map_type__name[MAP__NR_TYPES] = {
54 [MAP__FUNCTION] = "Functions",
55 [MAP__VARIABLE] = "Variables",
56};
57
58static size_t __map_groups__fprintf_maps(struct map_groups *self, 82static size_t __map_groups__fprintf_maps(struct map_groups *self,
59 enum map_type type, FILE *fp) 83 enum map_type type, FILE *fp)
60{ 84{
@@ -255,11 +279,14 @@ int thread__fork(struct thread *self, struct thread *parent)
255{ 279{
256 int i; 280 int i;
257 281
258 if (self->comm) 282 if (parent->comm_set) {
259 free(self->comm); 283 if (self->comm)
260 self->comm = strdup(parent->comm); 284 free(self->comm);
261 if (!self->comm) 285 self->comm = strdup(parent->comm);
262 return -ENOMEM; 286 if (!self->comm)
287 return -ENOMEM;
288 self->comm_set = true;
289 }
263 290
264 for (i = 0; i < MAP__NR_TYPES; ++i) 291 for (i = 0; i < MAP__NR_TYPES; ++i)
265 if (map_groups__clone(&self->mg, &parent->mg, i) < 0) 292 if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
@@ -282,14 +309,13 @@ size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
282} 309}
283 310
284struct symbol *map_groups__find_symbol(struct map_groups *self, 311struct symbol *map_groups__find_symbol(struct map_groups *self,
285 struct perf_session *session,
286 enum map_type type, u64 addr, 312 enum map_type type, u64 addr,
287 symbol_filter_t filter) 313 symbol_filter_t filter)
288{ 314{
289 struct map *map = map_groups__find(self, type, addr); 315 struct map *map = map_groups__find(self, type, addr);
290 316
291 if (map != NULL) 317 if (map != NULL)
292 return map__find_symbol(map, session, map->map_ip(map, addr), filter); 318 return map__find_symbol(map, map->map_ip(map, addr), filter);
293 319
294 return NULL; 320 return NULL;
295} 321}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index c206f72c8881..0a28f39de545 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -15,6 +15,7 @@ struct thread {
15 struct map_groups mg; 15 struct map_groups mg;
16 pid_t pid; 16 pid_t pid;
17 char shortname[3]; 17 char shortname[3];
18 bool comm_set;
18 char *comm; 19 char *comm;
19 int comm_len; 20 int comm_len;
20}; 21};
@@ -48,23 +49,36 @@ static inline struct map *thread__find_map(struct thread *self,
48 return self ? map_groups__find(&self->mg, type, addr) : NULL; 49 return self ? map_groups__find(&self->mg, type, addr) : NULL;
49} 50}
50 51
52void thread__find_addr_map(struct thread *self,
53 struct perf_session *session, u8 cpumode,
54 enum map_type type, u64 addr,
55 struct addr_location *al);
56
51void thread__find_addr_location(struct thread *self, 57void thread__find_addr_location(struct thread *self,
52 struct perf_session *session, u8 cpumode, 58 struct perf_session *session, u8 cpumode,
53 enum map_type type, u64 addr, 59 enum map_type type, u64 addr,
54 struct addr_location *al, 60 struct addr_location *al,
55 symbol_filter_t filter); 61 symbol_filter_t filter);
56struct symbol *map_groups__find_symbol(struct map_groups *self, 62struct symbol *map_groups__find_symbol(struct map_groups *self,
57 struct perf_session *session,
58 enum map_type type, u64 addr, 63 enum map_type type, u64 addr,
59 symbol_filter_t filter); 64 symbol_filter_t filter);
60 65
61static inline struct symbol * 66static inline struct symbol *map_groups__find_function(struct map_groups *self,
62map_groups__find_function(struct map_groups *self, struct perf_session *session, 67 u64 addr,
63 u64 addr, symbol_filter_t filter) 68 symbol_filter_t filter)
64{ 69{
65 return map_groups__find_symbol(self, session, MAP__FUNCTION, addr, filter); 70 return map_groups__find_symbol(self, MAP__FUNCTION, addr, filter);
66} 71}
67 72
68struct map *map_groups__find_by_name(struct map_groups *self, 73struct map *map_groups__find_by_name(struct map_groups *self,
69 enum map_type type, const char *name); 74 enum map_type type, const char *name);
75
76int __map_groups__create_kernel_maps(struct map_groups *self,
77 struct map *vmlinux_maps[MAP__NR_TYPES],
78 struct dso *kernel);
79int map_groups__create_kernel_maps(struct map_groups *self,
80 struct map *vmlinux_maps[MAP__NR_TYPES]);
81
82struct map *map_groups__new_module(struct map_groups *self, u64 start,
83 const char *filename);
70#endif /* __PERF_THREAD_H */ 84#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index cace35595530..5ea8973ad331 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -20,6 +20,7 @@
20 */ 20 */
21#define _GNU_SOURCE 21#define _GNU_SOURCE
22#include <dirent.h> 22#include <dirent.h>
23#include <mntent.h>
23#include <stdio.h> 24#include <stdio.h>
24#include <stdlib.h> 25#include <stdlib.h>
25#include <string.h> 26#include <string.h>
@@ -37,6 +38,7 @@
37 38
38#include "../perf.h" 39#include "../perf.h"
39#include "trace-event.h" 40#include "trace-event.h"
41#include "debugfs.h"
40 42
41#define VERSION "0.5" 43#define VERSION "0.5"
42 44
@@ -101,32 +103,12 @@ void *malloc_or_die(unsigned int size)
101 103
102static const char *find_debugfs(void) 104static const char *find_debugfs(void)
103{ 105{
104 static char debugfs[MAX_PATH+1]; 106 const char *path = debugfs_mount(NULL);
105 static int debugfs_found;
106 char type[100];
107 FILE *fp;
108
109 if (debugfs_found)
110 return debugfs;
111
112 if ((fp = fopen("/proc/mounts","r")) == NULL)
113 die("Can't open /proc/mounts for read");
114
115 while (fscanf(fp, "%*s %"
116 STR(MAX_PATH)
117 "s %99s %*s %*d %*d\n",
118 debugfs, type) == 2) {
119 if (strcmp(type, "debugfs") == 0)
120 break;
121 }
122 fclose(fp);
123
124 if (strcmp(type, "debugfs") != 0)
125 die("debugfs not mounted, please mount");
126 107
127 debugfs_found = 1; 108 if (!path)
109 die("Your kernel not support debugfs filesystem");
128 110
129 return debugfs; 111 return path;
130} 112}
131 113
132/* 114/*
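find_debugfs() now defers to the shared helper from the new debugfs.c; debugfs_mount(NULL) is assumed to return the mount point (mounting debugfs at its default location first, if needed) or NULL on failure. Hypothetical usage:

    #include "debugfs.h"

    static const char *tracing_root(void)
    {
            const char *dbg = debugfs_mount(NULL); /* NULL: default mountpoint */

            if (dbg == NULL)
                    die("debugfs is not available");
            return dbg;     /* typically "/sys/kernel/debug" */
    }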
@@ -271,6 +253,8 @@ static void read_header_files(void)
271 write_or_die("header_page", 12); 253 write_or_die("header_page", 12);
272 write_or_die(&size, 8); 254 write_or_die(&size, 8);
273 check_size = copy_file_fd(fd); 255 check_size = copy_file_fd(fd);
256 close(fd);
257
274 if (size != check_size) 258 if (size != check_size)
275 die("wrong size for '%s' size=%lld read=%lld", 259 die("wrong size for '%s' size=%lld read=%lld",
276 path, size, check_size); 260 path, size, check_size);
@@ -289,6 +273,7 @@ static void read_header_files(void)
289 if (size != check_size) 273 if (size != check_size)
290 die("wrong size for '%s'", path); 274 die("wrong size for '%s'", path);
291 put_tracing_file(path); 275 put_tracing_file(path);
276 close(fd);
292} 277}
293 278
294static bool name_in_tp_list(char *sys, struct tracepoint_path *tps) 279static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
@@ -317,7 +302,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
317 die("can't read directory '%s'", sys); 302 die("can't read directory '%s'", sys);
318 303
319 while ((dent = readdir(dir))) { 304 while ((dent = readdir(dir))) {
320 if (strcmp(dent->d_name, ".") == 0 || 305 if (dent->d_type != DT_DIR ||
306 strcmp(dent->d_name, ".") == 0 ||
321 strcmp(dent->d_name, "..") == 0 || 307 strcmp(dent->d_name, "..") == 0 ||
322 !name_in_tp_list(dent->d_name, tps)) 308 !name_in_tp_list(dent->d_name, tps))
323 continue; 309 continue;
@@ -334,7 +320,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
334 320
335 rewinddir(dir); 321 rewinddir(dir);
336 while ((dent = readdir(dir))) { 322 while ((dent = readdir(dir))) {
337 if (strcmp(dent->d_name, ".") == 0 || 323 if (dent->d_type != DT_DIR ||
324 strcmp(dent->d_name, ".") == 0 ||
338 strcmp(dent->d_name, "..") == 0 || 325 strcmp(dent->d_name, "..") == 0 ||
339 !name_in_tp_list(dent->d_name, tps)) 326 !name_in_tp_list(dent->d_name, tps))
340 continue; 327 continue;
@@ -353,6 +340,7 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
353 340
354 free(format); 341 free(format);
355 } 342 }
343 closedir(dir);
356} 344}
357 345
358static void read_ftrace_files(struct tracepoint_path *tps) 346static void read_ftrace_files(struct tracepoint_path *tps)
@@ -394,26 +382,21 @@ static void read_event_files(struct tracepoint_path *tps)
394 die("can't read directory '%s'", path); 382 die("can't read directory '%s'", path);
395 383
396 while ((dent = readdir(dir))) { 384 while ((dent = readdir(dir))) {
397 if (strcmp(dent->d_name, ".") == 0 || 385 if (dent->d_type != DT_DIR ||
386 strcmp(dent->d_name, ".") == 0 ||
398 strcmp(dent->d_name, "..") == 0 || 387 strcmp(dent->d_name, "..") == 0 ||
399 strcmp(dent->d_name, "ftrace") == 0 || 388 strcmp(dent->d_name, "ftrace") == 0 ||
400 !system_in_tp_list(dent->d_name, tps)) 389 !system_in_tp_list(dent->d_name, tps))
401 continue; 390 continue;
402 sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2); 391 count++;
403 sprintf(sys, "%s/%s", path, dent->d_name);
404 ret = stat(sys, &st);
405 free(sys);
406 if (ret < 0)
407 continue;
408 if (S_ISDIR(st.st_mode))
409 count++;
410 } 392 }
411 393
412 write_or_die(&count, 4); 394 write_or_die(&count, 4);
413 395
414 rewinddir(dir); 396 rewinddir(dir);
415 while ((dent = readdir(dir))) { 397 while ((dent = readdir(dir))) {
416 if (strcmp(dent->d_name, ".") == 0 || 398 if (dent->d_type != DT_DIR ||
399 strcmp(dent->d_name, ".") == 0 ||
417 strcmp(dent->d_name, "..") == 0 || 400 strcmp(dent->d_name, "..") == 0 ||
418 strcmp(dent->d_name, "ftrace") == 0 || 401 strcmp(dent->d_name, "ftrace") == 0 ||
419 !system_in_tp_list(dent->d_name, tps)) 402 !system_in_tp_list(dent->d_name, tps))
@@ -422,14 +405,13 @@ static void read_event_files(struct tracepoint_path *tps)
422 sprintf(sys, "%s/%s", path, dent->d_name); 405 sprintf(sys, "%s/%s", path, dent->d_name);
423 ret = stat(sys, &st); 406 ret = stat(sys, &st);
424 if (ret >= 0) { 407 if (ret >= 0) {
425 if (S_ISDIR(st.st_mode)) { 408 write_or_die(dent->d_name, strlen(dent->d_name) + 1);
426 write_or_die(dent->d_name, strlen(dent->d_name) + 1); 409 copy_event_system(sys, tps);
427 copy_event_system(sys, tps);
428 }
429 } 410 }
430 free(sys); 411 free(sys);
431 } 412 }
432 413
414 closedir(dir);
433 put_tracing_file(path); 415 put_tracing_file(path);
434} 416}
435 417
@@ -533,7 +515,7 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
533 write_or_die(buf, 1); 515 write_or_die(buf, 1);
534 516
535 /* save page_size */ 517 /* save page_size */
536 page_size = getpagesize(); 518 page_size = sysconf(_SC_PAGESIZE);
537 write_or_die(&page_size, 4); 519 write_or_die(&page_size, 4);
538 520
539 read_header_files(); 521 read_header_files();
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index c5c32be040bf..9b3c20f42f98 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1925,6 +1925,15 @@ void *raw_field_ptr(struct event *event, const char *name, void *data)
1925 if (!field) 1925 if (!field)
1926 return NULL; 1926 return NULL;
1927 1927
1928 if (field->flags & FIELD_IS_STRING) {
1929 int offset;
1930
1931 offset = *(int *)(data + field->offset);
1932 offset &= 0xffff;
1933
1934 return data + offset;
1935 }
1936
1928 return data + field->offset; 1937 return data + field->offset;
1929} 1938}
1930 1939
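The FIELD_IS_STRING branch above handles ftrace __data_loc fields: the 32-bit word at the field offset packs the payload offset into its low 16 bits (the high 16 bits carry the length, which this caller does not need). A standalone sketch of the decoding:

    /* Decode a __data_loc slot from raw trace event data. */
    static const char *data_loc_string(const void *data, int field_offset)
    {
            unsigned int loc = *(const unsigned int *)
                               ((const char *)data + field_offset);
            unsigned int offset = loc & 0xffff;  /* low 16 bits: offset */
            unsigned int len = loc >> 16;        /* high 16 bits: length */

            (void)len;                           /* unused here */
            return (const char *)data + offset;
    }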
@@ -3277,3 +3286,18 @@ void parse_set_info(int nr_cpus, int long_sz)
3277 cpus = nr_cpus; 3286 cpus = nr_cpus;
3278 long_size = long_sz; 3287 long_size = long_sz;
3279} 3288}
3289
3290int common_pc(struct scripting_context *context)
3291{
3292 return parse_common_pc(context->event_data);
3293}
3294
3295int common_flags(struct scripting_context *context)
3296{
3297 return parse_common_flags(context->event_data);
3298}
3299
3300int common_lock_depth(struct scripting_context *context)
3301{
3302 return parse_common_lock_depth(context->event_data);
3303}
diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h
deleted file mode 100644
index e88fb26137bb..000000000000
--- a/tools/perf/util/trace-event-perl.h
+++ /dev/null
@@ -1,55 +0,0 @@
1#ifndef __PERF_TRACE_EVENT_PERL_H
2#define __PERF_TRACE_EVENT_PERL_H
3#ifdef NO_LIBPERL
4typedef int INTERP;
5#define dSP
6#define ENTER
7#define SAVETMPS
8#define PUTBACK
9#define SPAGAIN
10#define FREETMPS
11#define LEAVE
12#define SP
13#define ERRSV
14#define G_SCALAR (0)
15#define G_DISCARD (0)
16#define G_NOARGS (0)
17#define PUSHMARK(a)
18#define SvTRUE(a) (0)
19#define XPUSHs(s)
20#define sv_2mortal(a)
21#define newSVpv(a,b)
22#define newSVuv(a)
23#define newSViv(a)
24#define get_cv(a,b) (0)
25#define call_pv(a,b) (0)
26#define perl_alloc() (0)
27#define perl_construct(a) (0)
28#define perl_parse(a,b,c,d,e) (0)
29#define perl_run(a) (0)
30#define perl_destruct(a) (0)
31#define perl_free(a) (0)
32#define pTHX void
33#define CV void
34#define dXSUB_SYS
35#define pTHX_
36static inline void newXS(const char *a, void *b, const char *c) {}
37static void boot_Perf__Trace__Context(pTHX_ CV *cv) {}
38static void boot_DynaLoader(pTHX_ CV *cv) {}
39#else
40#include <EXTERN.h>
41#include <perl.h>
42void boot_Perf__Trace__Context(pTHX_ CV *cv);
43void boot_DynaLoader(pTHX_ CV *cv);
44typedef PerlInterpreter * INTERP;
45#endif
46
47struct scripting_context {
48 void *event_data;
49};
50
51int common_pc(struct scripting_context *context);
52int common_flags(struct scripting_context *context);
53int common_lock_depth(struct scripting_context *context);
54
55#endif /* __PERF_TRACE_EVENT_PERL_H */
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 1744422cafcb..7cd1193918c7 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -18,7 +18,7 @@
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
-#define _LARGEFILE64_SOURCE
+#define _FILE_OFFSET_BITS 64
 
 #include <dirent.h>
 #include <stdio.h>
@@ -83,7 +83,7 @@ static char *read_string(void)
 	char *str = NULL;
 	int size = 0;
 	int i;
-	int r;
+	off_t r;
 
 	for (;;) {
 		r = read(input_fd, buf, BUFSIZ);
@@ -118,7 +118,7 @@ static char *read_string(void)
 
 	/* move the file descriptor to the end of the string */
 	r = lseek(input_fd, -(r - i), SEEK_CUR);
-	if (r < 0)
+	if (r == (off_t)-1)
 		die("lseek");
 
 	if (str) {
@@ -282,8 +282,8 @@ static void update_cpu_data_index(int cpu)
 
 static void get_next_page(int cpu)
 {
-	off64_t save_seek;
-	off64_t ret;
+	off_t save_seek;
+	off_t ret;
 
 	if (!cpu_data[cpu].page)
 		return;
@@ -298,17 +298,17 @@ static void get_next_page(int cpu)
 		update_cpu_data_index(cpu);
 
 		/* other parts of the code may expect the pointer to not move */
-		save_seek = lseek64(input_fd, 0, SEEK_CUR);
+		save_seek = lseek(input_fd, 0, SEEK_CUR);
 
-		ret = lseek64(input_fd, cpu_data[cpu].offset, SEEK_SET);
-		if (ret < 0)
+		ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
+		if (ret == (off_t)-1)
 			die("failed to lseek");
 		ret = read(input_fd, cpu_data[cpu].page, page_size);
 		if (ret < 0)
 			die("failed to read page");
 
 		/* reset the file pointer back */
-		lseek64(input_fd, save_seek, SEEK_SET);
+		lseek(input_fd, save_seek, SEEK_SET);
 
 		return;
 	}
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
new file mode 100644
index 000000000000..7ea983acfaea
--- /dev/null
+++ b/tools/perf/util/trace-event-scripting.c
@@ -0,0 +1,167 @@
+/*
+ * trace-event-scripting. Scripting engine common and initialization code.
+ *
+ * Copyright (C) 2009-2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+struct scripting_context *scripting_context;
+
+static int stop_script_unsupported(void)
+{
+	return 0;
+}
+
+static void process_event_unsupported(int cpu __unused,
+				      void *data __unused,
+				      int size __unused,
+				      unsigned long long nsecs __unused,
+				      char *comm __unused)
+{
+}
+
+static void print_python_unsupported_msg(void)
+{
+	fprintf(stderr, "Python scripting not supported."
+		" Install libpython and rebuild perf to enable it.\n"
+		"For example:\n # apt-get install python-dev (ubuntu)"
+		"\n # yum install python-devel (Fedora)"
+		"\n etc.\n");
+}
+
+static int python_start_script_unsupported(const char *script __unused,
+					   int argc __unused,
+					   const char **argv __unused)
+{
+	print_python_unsupported_msg();
+
+	return -1;
+}
+
+static int python_generate_script_unsupported(const char *outfile __unused)
+{
+	print_python_unsupported_msg();
+
+	return -1;
+}
+
+struct scripting_ops python_scripting_unsupported_ops = {
+	.name = "Python",
+	.start_script = python_start_script_unsupported,
+	.stop_script = stop_script_unsupported,
+	.process_event = process_event_unsupported,
+	.generate_script = python_generate_script_unsupported,
+};
+
+static void register_python_scripting(struct scripting_ops *scripting_ops)
+{
+	int err;
+	err = script_spec_register("Python", scripting_ops);
+	if (err)
+		die("error registering Python script extension");
+
+	err = script_spec_register("py", scripting_ops);
+	if (err)
+		die("error registering py script extension");
+
+	scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPYTHON
+void setup_python_scripting(void)
+{
+	register_python_scripting(&python_scripting_unsupported_ops);
+}
+#else
+struct scripting_ops python_scripting_ops;
+
+void setup_python_scripting(void)
+{
+	register_python_scripting(&python_scripting_ops);
+}
+#endif
+
+static void print_perl_unsupported_msg(void)
+{
+	fprintf(stderr, "Perl scripting not supported."
+		" Install libperl and rebuild perf to enable it.\n"
+		"For example:\n # apt-get install libperl-dev (ubuntu)"
+		"\n # yum install 'perl(ExtUtils::Embed)' (Fedora)"
+		"\n etc.\n");
+}
+
+static int perl_start_script_unsupported(const char *script __unused,
+					 int argc __unused,
+					 const char **argv __unused)
+{
+	print_perl_unsupported_msg();
+
+	return -1;
+}
+
+static int perl_generate_script_unsupported(const char *outfile __unused)
+{
+	print_perl_unsupported_msg();
+
+	return -1;
+}
+
+struct scripting_ops perl_scripting_unsupported_ops = {
+	.name = "Perl",
+	.start_script = perl_start_script_unsupported,
+	.stop_script = stop_script_unsupported,
+	.process_event = process_event_unsupported,
+	.generate_script = perl_generate_script_unsupported,
+};
+
+static void register_perl_scripting(struct scripting_ops *scripting_ops)
+{
+	int err;
+	err = script_spec_register("Perl", scripting_ops);
+	if (err)
+		die("error registering Perl script extension");
+
+	err = script_spec_register("pl", scripting_ops);
+	if (err)
+		die("error registering pl script extension");
+
+	scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPERL
+void setup_perl_scripting(void)
+{
+	register_perl_scripting(&perl_scripting_unsupported_ops);
+}
+#else
+struct scripting_ops perl_scripting_ops;
+
+void setup_perl_scripting(void)
+{
+	register_perl_scripting(&perl_scripting_ops);
+}
+#endif
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 6ad405620c9b..c3269b937db4 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -279,7 +279,15 @@ struct scripting_ops {
 
 int script_spec_register(const char *spec, struct scripting_ops *ops);
 
-extern struct scripting_ops perl_scripting_ops;
 void setup_perl_scripting(void);
+void setup_python_scripting(void);
+
+struct scripting_context {
+	void *event_data;
+};
+
+int common_pc(struct scripting_context *context);
+int common_flags(struct scripting_context *context);
+int common_lock_depth(struct scripting_context *context);
 
 #endif /* __PERF_TRACE_EVENTS_H */
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
new file mode 100644
index 000000000000..f9b890fde681
--- /dev/null
+++ b/tools/perf/util/util.c
@@ -0,0 +1,94 @@
+#include "util.h"
+#include <sys/mman.h>
+
+int mkdir_p(char *path, mode_t mode)
+{
+	struct stat st;
+	int err;
+	char *d = path;
+
+	if (*d != '/')
+		return -1;
+
+	if (stat(path, &st) == 0)
+		return 0;
+
+	while (*++d == '/');
+
+	while ((d = strchr(d, '/'))) {
+		*d = '\0';
+		err = stat(path, &st) && mkdir(path, mode);
+		*d++ = '/';
+		if (err)
+			return -1;
+		while (*d == '/')
+			++d;
+	}
+	return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
+}
+
+static int slow_copyfile(const char *from, const char *to)
+{
+	int err = 0;
+	char *line = NULL;
+	size_t n;
+	FILE *from_fp = fopen(from, "r"), *to_fp;
+
+	if (from_fp == NULL)
+		goto out;
+
+	to_fp = fopen(to, "w");
+	if (to_fp == NULL)
+		goto out_fclose_from;
+
+	while (getline(&line, &n, from_fp) > 0)
+		if (fputs(line, to_fp) == EOF)
+			goto out_fclose_to;
+	err = 0;
+out_fclose_to:
+	fclose(to_fp);
+	free(line);
+out_fclose_from:
+	fclose(from_fp);
+out:
+	return err;
+}
+
+int copyfile(const char *from, const char *to)
+{
+	int fromfd, tofd;
+	struct stat st;
+	void *addr;
+	int err = -1;
+
+	if (stat(from, &st))
+		goto out;
+
+	if (st.st_size == 0) /* /proc? do it slowly... */
+		return slow_copyfile(from, to);
+
+	fromfd = open(from, O_RDONLY);
+	if (fromfd < 0)
+		goto out;
+
+	tofd = creat(to, 0755);
+	if (tofd < 0)
+		goto out_close_from;
+
+	addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
+	if (addr == MAP_FAILED)
+		goto out_close_to;
+
+	if (write(tofd, addr, st.st_size) == st.st_size)
+		err = 0;
+
+	munmap(addr, st.st_size);
+out_close_to:
+	close(tofd);
+	if (err)
+		unlink(to);
+out_close_from:
+	close(fromfd);
+out:
+	return err;
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index c673d8825883..0f5b2a6f1080 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -403,4 +403,7 @@ void git_qsort(void *base, size_t nmemb, size_t size,
 #endif
 #endif
 
+int mkdir_p(char *path, mode_t mode);
+int copyfile(const char *from, const char *to);
+
 #endif
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index 1c15e39f99e3..cfa55d686e3b 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -169,6 +169,7 @@ static void perf_read_values__display_pretty(FILE *fp,
 				counterwidth[j], values->value[i][j]);
 		fprintf(fp, "\n");
 	}
+	free(counterwidth);
 }
 
 static void perf_read_values__display_raw(FILE *fp,