aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-events6
-rw-r--r--Documentation/RCU/stallwarn.txt16
-rw-r--r--Documentation/RCU/trace.txt32
-rw-r--r--Documentation/acpi/enumeration.txt2
-rw-r--r--Documentation/cgroups/00-INDEX2
-rw-r--r--Documentation/cpu-freq/intel-pstate.txt8
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt9
-rw-r--r--Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt110
-rw-r--r--Documentation/devicetree/bindings/mfd/max77686.txt14
-rw-r--r--Documentation/devicetree/bindings/pci/versatile.txt59
-rw-r--r--Documentation/devicetree/bindings/regulator/da9211.txt7
-rw-r--r--Documentation/devicetree/bindings/regulator/isl9305.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/mt6397-regulator.txt217
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.txt94
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt16
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sirf.txt41
-rw-r--r--Documentation/devicetree/bindings/spi/spi-st-ssc.txt40
-rw-r--r--Documentation/futex-requeue-pi.txt8
-rw-r--r--Documentation/hwmon/ina2xx23
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/locking/lockdep-design.txt2
-rw-r--r--Documentation/memory-barriers.txt46
-rw-r--r--Documentation/power/s2ram.txt4
-rw-r--r--Documentation/x86/entry_64.txt18
-rw-r--r--Documentation/x86/x86_64/kernel-stacks8
-rw-r--r--MAINTAINERS15
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts37
-rw-r--r--arch/arm/include/asm/mach/pci.h6
-rw-r--r--arch/arm/include/asm/pci.h7
-rw-r--r--arch/arm/include/asm/xen/page.h2
-rw-r--r--arch/arm/kernel/bios32.c8
-rw-r--r--arch/arm/kvm/Kconfig1
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c92
-rw-r--r--arch/arm/mach-integrator/pci_v3.c62
-rw-r--r--arch/arm/mach-ks8695/pci.c77
-rw-r--r--arch/arm/mach-sa1100/pci-nanoengine.c94
-rw-r--r--arch/arm/xen/enlighten.c4
-rw-r--r--arch/arm/xen/mm.c2
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/arm64/kernel/efi-stub.c14
-rw-r--r--arch/arm64/kernel/pci.c22
-rw-r--r--arch/arm64/kvm/Kconfig1
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c4
-rw-r--r--arch/ia64/kernel/acpi-ext.c6
-rw-r--r--arch/ia64/kernel/acpi.c6
-rw-r--r--arch/ia64/pci/pci.c14
-rw-r--r--arch/m68k/atari/atakeyb.c72
-rw-r--r--arch/m68k/atari/stdma.c2
-rw-r--r--arch/m68k/atari/time.c3
-rw-r--r--arch/m68k/configs/amiga_defconfig73
-rw-r--r--arch/m68k/configs/apollo_defconfig73
-rw-r--r--arch/m68k/configs/atari_defconfig78
-rw-r--r--arch/m68k/configs/bvme6000_defconfig73
-rw-r--r--arch/m68k/configs/hp300_defconfig73
-rw-r--r--arch/m68k/configs/mac_defconfig72
-rw-r--r--arch/m68k/configs/multi_defconfig78
-rw-r--r--arch/m68k/configs/mvme147_defconfig73
-rw-r--r--arch/m68k/configs/mvme16x_defconfig72
-rw-r--r--arch/m68k/configs/q40_defconfig73
-rw-r--r--arch/m68k/configs/sun3_defconfig72
-rw-r--r--arch/m68k/configs/sun3x_defconfig73
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/atariints.h5
-rw-r--r--arch/m68k/include/asm/futex.h94
-rw-r--r--arch/m68k/include/asm/macintosh.h2
-rw-r--r--arch/m68k/mac/config.c32
-rw-r--r--arch/m68k/mvme147/config.c46
-rw-r--r--arch/m68k/mvme16x/rtc.c2
-rw-r--r--arch/microblaze/boot/Makefile3
-rw-r--r--arch/microblaze/boot/dts/Makefile2
-rw-r--r--arch/microblaze/include/asm/delay.h4
-rw-r--r--arch/microblaze/include/asm/kgdb.h3
-rw-r--r--arch/microblaze/include/asm/linkage.h16
-rw-r--r--arch/microblaze/include/asm/pgalloc.h14
-rw-r--r--arch/microblaze/include/asm/syscall.h2
-rw-r--r--arch/microblaze/include/asm/uaccess.h6
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/microblaze/include/uapi/asm/unistd.h1
-rw-r--r--arch/microblaze/kernel/Makefile2
-rw-r--r--arch/microblaze/kernel/cpu/cache.c6
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c2
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-static.c2
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c7
-rw-r--r--arch/microblaze/kernel/intc.c8
-rw-r--r--arch/microblaze/kernel/kgdb.c10
-rw-r--r--arch/microblaze/kernel/prom_parse.c35
-rw-r--r--arch/microblaze/kernel/ptrace.c4
-rw-r--r--arch/microblaze/kernel/reset.c1
-rw-r--r--arch/microblaze/kernel/signal.c4
-rw-r--r--arch/microblaze/kernel/syscall_table.S1
-rw-r--r--arch/microblaze/kernel/unwind.c2
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/pci/pci-bcm1480.c4
-rw-r--r--arch/mips/pci/pci-octeon.c4
-rw-r--r--arch/mips/pci/pcie-octeon.c12
-rw-r--r--arch/mn10300/unit-asb2305/pci.c4
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c4
-rw-r--r--arch/powerpc/platforms/powermac/pci.c209
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c46
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/tile/kernel/pci.c4
-rw-r--r--arch/tile/kvm/Kconfig1
-rw-r--r--arch/x86/Kconfig16
-rw-r--r--arch/x86/boot/ctype.h5
-rw-r--r--arch/x86/boot/early_serial_console.c6
-rw-r--r--arch/x86/ia32/ia32entry.S4
-rw-r--r--arch/x86/include/asm/apic.h58
-rw-r--r--arch/x86/include/asm/calling.h1
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/debugreg.h5
-rw-r--r--arch/x86/include/asm/fpu-internal.h10
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h1
-rw-r--r--arch/x86/include/asm/i387.h6
-rw-r--r--arch/x86/include/asm/io_apic.h5
-rw-r--r--arch/x86/include/asm/irq_remapping.h4
-rw-r--r--arch/x86/include/asm/mce.h1
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/pmc_atom.h22
-rw-r--r--arch/x86/include/asm/smpboot_hooks.h68
-rw-r--r--arch/x86/include/asm/thread_info.h15
-rw-r--r--arch/x86/include/asm/traps.h6
-rw-r--r--arch/x86/include/asm/xen/page.h20
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h5
-rw-r--r--arch/x86/kernel/acpi/boot.c18
-rw-r--r--arch/x86/kernel/apb_timer.c8
-rw-r--r--arch/x86/kernel/apic/apic.c456
-rw-r--r--arch/x86/kernel/apic/io_apic.c13
-rw-r--r--arch/x86/kernel/cpu/amd.c19
-rw-r--r--arch/x86/kernel/cpu/common.c15
-rw-r--r--arch/x86/kernel/cpu/intel.c6
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c137
-rw-r--r--arch/x86/kernel/cpu/mcheck/p5.c6
-rw-r--r--arch/x86/kernel/cpu/mcheck/winchip.c5
-rw-r--r--arch/x86/kernel/e820.c26
-rw-r--r--arch/x86/kernel/entry_64.S317
-rw-r--r--arch/x86/kernel/hw_breakpoint.c45
-rw-r--r--arch/x86/kernel/i387.c39
-rw-r--r--arch/x86/kernel/irq_32.c13
-rw-r--r--arch/x86/kernel/pmc_atom.c81
-rw-r--r--arch/x86/kernel/rtc.c2
-rw-r--r--arch/x86/kernel/setup.c8
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kernel/smpboot.c113
-rw-r--r--arch/x86/kernel/traps.c131
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/pci/acpi.c293
-rw-r--r--arch/x86/pci/bus_numa.c4
-rw-r--r--arch/x86/pci/common.c34
-rw-r--r--arch/x86/pci/intel_mid_pci.c5
-rw-r--r--arch/x86/pci/irq.c15
-rw-r--r--arch/x86/pci/mmconfig-shared.c34
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/vdso/Makefile2
-rw-r--r--arch/x86/xen/mmu.c17
-rw-r--r--arch/x86/xen/p2m.c267
-rw-r--r--arch/x86/xen/setup.c37
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--arch/x86/xen/time.c4
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--block/partitions/efi.c2
-rw-r--r--drivers/acpi/Kconfig6
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_apd.c150
-rw-r--r--drivers/acpi/acpi_lpss.c12
-rw-r--r--drivers/acpi/acpi_memhotplug.c8
-rw-r--r--drivers/acpi/acpi_platform.c4
-rw-r--r--drivers/acpi/acpica/acapps.h4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h4
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c164
-rw-r--r--drivers/acpi/acpica/evgpeblk.c10
-rw-r--r--drivers/acpi/acpica/evgpeinit.c10
-rw-r--r--drivers/acpi/acpica/evgpeutil.c61
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c132
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c123
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c10
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c46
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psobject.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c11
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c2
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c61
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsserial.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c12
-rw-r--r--drivers/acpi/acpica/tbdata.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c41
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uterror.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utfileio.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c2
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstring.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/apei/apei-base.c32
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/ec.c453
-rw-r--r--drivers/acpi/internal.h11
-rw-r--r--drivers/acpi/ioapic.c229
-rw-r--r--drivers/acpi/numa.c12
-rw-r--r--drivers/acpi/pci_irq.c9
-rw-r--r--drivers/acpi/pci_root.c9
-rw-r--r--drivers/acpi/processor_core.c123
-rw-r--r--drivers/acpi/processor_idle.c182
-rw-r--r--drivers/acpi/resource.c353
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c2
-rw-r--r--drivers/acpi/video.c18
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.h6
-rw-r--r--drivers/ata/ahci_da850.c11
-rw-r--r--drivers/ata/ahci_imx.c25
-rw-r--r--drivers/ata/ahci_mvebu.c11
-rw-r--r--drivers/ata/ahci_platform.c11
-rw-r--r--drivers/ata/ahci_st.c11
-rw-r--r--drivers/ata/ahci_sunxi.c11
-rw-r--r--drivers/ata/ahci_tegra.c11
-rw-r--r--drivers/ata/ahci_xgene.c203
-rw-r--r--drivers/ata/libahci_platform.c249
-rw-r--r--drivers/ata/libata-core.c27
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_cs5530.c6
-rw-r--r--drivers/ata/pata_of_platform.c10
-rw-r--r--drivers/ata/pata_pdc2027x.c10
-rw-r--r--drivers/ata/pata_platform.c8
-rw-r--r--drivers/ata/sata_dwc_460ex.c116
-rw-r--r--drivers/ata/sata_mv.c6
-rw-r--r--drivers/ata/sata_rcar.c25
-rw-r--r--drivers/base/firmware_class.c1
-rw-r--r--drivers/base/power/clock_ops.c4
-rw-r--r--drivers/base/power/common.c18
-rw-r--r--drivers/base/power/domain.c157
-rw-r--r--drivers/base/power/opp.c202
-rw-r--r--drivers/base/power/qos.c4
-rw-r--r--drivers/base/regmap/internal.h10
-rw-r--r--drivers/base/regmap/regmap-ac97.c4
-rw-r--r--drivers/base/regmap/regmap-i2c.c46
-rw-r--r--drivers/base/regmap/regmap.c7
-rw-r--r--drivers/block/xen-blkback/blkback.c177
-rw-r--r--drivers/block/xen-blkback/common.h3
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/random.c8
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig.x8610
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c3
-rw-r--r--drivers/cpufreq/cpufreq.c174
-rw-r--r--drivers/cpufreq/cpufreq_stats.c219
-rw-r--r--drivers/cpufreq/intel_pstate.c55
-rw-r--r--drivers/cpufreq/ls1x-cpufreq.c1
-rw-r--r--drivers/cpufreq/sfi-cpufreq.c136
-rw-r--r--drivers/cpufreq/speedstep-lib.c3
-rw-r--r--drivers/cpufreq/speedstep-smi.c12
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c4
-rw-r--r--drivers/devfreq/Kconfig13
-rw-r--r--drivers/devfreq/Makefile5
-rw-r--r--drivers/devfreq/devfreq-event.c494
-rw-r--r--drivers/devfreq/event/Kconfig25
-rw-r--r--drivers/devfreq/event/Makefile2
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c374
-rw-r--r--drivers/devfreq/event/exynos-ppmu.h93
-rw-r--r--drivers/devfreq/tegra-devfreq.c718
-rw-r--r--drivers/dma/acpi-dma.c10
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/edac_mc_sysfs.c51
-rw-r--r--drivers/edac/i5100_edac.c5
-rw-r--r--drivers/edac/mce_amd_inj.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.h2
-rw-r--r--drivers/edac/mv64x60_edac.c3
-rw-r--r--drivers/edac/synopsys_edac.c535
-rw-r--r--drivers/firmware/efi/Kconfig4
-rw-r--r--drivers/firmware/efi/efi.c17
-rw-r--r--drivers/firmware/efi/efivars.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile14
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c8
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c18
-rw-r--r--drivers/firmware/efi/runtime-map.c2
-rw-r--r--drivers/hv/vmbus_drv.c4
-rw-r--r--drivers/hwmon/Kconfig5
-rw-r--r--drivers/hwmon/abx500.c6
-rw-r--r--drivers/hwmon/ad7314.c5
-rw-r--r--drivers/hwmon/adc128d818.c3
-rw-r--r--drivers/hwmon/ads7828.c102
-rw-r--r--drivers/hwmon/ina2xx.c334
-rw-r--r--drivers/hwmon/jc42.c15
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/tmp102.c15
-rw-r--r--drivers/idle/intel_idle.c1
-rw-r--r--drivers/iommu/amd_iommu.c1
-rw-r--r--drivers/iommu/amd_iommu_init.c15
-rw-r--r--drivers/iommu/amd_iommu_proto.h1
-rw-r--r--drivers/iommu/intel_irq_remapping.c96
-rw-r--r--drivers/iommu/irq_remapping.c74
-rw-r--r--drivers/iommu/irq_remapping.h5
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c6
-rw-r--r--drivers/net/xen-netback/interface.c7
-rw-r--r--drivers/net/xen-netback/netback.c106
-rw-r--r--drivers/of/of_pci.c4
-rw-r--r--drivers/parport/parport_atari.c4
-rw-r--r--drivers/pci/access.c87
-rw-r--r--drivers/pci/bus.c18
-rw-r--r--drivers/pci/host-bridge.c8
-rw-r--r--drivers/pci/host/Kconfig4
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-host-generic.c55
-rw-r--r--drivers/pci/host/pci-keystone.c4
-rw-r--r--drivers/pci/host/pci-layerscape.c1
-rw-r--r--drivers/pci/host/pci-mvebu.c15
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c51
-rw-r--r--drivers/pci/host/pci-tegra.c68
-rw-r--r--drivers/pci/host/pci-versatile.c237
-rw-r--r--drivers/pci/host/pci-xgene.c156
-rw-r--r--drivers/pci/host/pcie-designware.c3
-rw-r--r--drivers/pci/host/pcie-rcar.c7
-rw-r--r--drivers/pci/host/pcie-xilinx.c96
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c3
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c13
-rw-r--r--drivers/pci/msi.c5
-rw-r--r--drivers/pci/pci-acpi.c17
-rw-r--r--drivers/pci/pci-driver.c13
-rw-r--r--drivers/pci/pci.c77
-rw-r--r--drivers/pci/pci.h6
-rw-r--r--drivers/pci/pcie/aspm.c12
-rw-r--r--drivers/pci/probe.c10
-rw-r--r--drivers/pci/quirks.c64
-rw-r--r--drivers/pci/rom.c7
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c45
-rw-r--r--drivers/rapidio/devices/tsi721.c2
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/Kconfig17
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/axp20x-regulator.c93
-rw-r--r--drivers/regulator/core.c375
-rw-r--r--drivers/regulator/da9211-regulator.c16
-rw-r--r--drivers/regulator/fan53555.c4
-rw-r--r--drivers/regulator/internal.h2
-rw-r--r--drivers/regulator/isl9305.c6
-rw-r--r--drivers/regulator/lp872x.c24
-rw-r--r--drivers/regulator/max14577.c62
-rw-r--r--drivers/regulator/max77686.c70
-rw-r--r--drivers/regulator/max77843.c227
-rw-r--r--drivers/regulator/max8649.c4
-rw-r--r--drivers/regulator/mt6397-regulator.c332
-rw-r--r--drivers/regulator/of_regulator.c11
-rw-r--r--drivers/regulator/pfuze100-regulator.c134
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c15
-rw-r--r--drivers/regulator/rk808-regulator.c6
-rw-r--r--drivers/regulator/rt5033-regulator.c8
-rw-r--r--drivers/regulator/tps65023-regulator.c6
-rw-r--r--drivers/rtc/hctosys.c18
-rw-r--r--drivers/rtc/interface.c22
-rw-r--r--drivers/rtc/rtc-dev.c8
-rw-r--r--drivers/rtc/rtc-efi.c1
-rw-r--r--drivers/rtc/systohc.c6
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c5
-rw-r--r--drivers/sfi/sfi_core.c4
-rw-r--r--drivers/spi/Kconfig24
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-atmel.c12
-rw-r--r--drivers/spi/spi-au1550.c4
-rw-r--r--drivers/spi/spi-bcm2835.c4
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-bitbang.c4
-rw-r--r--drivers/spi/spi-butterfly.c4
-rw-r--r--drivers/spi/spi-coldfire-qspi.c5
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dln2.c881
-rw-r--r--drivers/spi/spi-dw-mid.c15
-rw-r--r--drivers/spi/spi-dw-pci.c38
-rw-r--r--drivers/spi/spi-dw.c9
-rw-r--r--drivers/spi/spi-falcon.c12
-rw-r--r--drivers/spi/spi-fsl-cpm.c9
-rw-r--r--drivers/spi/spi-fsl-dspi.c157
-rw-r--r--drivers/spi/spi-fsl-lib.c16
-rw-r--r--drivers/spi/spi-fsl-lib.h4
-rw-r--r--drivers/spi/spi-gpio.c8
-rw-r--r--drivers/spi/spi-img-spfi.c49
-rw-r--r--drivers/spi/spi-imx.c28
-rw-r--r--drivers/spi/spi-lm70llp.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c2
-rw-r--r--drivers/spi/spi-mxs.c5
-rw-r--r--drivers/spi/spi-omap-100k.c5
-rw-r--r--drivers/spi/spi-omap-uwire.c4
-rw-r--r--drivers/spi/spi-omap2-mcspi.c5
-rw-r--r--drivers/spi/spi-orion.c88
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c17
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c34
-rw-r--r--drivers/spi/spi-pxa2xx.c207
-rw-r--r--drivers/spi/spi-pxa2xx.h34
-rw-r--r--drivers/spi/spi-qup.c11
-rw-r--r--drivers/spi/spi-rockchip.c6
-rw-r--r--drivers/spi/spi-rspi.c5
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sc18is602.c4
-rw-r--r--drivers/spi/spi-sh-hspi.c5
-rw-r--r--drivers/spi/spi-sh-msiof.c91
-rw-r--r--drivers/spi/spi-sh.c5
-rw-r--r--drivers/spi/spi-sirf.c1
-rw-r--r--drivers/spi/spi-st-ssc4.c504
-rw-r--r--drivers/spi/spi-ti-qspi.c14
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-xilinx.c298
-rw-r--r--drivers/spi/spi.c120
-rw-r--r--drivers/spi/spidev.c125
-rw-r--r--drivers/usb/core/hub.c12
-rw-r--r--drivers/video/fbdev/atafb.c3
-rw-r--r--drivers/xen/balloon.c86
-rw-r--r--drivers/xen/gntdev.c143
-rw-r--r--drivers/xen/grant-table.c120
-rw-r--r--drivers/xen/manage.c8
-rw-r--r--drivers/xen/tmem.c2
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c8
-rw-r--r--drivers/xen/xen-scsiback.c6
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c11
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/btrfs/Kconfig1
-rw-r--r--fs/efivarfs/Kconfig1
-rw-r--r--fs/efivarfs/super.c2
-rw-r--r--fs/notify/Kconfig1
-rw-r--r--fs/quota/Kconfig1
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h18
-rw-r--r--include/acpi/acrestyp.h42
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actbl2.h2
-rw-r--r--include/acpi/actbl3.h2
-rw-r--r--include/acpi/actypes.h14
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/linux/acpi.h22
-rw-r--r--include/linux/ahci_platform.h6
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/ata_platform.h5
-rw-r--r--include/linux/cgroup.h4
-rw-r--r--include/linux/cgroup_subsys.h8
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/cpufreq.h10
-rw-r--r--include/linux/devfreq-event.h196
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/ktime.h17
-rw-r--r--include/linux/libata.h6
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/osq_lock.h12
-rw-r--r--include/linux/page-flags.h5
-rw-r--r--include/linux/pci.h24
-rw-r--r--include/linux/percpu-refcount.h34
-rw-r--r--include/linux/perf_event.h30
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_domain.h4
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/rculist.h16
-rw-r--r--include/linux/rcupdate.h13
-rw-r--r--include/linux/rcutiny.h45
-rw-r--r--include/linux/rcutree.h11
-rw-r--r--include/linux/regmap.h2
-rw-r--r--include/linux/regulator/da9211.h2
-rw-r--r--include/linux/regulator/driver.h13
-rw-r--r--include/linux/regulator/machine.h13
-rw-r--r--include/linux/regulator/mt6397-regulator.h49
-rw-r--r--include/linux/regulator/pfuze100.h14
-rw-r--r--include/linux/resource_ext.h77
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/smp.h7
-rw-r--r--include/linux/spi/at86rf230.h4
-rw-r--r--include/linux/spi/l4f00242t03.h4
-rw-r--r--include/linux/spi/lms283gf05.h4
-rw-r--r--include/linux/spi/mxs-spi.h4
-rw-r--r--include/linux/spi/pxa2xx_spi.h5
-rw-r--r--include/linux/spi/rspi.h5
-rw-r--r--include/linux/spi/sh_hspi.h4
-rw-r--r--include/linux/spi/sh_msiof.h2
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spi/tle62x0.h4
-rw-r--r--include/linux/spi/tsc2005.h5
-rw-r--r--include/linux/srcu.h14
-rw-r--r--include/linux/timekeeping.h21
-rw-r--r--include/linux/wait.h26
-rw-r--r--include/linux/workqueue.h8
-rw-r--r--include/trace/ftrace.h7
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/xen/grant_table.h43
-rw-r--r--include/xen/interface/features.h6
-rw-r--r--include/xen/interface/grant_table.h7
-rw-r--r--init/Kconfig18
-rw-r--r--init/main.c13
-rw-r--r--kernel/Kconfig.locks4
-rw-r--r--kernel/cpu.c56
-rw-r--r--kernel/events/core.c464
-rw-r--r--kernel/events/ring_buffer.c3
-rw-r--r--kernel/futex.c6
-rw-r--r--kernel/locking/Makefile3
-rw-r--r--kernel/locking/mcs_spinlock.h16
-rw-r--r--kernel/locking/mutex.c62
-rw-r--r--kernel/locking/osq_lock.c (renamed from kernel/locking/mcs_spinlock.c)9
-rw-r--r--kernel/locking/rtmutex.c7
-rw-r--r--kernel/locking/rwsem-spinlock.c2
-rw-r--r--kernel/locking/rwsem-xadd.c3
-rw-r--r--kernel/notifier.c3
-rw-r--r--kernel/power/Kconfig1
-rw-r--r--kernel/power/qos.c91
-rw-r--r--kernel/power/snapshot.c11
-rw-r--r--kernel/rcu/Makefile3
-rw-r--r--kernel/rcu/rcu.h6
-rw-r--r--kernel/rcu/rcutorture.c66
-rw-r--r--kernel/rcu/srcu.c2
-rw-r--r--kernel/rcu/tiny.c113
-rw-r--r--kernel/rcu/tiny_plugin.h9
-rw-r--r--kernel/rcu/tree.c355
-rw-r--r--kernel/rcu/tree.h62
-rw-r--r--kernel/rcu/tree_plugin.h271
-rw-r--r--kernel/rcu/tree_trace.c8
-rw-r--r--kernel/resource.c25
-rw-r--r--kernel/sched/completion.c18
-rw-r--r--kernel/sched/core.c109
-rw-r--r--kernel/sched/cpudeadline.c27
-rw-r--r--kernel/sched/cpudeadline.h2
-rw-r--r--kernel/sched/deadline.c51
-rw-r--r--kernel/sched/debug.c1
-rw-r--r--kernel/sched/fair.c7
-rw-r--r--kernel/sched/idle.c3
-rw-r--r--kernel/sched/rt.c26
-rw-r--r--kernel/sched/sched.h22
-rw-r--r--kernel/softirq.c9
-rw-r--r--kernel/time/hrtimer.c112
-rw-r--r--kernel/time/ntp.c4
-rw-r--r--kernel/time/timekeeping.c12
-rw-r--r--kernel/trace/power-traces.c1
-rw-r--r--kernel/trace/trace_event_perf.c4
-rw-r--r--kernel/trace/trace_kprobe.c4
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/memory.c2
-rw-r--r--security/tomoyo/Kconfig1
-rw-r--r--sound/oss/dmasound/dmasound_atari.c2
-rw-r--r--tools/lib/api/fs/debugfs.c43
-rw-r--r--tools/lib/api/fs/debugfs.h3
-rw-r--r--tools/lib/traceevent/event-parse.c328
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt2
-rw-r--r--tools/perf/Documentation/perf-list.txt13
-rw-r--r--tools/perf/Documentation/perf-mem.txt9
-rw-r--r--tools/perf/Documentation/perf-record.txt19
-rw-r--r--tools/perf/Documentation/perf-script.txt28
-rw-r--r--tools/perf/Documentation/perf-stat.txt20
-rw-r--r--tools/perf/bench/futex.h13
-rw-r--r--tools/perf/builtin-buildid-cache.c4
-rw-r--r--tools/perf/builtin-diff.c248
-rw-r--r--tools/perf/builtin-inject.c5
-rw-r--r--tools/perf/builtin-mem.c131
-rw-r--r--tools/perf/builtin-record.c70
-rw-r--r--tools/perf/builtin-report.c16
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c106
-rw-r--r--tools/perf/config/Makefile6
-rw-r--r--tools/perf/config/feature-checks/Makefile4
-rw-r--r--tools/perf/config/feature-checks/test-all.c5
-rw-r--r--tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c14
-rw-r--r--tools/perf/tests/attr.py1
-rw-r--r--tools/perf/tests/hists_cumulate.c2
-rw-r--r--tools/perf/tests/hists_output.c2
-rw-r--r--tools/perf/tests/make1
-rw-r--r--tools/perf/tests/parse-events.c60
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/ui/browsers/annotate.c3
-rw-r--r--tools/perf/ui/hist.c12
-rw-r--r--tools/perf/ui/progress.h4
-rw-r--r--tools/perf/ui/tui/helpline.c3
-rw-r--r--tools/perf/ui/tui/setup.c3
-rw-r--r--tools/perf/util/annotate.c2
-rw-r--r--tools/perf/util/color.c126
-rw-r--r--tools/perf/util/color.h2
-rw-r--r--tools/perf/util/dso.c6
-rw-r--r--tools/perf/util/dso.h1
-rw-r--r--tools/perf/util/evlist.c27
-rw-r--r--tools/perf/util/evlist.h1
-rw-r--r--tools/perf/util/evsel.c4
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c48
-rw-r--r--tools/perf/util/hist.h11
-rw-r--r--tools/perf/util/parse-events.c27
-rw-r--r--tools/perf/util/parse-events.h3
-rw-r--r--tools/perf/util/parse-events.l1
-rw-r--r--tools/perf/util/parse-events.y26
-rw-r--r--tools/perf/util/parse-options.c2
-rw-r--r--tools/perf/util/pmu.c102
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/session.c8
-rw-r--r--tools/perf/util/session.h3
-rw-r--r--tools/perf/util/sort.c37
-rw-r--r--tools/perf/util/symbol-elf.c13
-rw-r--r--tools/perf/util/symbol.c2
-rw-r--r--tools/perf/util/unwind-libunwind.c31
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslibcfs.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh18
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh9
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh20
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh2
781 files changed, 16099 insertions, 7777 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
index 20979f8b3edb..505f080d20a1 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
@@ -52,12 +52,18 @@ Description: Per-pmu performance monitoring events specific to the running syste
52 event=0x2abc 52 event=0x2abc
53 event=0x423,inv,cmask=0x3 53 event=0x423,inv,cmask=0x3
54 domain=0x1,offset=0x8,starting_index=0xffff 54 domain=0x1,offset=0x8,starting_index=0xffff
55 domain=0x1,offset=0x8,core=?
55 56
56 Each of the assignments indicates a value to be assigned to a 57 Each of the assignments indicates a value to be assigned to a
57 particular set of bits (as defined by the format file 58 particular set of bits (as defined by the format file
58 corresponding to the <term>) in the perf_event structure passed 59 corresponding to the <term>) in the perf_event structure passed
59 to the perf_open syscall. 60 to the perf_open syscall.
60 61
62 In the case of the last example, a value replacing "?" would
63 need to be provided by the user selecting the particular event.
64 This is referred to as "event parameterization". Event
65 parameters have the format 'param=?'.
66
61What: /sys/bus/event_source/devices/<pmu>/events/<event>.unit 67What: /sys/bus/event_source/devices/<pmu>/events/<event>.unit
62Date: 2014/02/24 68Date: 2014/02/24
63Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> 69Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index ed186a902d31..b57c0c1cdac6 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -15,7 +15,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
15 21 seconds. 15 21 seconds.
16 16
17 This configuration parameter may be changed at runtime via the 17 This configuration parameter may be changed at runtime via the
18 /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however 18 /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout, however
19 this parameter is checked only at the beginning of a cycle. 19 this parameter is checked only at the beginning of a cycle.
20 So if you are 10 seconds into a 40-second stall, setting this 20 So if you are 10 seconds into a 40-second stall, setting this
21 sysfs parameter to (say) five will shorten the timeout for the 21 sysfs parameter to (say) five will shorten the timeout for the
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
152"D" indicates that dyntick-idle processing is enabled ("." is printed 152"D" indicates that dyntick-idle processing is enabled ("." is printed
153otherwise, for example, if disabled via the "nohz=" kernel boot parameter). 153otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
154 154
155If the relevant grace-period kthread has been unable to run prior to
156the stall warning, the following additional line is printed:
157
158 rcu_preempt kthread starved for 2023 jiffies!
159
160Starving the grace-period kthreads of CPU time can of course result in
161RCU CPU stall warnings even when all CPUs and tasks have passed through
162the required quiescent states.
163
155 164
156Multiple Warnings From One Stall 165Multiple Warnings From One Stall
157 166
@@ -187,6 +196,11 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
187 behavior, you might need to replace some of the cond_resched() 196 behavior, you might need to replace some of the cond_resched()
188 calls with calls to cond_resched_rcu_qs(). 197 calls with calls to cond_resched_rcu_qs().
189 198
199o Anything that prevents RCU's grace-period kthreads from running.
200 This can result in the "All QSes seen" console-log message.
201 This message will include information on when the kthread last
202 ran and how often it should be expected to run.
203
190o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might 204o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
191 happen to preempt a low-priority task in the middle of an RCU 205 happen to preempt a low-priority task in the middle of an RCU
192 read-side critical section. This is especially damaging if 206 read-side critical section. This is especially damaging if
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index b63b9bb3bc0c..08651da15448 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -56,14 +56,14 @@ rcuboost:
56 56
57The output of "cat rcu/rcu_preempt/rcudata" looks as follows: 57The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
58 58
59 0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716 59 0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
60 1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982 60 1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
61 2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458 61 2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
62 3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622 62 3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
63 4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521 63 4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
64 5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698 64 5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
65 6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353 65 6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
66 7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969 66 7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
67 67
68This file has one line per CPU, or eight for this 8-CPU system. 68This file has one line per CPU, or eight for this 8-CPU system.
69The fields are as follows: 69The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
188Kernels compiled with CONFIG_RCU_BOOST=y display the following from 188Kernels compiled with CONFIG_RCU_BOOST=y display the following from
189/debug/rcu/rcu_preempt/rcudata: 189/debug/rcu/rcu_preempt/rcudata:
190 190
191 0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871 191 0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
192 1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485 192 1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
193 2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490 193 2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
194 3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290 194 3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
195 4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114 195 4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
196 5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722 196 5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
197 6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811 197 6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
198 7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042 198 7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
199 199
200This is similar to the output discussed above, but contains the following 200This is similar to the output discussed above, but contains the following
201additional fields: 201additional fields:
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index b60d2ab69497..9b121a569ab4 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -243,7 +243,7 @@ input driver:
243 .owner = THIS_MODULE, 243 .owner = THIS_MODULE,
244 .pm = &mpu3050_pm, 244 .pm = &mpu3050_pm,
245 .of_match_table = mpu3050_of_match, 245 .of_match_table = mpu3050_of_match,
246 .acpi_match_table ACPI_PTR(mpu3050_acpi_match), 246 .acpi_match_table = ACPI_PTR(mpu3050_acpi_match),
247 }, 247 },
248 .probe = mpu3050_probe, 248 .probe = mpu3050_probe,
249 .remove = mpu3050_remove, 249 .remove = mpu3050_remove,
diff --git a/Documentation/cgroups/00-INDEX b/Documentation/cgroups/00-INDEX
index bc461b6425a7..96ce071a3633 100644
--- a/Documentation/cgroups/00-INDEX
+++ b/Documentation/cgroups/00-INDEX
@@ -24,3 +24,5 @@ net_prio.txt
24 - Network priority cgroups details and usages. 24 - Network priority cgroups details and usages.
25resource_counter.txt 25resource_counter.txt
26 - Resource Counter API. 26 - Resource Counter API.
27unified-hierarchy.txt
 28 - Description of the new/next cgroup interface.
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index 765d7fc0e692..655750743fb0 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -37,6 +37,14 @@ controlling P state selection. These files have been added to
37 no_turbo: limits the driver to selecting P states below the turbo 37 no_turbo: limits the driver to selecting P states below the turbo
38 frequency range. 38 frequency range.
39 39
40 turbo_pct: displays the percentage of the total performance that
41 is supported by hardware that is in the turbo range. This number
42 is independent of whether turbo has been disabled or not.
43
44 num_pstates: displays the number of pstates that are supported
45 by hardware. This number is independent of whether turbo has
46 been disabled or not.
47
40For contemporary Intel processors, the frequency is controlled by the 48For contemporary Intel processors, the frequency is controlled by the
41processor itself and the P-states exposed to software are related to 49processor itself and the P-states exposed to software are related to
42performance levels. The idea that frequency can be set to a single 50performance levels. The idea that frequency can be set to a single
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 4ab09f2202d4..c2340eeeb97f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -37,9 +37,10 @@ Required properties when using sub-nodes:
37 37
38 38
39Sub-nodes required properties: 39Sub-nodes required properties:
40- reg : the port number 40- reg : the port number
41- phys : reference to the SATA PHY node 41And at least one of the following properties:
42 42- phys : reference to the SATA PHY node
43- target-supply : regulator for SATA target power
43 44
44Examples: 45Examples:
45 sata@ffe08000 { 46 sata@ffe08000 {
@@ -68,10 +69,12 @@ With sub-nodes:
68 sata0: sata-port@0 { 69 sata0: sata-port@0 {
69 reg = <0>; 70 reg = <0>;
70 phys = <&sata_phy 0>; 71 phys = <&sata_phy 0>;
72 target-supply = <&reg_sata0>;
71 }; 73 };
72 74
73 sata1: sata-port@1 { 75 sata1: sata-port@1 {
74 reg = <1>; 76 reg = <1>;
75 phys = <&sata_phy 1>; 77 phys = <&sata_phy 1>;
 78 target-supply = <&reg_sata1>;
76 }; 79 };
77 }; 80 };
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
new file mode 100644
index 000000000000..b54bf3a2ff57
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
@@ -0,0 +1,110 @@
1
2* Samsung Exynos PPMU (Platform Performance Monitoring Unit) device
3
4The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
5each IP. PPMU provides the primitive values to get performance data. These
6PPMU events provide information of the SoC's behaviors so that you may
7use to analyze system performance, to make behaviors visible and to count
8usages of each IP (DMC, CPU, RIGHTBUS, LEFTBUS, CAM interface, LCD, G3D, MFC).
9The Exynos PPMU driver uses the devfreq-event class to provide event data
10to various devfreq devices. The devfreq devices would use the event data when
 11determining the current state of each IP.
12
13Required properties:
14- compatible: Should be "samsung,exynos-ppmu".
15- reg: physical base address of each PPMU and length of memory mapped region.
16
17Optional properties:
18- clock-names : the name of clock used by the PPMU, "ppmu"
19- clocks : phandles for clock specified in "clock-names" property
20- #clock-cells: should be 1.
21
22Example1 : PPMU nodes in exynos3250.dtsi are listed below.
23
24 ppmu_dmc0: ppmu_dmc0@106a0000 {
25 compatible = "samsung,exynos-ppmu";
26 reg = <0x106a0000 0x2000>;
27 status = "disabled";
28 };
29
30 ppmu_dmc1: ppmu_dmc1@106b0000 {
31 compatible = "samsung,exynos-ppmu";
32 reg = <0x106b0000 0x2000>;
33 status = "disabled";
34 };
35
36 ppmu_cpu: ppmu_cpu@106c0000 {
37 compatible = "samsung,exynos-ppmu";
38 reg = <0x106c0000 0x2000>;
39 status = "disabled";
40 };
41
42 ppmu_rightbus: ppmu_rightbus@112a0000 {
43 compatible = "samsung,exynos-ppmu";
44 reg = <0x112a0000 0x2000>;
45 clocks = <&cmu CLK_PPMURIGHT>;
46 clock-names = "ppmu";
47 status = "disabled";
48 };
49
50 ppmu_leftbus: ppmu_leftbus0@116a0000 {
51 compatible = "samsung,exynos-ppmu";
52 reg = <0x116a0000 0x2000>;
53 clocks = <&cmu CLK_PPMULEFT>;
54 clock-names = "ppmu";
55 status = "disabled";
56 };
57
58Example2 : Events of each PPMU node in exynos3250-rinato.dts are listed below.
59
60 &ppmu_dmc0 {
61 status = "okay";
62
63 events {
64 ppmu_dmc0_3: ppmu-event3-dmc0 {
65 event-name = "ppmu-event3-dmc0";
66 };
67
68 ppmu_dmc0_2: ppmu-event2-dmc0 {
69 event-name = "ppmu-event2-dmc0";
70 };
71
72 ppmu_dmc0_1: ppmu-event1-dmc0 {
73 event-name = "ppmu-event1-dmc0";
74 };
75
76 ppmu_dmc0_0: ppmu-event0-dmc0 {
77 event-name = "ppmu-event0-dmc0";
78 };
79 };
80 };
81
82 &ppmu_dmc1 {
83 status = "okay";
84
85 events {
86 ppmu_dmc1_3: ppmu-event3-dmc1 {
87 event-name = "ppmu-event3-dmc1";
88 };
89 };
90 };
91
92 &ppmu_leftbus {
93 status = "okay";
94
95 events {
96 ppmu_leftbus_3: ppmu-event3-leftbus {
97 event-name = "ppmu-event3-leftbus";
98 };
99 };
100 };
101
102 &ppmu_rightbus {
103 status = "okay";
104
105 events {
106 ppmu_rightbus_3: ppmu-event3-rightbus {
107 event-name = "ppmu-event3-rightbus";
108 };
109 };
110 };
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 75fdfaf41831..e39f0bc1f55e 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -39,6 +39,12 @@ to get matched with their hardware counterparts as follow:
39 -BUCKn : 1-4. 39 -BUCKn : 1-4.
40 Use standard regulator bindings for it ('regulator-off-in-suspend'). 40 Use standard regulator bindings for it ('regulator-off-in-suspend').
41 41
42 LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
43 control. To turn this feature on this property must be added to the regulator
44 sub-node:
45 - maxim,ena-gpios : one GPIO specifier enable control (the gpio
46 flags are actually ignored and always
47 ACTIVE_HIGH is used)
42 48
43Example: 49Example:
44 50
@@ -65,4 +71,12 @@ Example:
65 regulator-always-on; 71 regulator-always-on;
66 regulator-boot-on; 72 regulator-boot-on;
67 }; 73 };
74
75 buck9_reg {
76 regulator-compatible = "BUCK9";
77 regulator-name = "CAM_ISP_CORE_1.2V";
78 regulator-min-microvolt = <1000000>;
79 regulator-max-microvolt = <1200000>;
80 maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
81 };
68 } 82 }
diff --git a/Documentation/devicetree/bindings/pci/versatile.txt b/Documentation/devicetree/bindings/pci/versatile.txt
new file mode 100644
index 000000000000..ebd1e7d0403e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/versatile.txt
@@ -0,0 +1,59 @@
1* ARM Versatile Platform Baseboard PCI interface
2
3PCI host controller found on the ARM Versatile PB board's FPGA.
4
5Required properties:
6- compatible: should contain "arm,versatile-pci" to identify the Versatile PCI
7 controller.
8- reg: base addresses and lengths of the pci controller. There must be 3
9 entries:
10 - Versatile-specific registers
11 - Self Config space
12 - Config space
13- #address-cells: set to <3>
14- #size-cells: set to <2>
15- device_type: set to "pci"
16- bus-range: set to <0 0xff>
17- ranges: ranges for the PCI memory and I/O regions
18- #interrupt-cells: set to <1>
19- interrupt-map-mask and interrupt-map: standard PCI properties to define
20 the mapping of the PCI interface to interrupt numbers.
21
22Example:
23
24pci-controller@10001000 {
25 compatible = "arm,versatile-pci";
26 device_type = "pci";
27 reg = <0x10001000 0x1000
28 0x41000000 0x10000
29 0x42000000 0x100000>;
30 bus-range = <0 0xff>;
31 #address-cells = <3>;
32 #size-cells = <2>;
33 #interrupt-cells = <1>;
34
35 ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000 /* downstream I/O */
36 0x02000000 0 0x50000000 0x50000000 0 0x10000000 /* non-prefetchable memory */
37 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
38
39 interrupt-map-mask = <0x1800 0 0 7>;
40 interrupt-map = <0x1800 0 0 1 &sic 28
41 0x1800 0 0 2 &sic 29
42 0x1800 0 0 3 &sic 30
43 0x1800 0 0 4 &sic 27
44
45 0x1000 0 0 1 &sic 27
46 0x1000 0 0 2 &sic 28
47 0x1000 0 0 3 &sic 29
48 0x1000 0 0 4 &sic 30
49
50 0x0800 0 0 1 &sic 30
51 0x0800 0 0 2 &sic 27
52 0x0800 0 0 3 &sic 28
53 0x0800 0 0 4 &sic 29
54
55 0x0000 0 0 1 &sic 29
56 0x0000 0 0 2 &sic 30
57 0x0000 0 0 3 &sic 27
58 0x0000 0 0 4 &sic 28>;
59};
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index 240019a82f9a..eb618907c7de 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -11,6 +11,7 @@ Required properties:
11 BUCKA and BUCKB. 11 BUCKA and BUCKB.
12 12
13Optional properties: 13Optional properties:
14- enable-gpios: platform gpio for control of BUCKA/BUCKB.
14- Any optional property defined in regulator.txt 15- Any optional property defined in regulator.txt
15 16
16Example 1) DA9211 17Example 1) DA9211
@@ -27,6 +28,7 @@ Example 1) DA9211
27 regulator-max-microvolt = <1570000>; 28 regulator-max-microvolt = <1570000>;
28 regulator-min-microamp = <2000000>; 29 regulator-min-microamp = <2000000>;
29 regulator-max-microamp = <5000000>; 30 regulator-max-microamp = <5000000>;
31 enable-gpios = <&gpio 27 0>;
30 }; 32 };
31 BUCKB { 33 BUCKB {
32 regulator-name = "VBUCKB"; 34 regulator-name = "VBUCKB";
@@ -34,11 +36,12 @@ Example 1) DA9211
34 regulator-max-microvolt = <1570000>; 36 regulator-max-microvolt = <1570000>;
35 regulator-min-microamp = <2000000>; 37 regulator-min-microamp = <2000000>;
36 regulator-max-microamp = <5000000>; 38 regulator-max-microamp = <5000000>;
39 enable-gpios = <&gpio 17 0>;
37 }; 40 };
38 }; 41 };
39 }; 42 };
40 43
41Example 2) DA92113 44Example 2) DA9213
42 pmic: da9213@68 { 45 pmic: da9213@68 {
43 compatible = "dlg,da9213"; 46 compatible = "dlg,da9213";
44 reg = <0x68>; 47 reg = <0x68>;
@@ -51,6 +54,7 @@ Example 2) DA92113
51 regulator-max-microvolt = <1570000>; 54 regulator-max-microvolt = <1570000>;
52 regulator-min-microamp = <3000000>; 55 regulator-min-microamp = <3000000>;
53 regulator-max-microamp = <6000000>; 56 regulator-max-microamp = <6000000>;
57 enable-gpios = <&gpio 27 0>;
54 }; 58 };
55 BUCKB { 59 BUCKB {
56 regulator-name = "VBUCKB"; 60 regulator-name = "VBUCKB";
@@ -58,6 +62,7 @@ Example 2) DA92113
58 regulator-max-microvolt = <1570000>; 62 regulator-max-microvolt = <1570000>;
59 regulator-min-microamp = <3000000>; 63 regulator-min-microamp = <3000000>;
60 regulator-max-microamp = <6000000>; 64 regulator-max-microamp = <6000000>;
65 enable-gpios = <&gpio 17 0>;
61 }; 66 };
62 }; 67 };
63 }; 68 };
diff --git a/Documentation/devicetree/bindings/regulator/isl9305.txt b/Documentation/devicetree/bindings/regulator/isl9305.txt
index a626fc1bbf0d..d6e7c9ec9413 100644
--- a/Documentation/devicetree/bindings/regulator/isl9305.txt
+++ b/Documentation/devicetree/bindings/regulator/isl9305.txt
@@ -2,7 +2,7 @@ Intersil ISL9305/ISL9305H voltage regulator
2 2
3Required properties: 3Required properties:
4 4
5- compatible: "isl,isl9305" or "isl,isl9305h" 5- compatible: "isil,isl9305" or "isil,isl9305h"
6- reg: I2C slave address, usually 0x68. 6- reg: I2C slave address, usually 0x68.
7- regulators: A node that houses a sub-node for each regulator within the 7- regulators: A node that houses a sub-node for each regulator within the
8 device. Each sub-node is identified using the node's name, with valid 8 device. Each sub-node is identified using the node's name, with valid
@@ -19,7 +19,7 @@ Optional properties:
19Example 19Example
20 20
21 pmic: isl9305@68 { 21 pmic: isl9305@68 {
22 compatible = "isl,isl9305"; 22 compatible = "isil,isl9305";
23 reg = <0x68>; 23 reg = <0x68>;
24 24
25 VINDCD1-supply = <&system_power>; 25 VINDCD1-supply = <&system_power>;
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
new file mode 100644
index 000000000000..a42b1d6e9863
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
@@ -0,0 +1,217 @@
1Mediatek MT6397 Regulator Driver
2
3Required properties:
4- compatible: "mediatek,mt6397-regulator"
5- mt6397regulator: List of regulators provided by this controller. It is named
6 according to its regulator type, buck_<name> and ldo_<name>.
7 The definition for each of these nodes is defined using the standard binding
8 for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
9
 10The valid names for regulators are:
11BUCK:
12 buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu,
13 buck_vdrm, buck_vio18
14LDO:
15 ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch,
16 ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6,
17 ldo_vibr
18
19Example:
20 pmic {
21 compatible = "mediatek,mt6397";
22
23 mt6397regulator: mt6397regulator {
24 compatible = "mediatek,mt6397-regulator";
25
26 mt6397_vpca15_reg: buck_vpca15 {
27 regulator-compatible = "buck_vpca15";
28 regulator-name = "vpca15";
29 regulator-min-microvolt = < 850000>;
30 regulator-max-microvolt = <1350000>;
31 regulator-ramp-delay = <12500>;
32 regulator-enable-ramp-delay = <200>;
33 };
34
35 mt6397_vpca7_reg: buck_vpca7 {
36 regulator-compatible = "buck_vpca7";
37 regulator-name = "vpca7";
38 regulator-min-microvolt = < 850000>;
39 regulator-max-microvolt = <1350000>;
40 regulator-ramp-delay = <12500>;
41 regulator-enable-ramp-delay = <115>;
42 };
43
44 mt6397_vsramca15_reg: buck_vsramca15 {
45 regulator-compatible = "buck_vsramca15";
46 regulator-name = "vsramca15";
47 regulator-min-microvolt = < 850000>;
48 regulator-max-microvolt = <1350000>;
49 regulator-ramp-delay = <12500>;
50 regulator-enable-ramp-delay = <115>;
51
52 };
53
54 mt6397_vsramca7_reg: buck_vsramca7 {
55 regulator-compatible = "buck_vsramca7";
56 regulator-name = "vsramca7";
57 regulator-min-microvolt = < 850000>;
58 regulator-max-microvolt = <1350000>;
59 regulator-ramp-delay = <12500>;
60 regulator-enable-ramp-delay = <115>;
61
62 };
63
64 mt6397_vcore_reg: buck_vcore {
65 regulator-compatible = "buck_vcore";
66 regulator-name = "vcore";
67 regulator-min-microvolt = < 850000>;
68 regulator-max-microvolt = <1350000>;
69 regulator-ramp-delay = <12500>;
70 regulator-enable-ramp-delay = <115>;
71 };
72
73 mt6397_vgpu_reg: buck_vgpu {
74 regulator-compatible = "buck_vgpu";
75 regulator-name = "vgpu";
76 regulator-min-microvolt = < 700000>;
77 regulator-max-microvolt = <1350000>;
78 regulator-ramp-delay = <12500>;
79 regulator-enable-ramp-delay = <115>;
80 };
81
82 mt6397_vdrm_reg: buck_vdrm {
83 regulator-compatible = "buck_vdrm";
84 regulator-name = "vdrm";
85 regulator-min-microvolt = < 800000>;
86 regulator-max-microvolt = <1400000>;
87 regulator-ramp-delay = <12500>;
88 regulator-enable-ramp-delay = <500>;
89 };
90
91 mt6397_vio18_reg: buck_vio18 {
92 regulator-compatible = "buck_vio18";
93 regulator-name = "vio18";
94 regulator-min-microvolt = <1500000>;
95 regulator-max-microvolt = <2120000>;
96 regulator-ramp-delay = <12500>;
97 regulator-enable-ramp-delay = <500>;
98 };
99
100 mt6397_vtcxo_reg: ldo_vtcxo {
101 regulator-compatible = "ldo_vtcxo";
102 regulator-name = "vtcxo";
103 regulator-min-microvolt = <2800000>;
104 regulator-max-microvolt = <2800000>;
105 regulator-enable-ramp-delay = <90>;
106 };
107
108 mt6397_va28_reg: ldo_va28 {
109 regulator-compatible = "ldo_va28";
110 regulator-name = "va28";
111 /* fixed output 2.8 V */
112 regulator-enable-ramp-delay = <218>;
113 };
114
115 mt6397_vcama_reg: ldo_vcama {
116 regulator-compatible = "ldo_vcama";
117 regulator-name = "vcama";
118 regulator-min-microvolt = <1500000>;
119 regulator-max-microvolt = <2800000>;
120 regulator-enable-ramp-delay = <218>;
121 };
122
123 mt6397_vio28_reg: ldo_vio28 {
124 regulator-compatible = "ldo_vio28";
125 regulator-name = "vio28";
126 /* fixed output 2.8 V */
127 regulator-enable-ramp-delay = <240>;
128 };
129
130 mt6397_usb_reg: ldo_vusb {
131 regulator-compatible = "ldo_vusb";
132 regulator-name = "vusb";
133 /* fixed output 3.3 V */
134 regulator-enable-ramp-delay = <218>;
135 };
136
137 mt6397_vmc_reg: ldo_vmc {
138 regulator-compatible = "ldo_vmc";
139 regulator-name = "vmc";
140 regulator-min-microvolt = <1800000>;
141 regulator-max-microvolt = <3300000>;
142 regulator-enable-ramp-delay = <218>;
143 };
144
145 mt6397_vmch_reg: ldo_vmch {
146 regulator-compatible = "ldo_vmch";
147 regulator-name = "vmch";
148 regulator-min-microvolt = <3000000>;
149 regulator-max-microvolt = <3300000>;
150 regulator-enable-ramp-delay = <218>;
151 };
152
153 mt6397_vemc_3v3_reg: ldo_vemc3v3 {
154 regulator-compatible = "ldo_vemc3v3";
155 regulator-name = "vemc_3v3";
156 regulator-min-microvolt = <3000000>;
157 regulator-max-microvolt = <3300000>;
158 regulator-enable-ramp-delay = <218>;
159 };
160
161 mt6397_vgp1_reg: ldo_vgp1 {
162 regulator-compatible = "ldo_vgp1";
163 regulator-name = "vcamd";
164 regulator-min-microvolt = <1220000>;
165 regulator-max-microvolt = <3300000>;
166 regulator-enable-ramp-delay = <240>;
167 };
168
169 mt6397_vgp2_reg: ldo_vgp2 {
170 egulator-compatible = "ldo_vgp2";
171 regulator-name = "vcamio";
172 regulator-min-microvolt = <1000000>;
173 regulator-max-microvolt = <3300000>;
174 regulator-enable-ramp-delay = <218>;
175 };
176
177 mt6397_vgp3_reg: ldo_vgp3 {
178 regulator-compatible = "ldo_vgp3";
179 regulator-name = "vcamaf";
180 regulator-min-microvolt = <1200000>;
181 regulator-max-microvolt = <3300000>;
182 regulator-enable-ramp-delay = <218>;
183 };
184
185 mt6397_vgp4_reg: ldo_vgp4 {
186 regulator-compatible = "ldo_vgp4";
187 regulator-name = "vgp4";
188 regulator-min-microvolt = <1200000>;
189 regulator-max-microvolt = <3300000>;
190 regulator-enable-ramp-delay = <218>;
191 };
192
193 mt6397_vgp5_reg: ldo_vgp5 {
194 regulator-compatible = "ldo_vgp5";
195 regulator-name = "vgp5";
196 regulator-min-microvolt = <1200000>;
197 regulator-max-microvolt = <3000000>;
198 regulator-enable-ramp-delay = <218>;
199 };
200
201 mt6397_vgp6_reg: ldo_vgp6 {
202 regulator-compatible = "ldo_vgp6";
203 regulator-name = "vgp6";
204 regulator-min-microvolt = <1200000>;
205 regulator-max-microvolt = <3300000>;
206 regulator-enable-ramp-delay = <218>;
207 };
208
209 mt6397_vibr_reg: ldo_vibr {
210 regulator-compatible = "ldo_vibr";
211 regulator-name = "vibr";
212 regulator-min-microvolt = <1200000>;
213 regulator-max-microvolt = <3300000>;
214 regulator-enable-ramp-delay = <218>;
215 };
216 };
217 };
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index 34ef5d16d0f1..9b40db88f637 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -1,7 +1,7 @@
1PFUZE100 family of regulators 1PFUZE100 family of regulators
2 2
3Required properties: 3Required properties:
4- compatible: "fsl,pfuze100" or "fsl,pfuze200" 4- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
5- reg: I2C slave address 5- reg: I2C slave address
6 6
7Required child node: 7Required child node:
@@ -14,6 +14,8 @@ Required child node:
14 sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6 14 sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
15 --PFUZE200 15 --PFUZE200
16 sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6 16 sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6
17 --PFUZE3000
18 sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
17 19
18Each regulator is defined using the standard binding for regulators. 20Each regulator is defined using the standard binding for regulators.
19 21
@@ -205,3 +207,93 @@ Example 2: PFUZE200
205 }; 207 };
206 }; 208 };
207 }; 209 };
210
211Example 3: PFUZE3000
212
213 pmic: pfuze3000@08 {
214 compatible = "fsl,pfuze3000";
215 reg = <0x08>;
216
217 regulators {
218 sw1a_reg: sw1a {
219 regulator-min-microvolt = <700000>;
220 regulator-max-microvolt = <1475000>;
221 regulator-boot-on;
222 regulator-always-on;
223 regulator-ramp-delay = <6250>;
224 };
225 /* use sw1c_reg to align with pfuze100/pfuze200 */
226 sw1c_reg: sw1b {
227 regulator-min-microvolt = <700000>;
228 regulator-max-microvolt = <1475000>;
229 regulator-boot-on;
230 regulator-always-on;
231 regulator-ramp-delay = <6250>;
232 };
233
234 sw2_reg: sw2 {
235 regulator-min-microvolt = <2500000>;
236 regulator-max-microvolt = <3300000>;
237 regulator-boot-on;
238 regulator-always-on;
239 };
240
241 sw3a_reg: sw3 {
242 regulator-min-microvolt = <900000>;
243 regulator-max-microvolt = <1650000>;
244 regulator-boot-on;
245 regulator-always-on;
246 };
247
248 swbst_reg: swbst {
249 regulator-min-microvolt = <5000000>;
250 regulator-max-microvolt = <5150000>;
251 };
252
253 snvs_reg: vsnvs {
254 regulator-min-microvolt = <1000000>;
255 regulator-max-microvolt = <3000000>;
256 regulator-boot-on;
257 regulator-always-on;
258 };
259
260 vref_reg: vrefddr {
261 regulator-boot-on;
262 regulator-always-on;
263 };
264
265 vgen1_reg: vldo1 {
266 regulator-min-microvolt = <1800000>;
267 regulator-max-microvolt = <3300000>;
268 regulator-always-on;
269 };
270
271 vgen2_reg: vldo2 {
272 regulator-min-microvolt = <800000>;
273 regulator-max-microvolt = <1550000>;
274 };
275
276 vgen3_reg: vccsd {
277 regulator-min-microvolt = <2850000>;
278 regulator-max-microvolt = <3300000>;
279 regulator-always-on;
280 };
281
282 vgen4_reg: v33 {
283 regulator-min-microvolt = <2850000>;
284 regulator-max-microvolt = <3300000>;
285 };
286
287 vgen5_reg: vldo3 {
288 regulator-min-microvolt = <1800000>;
289 regulator-max-microvolt = <3300000>;
290 regulator-always-on;
291 };
292
293 vgen6_reg: vldo4 {
294 regulator-min-microvolt = <1800000>;
295 regulator-max-microvolt = <3300000>;
296 regulator-always-on;
297 };
298 };
299 };
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index d11c3721e7cd..4c388bb2f0a2 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -30,6 +30,22 @@ Optional properties:
30 specifiers, one for transmission, and one for 30 specifiers, one for transmission, and one for
31 reception. 31 reception.
32- dma-names : Must contain a list of two DMA names, "tx" and "rx". 32- dma-names : Must contain a list of two DMA names, "tx" and "rx".
33- renesas,dtdl : delay sync signal (setup) in transmit mode.
34 Must contain one of the following values:
35 0 (no bit delay)
36 50 (0.5-clock-cycle delay)
37 100 (1-clock-cycle delay)
38 150 (1.5-clock-cycle delay)
39 200 (2-clock-cycle delay)
40
41- renesas,syncdl : delay sync signal (hold) in transmit mode.
42 Must contain one of the following values:
43 0 (no bit delay)
44 50 (0.5-clock-cycle delay)
45 100 (1-clock-cycle delay)
46 150 (1.5-clock-cycle delay)
47 200 (2-clock-cycle delay)
48 300 (3-clock-cycle delay)
33 49
34Optional properties, deprecated for soctype-specific bindings: 50Optional properties, deprecated for soctype-specific bindings:
35- renesas,tx-fifo-size : Overrides the default tx fifo size given in words 51- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
new file mode 100644
index 000000000000..4c7adb8f777c
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sirf.txt
@@ -0,0 +1,41 @@
1* CSR SiRFprimaII Serial Peripheral Interface
2
3Required properties:
4- compatible : Should be "sirf,prima2-spi"
5- reg : Offset and length of the register set for the device
6- interrupts : Should contain SPI interrupt
7- resets: phandle to the reset controller asserting this device in
8 reset
9 See ../reset/reset.txt for details.
10- dmas : Must contain an entry for each entry in dma-names.
11 See ../dma/dma.txt for details.
12- dma-names : Must include the following entries:
13 - rx
14 - tx
15- clocks : Must contain an entry for each entry in clock-names.
16 See ../clocks/clock-bindings.txt for details.
17
18- #address-cells: Number of cells required to define a chip select
19 address on the SPI bus. Should be set to 1.
20- #size-cells: Should be zero.
21
22Optional properties:
23- spi-max-frequency: Specifies maximum SPI clock frequency,
24 Units - Hz. Definition as per
25 Documentation/devicetree/bindings/spi/spi-bus.txt
26- cs-gpios: should specify GPIOs used for chipselects.
27
28Example:
29
30spi0: spi@b00d0000 {
31 compatible = "sirf,prima2-spi";
32 reg = <0xb00d0000 0x10000>;
33 interrupts = <15>;
34 dmas = <&dmac1 9>,
35 <&dmac1 4>;
36 dma-names = "rx", "tx";
37 #address-cells = <1>;
38 #size-cells = <0>;
39 clocks = <&clks 19>;
40 resets = <&rstc 26>;
41};
diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
new file mode 100644
index 000000000000..fe54959ec957
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
@@ -0,0 +1,40 @@
1STMicroelectronics SSC (SPI) Controller
2---------------------------------------
3
4Required properties:
5- compatible : "st,comms-ssc4-spi"
6- reg : Offset and length of the device's register set
7- interrupts : The interrupt specifier
8- clock-names : Must contain "ssc"
9- clocks : Must contain an entry for each name in clock-names
10 See ../clk/*
11- pinctrl-names : Uses "default", can use "sleep" if provided
12		  See ../pinctrl/pinctrl-bindings.txt
13
14Optional properties:
15- cs-gpios : List of GPIO chip selects
16 See ../spi/spi-bus.txt
17
18Child nodes represent devices on the SPI bus
19 See ../spi/spi-bus.txt
20
21Example:
22 spi@9840000 {
23 compatible = "st,comms-ssc4-spi";
24 reg = <0x9840000 0x110>;
25 interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
26 clocks = <&clk_s_c0_flexgen CLK_EXT2F_A9>;
27 clock-names = "ssc";
28 pinctrl-0 = <&pinctrl_spi0_default>;
29 pinctrl-names = "default";
30 cs-gpios = <&pio17 5 0>;
31 #address-cells = <1>;
32 #size-cells = <0>;
33
34 st95hf@0{
35 compatible = "st,st95hf";
36 reg = <0>;
37 spi-max-frequency = <1000000>;
38 interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
39 };
40 };
diff --git a/Documentation/futex-requeue-pi.txt b/Documentation/futex-requeue-pi.txt
index 31b16610c416..77b36f59d16b 100644
--- a/Documentation/futex-requeue-pi.txt
+++ b/Documentation/futex-requeue-pi.txt
@@ -98,7 +98,7 @@ rt_mutex_start_proxy_lock() and rt_mutex_finish_proxy_lock(), which
98allow the requeue code to acquire an uncontended rt_mutex on behalf 98allow the requeue code to acquire an uncontended rt_mutex on behalf
99of the waiter and to enqueue the waiter on a contended rt_mutex. 99of the waiter and to enqueue the waiter on a contended rt_mutex.
100Two new system calls provide the kernel<->user interface to 100Two new system calls provide the kernel<->user interface to
101requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_REQUEUE_CMP_PI. 101requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI.
102 102
103FUTEX_WAIT_REQUEUE_PI is called by the waiter (pthread_cond_wait() 103FUTEX_WAIT_REQUEUE_PI is called by the waiter (pthread_cond_wait()
104and pthread_cond_timedwait()) to block on the initial futex and wait 104and pthread_cond_timedwait()) to block on the initial futex and wait
@@ -107,7 +107,7 @@ result of a high-speed collision between futex_wait() and
107futex_lock_pi(), with some extra logic to check for the additional 107futex_lock_pi(), with some extra logic to check for the additional
108wake-up scenarios. 108wake-up scenarios.
109 109
110FUTEX_REQUEUE_CMP_PI is called by the waker 110FUTEX_CMP_REQUEUE_PI is called by the waker
111(pthread_cond_broadcast() and pthread_cond_signal()) to requeue and 111(pthread_cond_broadcast() and pthread_cond_signal()) to requeue and
112possibly wake the waiting tasks. Internally, this system call is 112possibly wake the waiting tasks. Internally, this system call is
113still handled by futex_requeue (by passing requeue_pi=1). Before 113still handled by futex_requeue (by passing requeue_pi=1). Before
@@ -120,12 +120,12 @@ task as a waiter on the underlying rt_mutex. It is possible that
120the lock can be acquired at this stage as well, if so, the next 120the lock can be acquired at this stage as well, if so, the next
121waiter is woken to finish the acquisition of the lock. 121waiter is woken to finish the acquisition of the lock.
122 122
123FUTEX_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but 123FUTEX_CMP_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
124their sum is all that really matters. futex_requeue() will wake or 124their sum is all that really matters. futex_requeue() will wake or
125requeue up to nr_wake + nr_requeue tasks. It will wake only as many 125requeue up to nr_wake + nr_requeue tasks. It will wake only as many
126tasks as it can acquire the lock for, which in the majority of cases 126tasks as it can acquire the lock for, which in the majority of cases
127should be 0 as good programming practice dictates that the caller of 127should be 0 as good programming practice dictates that the caller of
128either pthread_cond_broadcast() or pthread_cond_signal() acquire the 128either pthread_cond_broadcast() or pthread_cond_signal() acquire the
129mutex prior to making the call. FUTEX_REQUEUE_PI requires that 129mutex prior to making the call. FUTEX_CMP_REQUEUE_PI requires that
130nr_wake=1. nr_requeue should be INT_MAX for broadcast and 0 for 130nr_wake=1. nr_requeue should be INT_MAX for broadcast and 0 for
131signal. 131signal.
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index 4223c2d3b508..cfd31d94c872 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -26,6 +26,12 @@ Supported chips:
26 Datasheet: Publicly available at the Texas Instruments website 26 Datasheet: Publicly available at the Texas Instruments website
27 http://www.ti.com/ 27 http://www.ti.com/
28 28
29 * Texas Instruments INA231
30 Prefix: 'ina231'
31 Addresses: I2C 0x40 - 0x4f
32 Datasheet: Publicly available at the Texas Instruments website
33 http://www.ti.com/
34
29Author: Lothar Felten <l-felten@ti.com> 35Author: Lothar Felten <l-felten@ti.com>
30 36
31Description 37Description
@@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage.
41The INA226 is a current shunt and power monitor with an I2C interface. 47The INA226 is a current shunt and power monitor with an I2C interface.
42The INA226 monitors both a shunt voltage drop and bus supply voltage. 48The INA226 monitors both a shunt voltage drop and bus supply voltage.
43 49
44The INA230 is a high or low side current shunt and power monitor with an I2C 50INA230 and INA231 are high or low side current shunt and power monitors
45interface. The INA230 monitors both a shunt voltage drop and bus supply voltage. 51with an I2C interface. The chips monitor both a shunt voltage drop and
52bus supply voltage.
46 53
47The shunt value in micro-ohms can be set via platform data or device tree. 54The shunt value in micro-ohms can be set via platform data or device tree at
48Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings 55compile-time or via the shunt_resistor attribute in sysfs at run-time. Please
56refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
49if the device tree is used. 57if the device tree is used.
58
59Additionally ina226 supports update_interval attribute as described in
60Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
61bus and shunt voltage conversion times multiplied by the averaging rate. We
62don't touch the conversion times and only modify the number of averages. The
63lower limit of the update_interval is 2 ms, the upper limit is 2253 ms.
64The actual programmed interval may vary from the desired value.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 176d4fe4f076..f06f1f609cb7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1470,6 +1470,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1470 no_hwp 1470 no_hwp
1471 Do not enable hardware P state control (HWP) 1471 Do not enable hardware P state control (HWP)
1472 if available. 1472 if available.
1473 hwp_only
1474 Only load intel_pstate on systems which support
1475 hardware P state control (HWP) if available.
1473 1476
1474 intremap= [X86-64, Intel-IOMMU] 1477 intremap= [X86-64, Intel-IOMMU]
1475 on enable Interrupt Remapping (default) 1478 on enable Interrupt Remapping (default)
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 5dbc99c04f6e..5001280e9d82 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -34,7 +34,7 @@ The validator tracks lock-class usage history into 4n + 1 separate state bits:
34- 'ever held with STATE enabled' 34- 'ever held with STATE enabled'
35- 'ever held as readlock with STATE enabled' 35- 'ever held as readlock with STATE enabled'
36 36
37Where STATE can be either one of (kernel/lockdep_states.h) 37Where STATE can be either one of (kernel/locking/lockdep_states.h)
38 - hardirq 38 - hardirq
39 - softirq 39 - softirq
40 - reclaim_fs 40 - reclaim_fs
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 70a09f8a0383..ca2387ef27ab 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -269,6 +269,50 @@ And there are a number of things that _must_ or _must_not_ be assumed:
269 STORE *(A + 4) = Y; STORE *A = X; 269 STORE *(A + 4) = Y; STORE *A = X;
270 STORE {*A, *(A + 4) } = {X, Y}; 270 STORE {*A, *(A + 4) } = {X, Y};
271 271
272And there are anti-guarantees:
273
274 (*) These guarantees do not apply to bitfields, because compilers often
275 generate code to modify these using non-atomic read-modify-write
276 sequences. Do not attempt to use bitfields to synchronize parallel
277 algorithms.
278
279 (*) Even in cases where bitfields are protected by locks, all fields
280 in a given bitfield must be protected by one lock. If two fields
281 in a given bitfield are protected by different locks, the compiler's
282 non-atomic read-modify-write sequences can cause an update to one
283 field to corrupt the value of an adjacent field.
284
285 (*) These guarantees apply only to properly aligned and sized scalar
286 variables. "Properly sized" currently means variables that are
287 the same size as "char", "short", "int" and "long". "Properly
288 aligned" means the natural alignment, thus no constraints for
289 "char", two-byte alignment for "short", four-byte alignment for
290 "int", and either four-byte or eight-byte alignment for "long",
291 on 32-bit and 64-bit systems, respectively. Note that these
292 guarantees were introduced into the C11 standard, so beware when
293 using older pre-C11 compilers (for example, gcc 4.6). The portion
294 of the standard containing this guarantee is Section 3.14, which
295 defines "memory location" as follows:
296
297 memory location
298 either an object of scalar type, or a maximal sequence
299 of adjacent bit-fields all having nonzero width
300
301 NOTE 1: Two threads of execution can update and access
302 separate memory locations without interfering with
303 each other.
304
305 NOTE 2: A bit-field and an adjacent non-bit-field member
306 are in separate memory locations. The same applies
307 to two bit-fields, if one is declared inside a nested
308 structure declaration and the other is not, or if the two
309 are separated by a zero-length bit-field declaration,
310 or if they are separated by a non-bit-field member
311 declaration. It is not safe to concurrently update two
312 bit-fields in the same structure if all members declared
313 between them are also bit-fields, no matter what the
314 sizes of those intervening bit-fields happen to be.
315
272 316
273========================= 317=========================
274WHAT ARE MEMORY BARRIERS? 318WHAT ARE MEMORY BARRIERS?
@@ -750,7 +794,7 @@ In summary:
750 However, they do -not- guarantee any other sort of ordering: 794 However, they do -not- guarantee any other sort of ordering:
751 Not prior loads against later loads, nor prior stores against 795 Not prior loads against later loads, nor prior stores against
752 later anything. If you need these other forms of ordering, 796 later anything. If you need these other forms of ordering,
753 use smb_rmb(), smp_wmb(), or, in the case of prior stores and 797 use smp_rmb(), smp_wmb(), or, in the case of prior stores and
754 later loads, smp_mb(). 798 later loads, smp_mb().
755 799
756 (*) If both legs of the "if" statement begin with identical stores 800 (*) If both legs of the "if" statement begin with identical stores
diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt
index 1bdfa0443773..4685aee197fd 100644
--- a/Documentation/power/s2ram.txt
+++ b/Documentation/power/s2ram.txt
@@ -69,6 +69,10 @@ Reason for this is that the RTC is the only reliably available piece of
69hardware during resume operations where a value can be set that will 69hardware during resume operations where a value can be set that will
70survive a reboot. 70survive a reboot.
71 71
72pm_trace is not compatible with asynchronous suspend, so it turns
73asynchronous suspend off (which may work around timing or
74ordering-sensitive bugs).
75
72Consequence is that after a resume (even if it is successful) your system 76Consequence is that after a resume (even if it is successful) your system
73clock will have a value corresponding to the magic number instead of the 77clock will have a value corresponding to the magic number instead of the
74correct date/time! It is therefore advisable to use a program like ntp-date 78correct date/time! It is therefore advisable to use a program like ntp-date
diff --git a/Documentation/x86/entry_64.txt b/Documentation/x86/entry_64.txt
index 4a1c5c2dc5a9..9132b86176a3 100644
--- a/Documentation/x86/entry_64.txt
+++ b/Documentation/x86/entry_64.txt
@@ -78,9 +78,6 @@ The expensive (paranoid) way is to read back the MSR_GS_BASE value
78 xorl %ebx,%ebx 78 xorl %ebx,%ebx
791: ret 791: ret
80 80
81and the whole paranoid non-paranoid macro complexity is about whether
82to suffer that RDMSR cost.
83
84If we are at an interrupt or user-trap/gate-alike boundary then we can 81If we are at an interrupt or user-trap/gate-alike boundary then we can
85use the faster check: the stack will be a reliable indicator of 82use the faster check: the stack will be a reliable indicator of
86whether SWAPGS was already done: if we see that we are a secondary 83whether SWAPGS was already done: if we see that we are a secondary
@@ -93,6 +90,15 @@ which might have triggered right after a normal entry wrote CS to the
93stack but before we executed SWAPGS, then the only safe way to check 90stack but before we executed SWAPGS, then the only safe way to check
94for GS is the slower method: the RDMSR. 91for GS is the slower method: the RDMSR.
95 92
96So we try only to mark those entry methods 'paranoid' that absolutely 93Therefore, super-atomic entries (except NMI, which is handled separately)
97need the more expensive check for the GS base - and we generate all 94must use idtentry with paranoid=1 to handle gsbase correctly. This
98'normal' entry points with the regular (faster) entry macros. 95triggers three main behavior changes:
96
97 - Interrupt entry will use the slower gsbase check.
98 - Interrupt entry from user mode will switch off the IST stack.
99 - Interrupt exit to kernel mode will not attempt to reschedule.
100
101We try to only use IST entries and the paranoid entry code for vectors
102that absolutely need the more expensive check for the GS base - and we
103generate all 'normal' entry points with the regular (faster) paranoid=0
104variant.
diff --git a/Documentation/x86/x86_64/kernel-stacks b/Documentation/x86/x86_64/kernel-stacks
index a01eec5d1d0b..e3c8a49d1a2f 100644
--- a/Documentation/x86/x86_64/kernel-stacks
+++ b/Documentation/x86/x86_64/kernel-stacks
@@ -40,9 +40,11 @@ An IST is selected by a non-zero value in the IST field of an
40interrupt-gate descriptor. When an interrupt occurs and the hardware 40interrupt-gate descriptor. When an interrupt occurs and the hardware
41loads such a descriptor, the hardware automatically sets the new stack 41loads such a descriptor, the hardware automatically sets the new stack
42pointer based on the IST value, then invokes the interrupt handler. If 42pointer based on the IST value, then invokes the interrupt handler. If
43software wants to allow nested IST interrupts then the handler must 43the interrupt came from user mode, then the interrupt handler prologue
44adjust the IST values on entry to and exit from the interrupt handler. 44will switch back to the per-thread stack. If software wants to allow
45(This is occasionally done, e.g. for debug exceptions.) 45nested IST interrupts then the handler must adjust the IST values on
46entry to and exit from the interrupt handler. (This is occasionally
47done, e.g. for debug exceptions.)
46 48
47Events with different IST codes (i.e. with different stacks) can be 49Events with different IST codes (i.e. with different stacks) can be
48nested. For example, a debug interrupt can safely be interrupted by an 50nested. For example, a debug interrupt can safely be interrupted by an
diff --git a/MAINTAINERS b/MAINTAINERS
index d66a97dd3a12..54c7ce00d85f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -270,12 +270,12 @@ F: drivers/acpi/
270F: drivers/pnp/pnpacpi/ 270F: drivers/pnp/pnpacpi/
271F: include/linux/acpi.h 271F: include/linux/acpi.h
272F: include/acpi/ 272F: include/acpi/
273F: Documentation/acpi 273F: Documentation/acpi/
274F: Documentation/ABI/testing/sysfs-bus-acpi 274F: Documentation/ABI/testing/sysfs-bus-acpi
275F: drivers/pci/*acpi* 275F: drivers/pci/*acpi*
276F: drivers/pci/*/*acpi* 276F: drivers/pci/*/*acpi*
277F: drivers/pci/*/*/*acpi* 277F: drivers/pci/*/*/*acpi*
278F: tools/power/acpi 278F: tools/power/acpi/
279 279
280ACPI COMPONENT ARCHITECTURE (ACPICA) 280ACPI COMPONENT ARCHITECTURE (ACPICA)
281M: Robert Moore <robert.moore@intel.com> 281M: Robert Moore <robert.moore@intel.com>
@@ -1586,6 +1586,7 @@ N: xilinx
1586F: drivers/clocksource/cadence_ttc_timer.c 1586F: drivers/clocksource/cadence_ttc_timer.c
1587F: drivers/i2c/busses/i2c-cadence.c 1587F: drivers/i2c/busses/i2c-cadence.c
1588F: drivers/mmc/host/sdhci-of-arasan.c 1588F: drivers/mmc/host/sdhci-of-arasan.c
1589F: drivers/edac/synopsys_edac.c
1589 1590
1590ARM SMMU DRIVER 1591ARM SMMU DRIVER
1591M: Will Deacon <will.deacon@arm.com> 1592M: Will Deacon <will.deacon@arm.com>
@@ -3513,6 +3514,8 @@ M: Borislav Petkov <bp@alien8.de>
3513M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 3514M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3514L: linux-edac@vger.kernel.org 3515L: linux-edac@vger.kernel.org
3515W: bluesmoke.sourceforge.net 3516W: bluesmoke.sourceforge.net
3517T: git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git#for-next
3518T:	git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git#linux_next
3516S: Supported 3519S: Supported
3517F: Documentation/edac.txt 3520F: Documentation/edac.txt
3518F: drivers/edac/ 3521F: drivers/edac/
@@ -7274,6 +7277,14 @@ F: include/linux/pci*
7274F: arch/x86/pci/ 7277F: arch/x86/pci/
7275F: arch/x86/kernel/quirks.c 7278F: arch/x86/kernel/quirks.c
7276 7279
7280PCI DRIVER FOR ARM VERSATILE PLATFORM
7281M: Rob Herring <robh@kernel.org>
7282L: linux-pci@vger.kernel.org
7283L: linux-arm-kernel@lists.infradead.org
7284S: Maintained
7285F: Documentation/devicetree/bindings/pci/versatile.txt
7286F: drivers/pci/host/pci-versatile.c
7287
7277PCI DRIVER FOR APPLIEDMICRO XGENE 7288PCI DRIVER FOR APPLIEDMICRO XGENE
7278M: Tanmay Inamdar <tinamdar@apm.com> 7289M: Tanmay Inamdar <tinamdar@apm.com>
7279L: linux-pci@vger.kernel.org 7290L: linux-pci@vger.kernel.org
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97d07ed60a0b..dcb2e0c55be4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1279,6 +1279,9 @@ config PCI_DOMAINS
1279 bool 1279 bool
1280 depends on PCI 1280 depends on PCI
1281 1281
1282config PCI_DOMAINS_GENERIC
1283 def_bool PCI_DOMAINS
1284
1282config PCI_NANOENGINE 1285config PCI_NANOENGINE
1283 bool "BSE nanoEngine PCI support" 1286 bool "BSE nanoEngine PCI support"
1284 depends on SA1100_NANOENGINE 1287 depends on SA1100_NANOENGINE
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index e36c1e82fea7..b83137f66034 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -29,6 +29,43 @@
29 clock-names = "apb_pclk"; 29 clock-names = "apb_pclk";
30 }; 30 };
31 31
32 pci-controller@10001000 {
33 compatible = "arm,versatile-pci";
34 device_type = "pci";
35 reg = <0x10001000 0x1000
36 0x41000000 0x10000
37 0x42000000 0x100000>;
38 bus-range = <0 0xff>;
39 #address-cells = <3>;
40 #size-cells = <2>;
41 #interrupt-cells = <1>;
42
43 ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000 /* downstream I/O */
44 0x02000000 0 0x50000000 0x50000000 0 0x10000000 /* non-prefetchable memory */
45 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
46
47 interrupt-map-mask = <0x1800 0 0 7>;
48 interrupt-map = <0x1800 0 0 1 &sic 28
49 0x1800 0 0 2 &sic 29
50 0x1800 0 0 3 &sic 30
51 0x1800 0 0 4 &sic 27
52
53 0x1000 0 0 1 &sic 27
54 0x1000 0 0 2 &sic 28
55 0x1000 0 0 3 &sic 29
56 0x1000 0 0 4 &sic 30
57
58 0x0800 0 0 1 &sic 30
59 0x0800 0 0 2 &sic 27
60 0x0800 0 0 3 &sic 28
61 0x0800 0 0 4 &sic 29
62
63 0x0000 0 0 1 &sic 29
64 0x0000 0 0 2 &sic 30
65 0x0000 0 0 3 &sic 27
66 0x0000 0 0 4 &sic 28>;
67 };
68
32 fpga { 69 fpga {
33 uart@9000 { 70 uart@9000 {
34 compatible = "arm,pl011", "arm,primecell"; 71 compatible = "arm,pl011", "arm,primecell";
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 8292b5f81e23..28b9bb35949e 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -19,9 +19,6 @@ struct pci_bus;
19struct device; 19struct device;
20 20
21struct hw_pci { 21struct hw_pci {
22#ifdef CONFIG_PCI_DOMAINS
23 int domain;
24#endif
25#ifdef CONFIG_PCI_MSI 22#ifdef CONFIG_PCI_MSI
26 struct msi_controller *msi_ctrl; 23 struct msi_controller *msi_ctrl;
27#endif 24#endif
@@ -45,9 +42,6 @@ struct hw_pci {
45 * Per-controller structure 42 * Per-controller structure
46 */ 43 */
47struct pci_sys_data { 44struct pci_sys_data {
48#ifdef CONFIG_PCI_DOMAINS
49 int domain;
50#endif
51#ifdef CONFIG_PCI_MSI 45#ifdef CONFIG_PCI_MSI
52 struct msi_controller *msi_ctrl; 46 struct msi_controller *msi_ctrl;
53#endif 47#endif
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 7e95d8535e24..585dc33a7a24 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void)
18} 18}
19 19
20#ifdef CONFIG_PCI_DOMAINS 20#ifdef CONFIG_PCI_DOMAINS
21static inline int pci_domain_nr(struct pci_bus *bus)
22{
23 struct pci_sys_data *root = bus->sysdata;
24
25 return root->domain;
26}
27
28static inline int pci_proc_domain(struct pci_bus *bus) 21static inline int pci_proc_domain(struct pci_bus *bus)
29{ 22{
30 return pci_domain_nr(bus); 23 return pci_domain_nr(bus);
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 68c739b3fdf4..2f7e6ff67d51 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
92 struct page **pages, unsigned int count); 92 struct page **pages, unsigned int count);
93 93
94extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 94extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
95 struct gnttab_map_grant_ref *kmap_ops, 95 struct gnttab_unmap_grant_ref *kunmap_ops,
96 struct page **pages, unsigned int count); 96 struct page **pages, unsigned int count);
97 97
98bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 98bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index a4effd6d8f2f..ab19b7c03423 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -422,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
422static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) 422static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
423{ 423{
424 int ret; 424 int ret;
425 struct pci_host_bridge_window *window; 425 struct resource_entry *window;
426 426
427 if (list_empty(&sys->resources)) { 427 if (list_empty(&sys->resources)) {
428 pci_add_resource_offset(&sys->resources, 428 pci_add_resource_offset(&sys->resources,
429 &iomem_resource, sys->mem_offset); 429 &iomem_resource, sys->mem_offset);
430 } 430 }
431 431
432 list_for_each_entry(window, &sys->resources, list) { 432 resource_list_for_each_entry(window, &sys->resources)
433 if (resource_type(window->res) == IORESOURCE_IO) 433 if (resource_type(window->res) == IORESOURCE_IO)
434 return 0; 434 return 0;
435 }
436 435
437 sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io; 436 sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
438 sys->io_res.end = (busnr + 1) * SZ_64K - 1; 437 sys->io_res.end = (busnr + 1) * SZ_64K - 1;
@@ -463,9 +462,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
463 if (!sys) 462 if (!sys)
464 panic("PCI: unable to allocate sys data!"); 463 panic("PCI: unable to allocate sys data!");
465 464
466#ifdef CONFIG_PCI_DOMAINS
467 sys->domain = hw->domain;
468#endif
469#ifdef CONFIG_PCI_MSI 465#ifdef CONFIG_PCI_MSI
470 sys->msi_ctrl = hw->msi_ctrl; 466 sys->msi_ctrl = hw->msi_ctrl;
471#endif 467#endif
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..3afee5f40f4f 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
23 select HAVE_KVM_CPU_RELAX_INTERCEPT 23 select HAVE_KVM_CPU_RELAX_INTERCEPT
24 select KVM_MMIO 24 select KVM_MMIO
25 select KVM_ARM_HOST 25 select KVM_ARM_HOST
26 select SRCU
26 depends on ARM_VIRT_EXT && ARM_LPAE 27 depends on ARM_VIRT_EXT && ARM_LPAE
27 ---help--- 28 ---help---
28 Support hosting virtualized guest machines. You will also 29 Support hosting virtualized guest machines. You will also
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 45d6bd09e6ef..c622c306c390 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -30,18 +30,15 @@ struct cns3xxx_pcie {
30 unsigned int irqs[2]; 30 unsigned int irqs[2];
31 struct resource res_io; 31 struct resource res_io;
32 struct resource res_mem; 32 struct resource res_mem;
33 struct hw_pci hw_pci; 33 int port;
34
35 bool linked; 34 bool linked;
36}; 35};
37 36
38static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */
39
40static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata) 37static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
41{ 38{
42 struct pci_sys_data *root = sysdata; 39 struct pci_sys_data *root = sysdata;
43 40
44 return &cns3xxx_pcie[root->domain]; 41 return root->private_data;
45} 42}
46 43
47static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev) 44static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
@@ -54,8 +51,8 @@ static struct cns3xxx_pcie *pbus_to_cnspci(struct pci_bus *bus)
54 return sysdata_to_cnspci(bus->sysdata); 51 return sysdata_to_cnspci(bus->sysdata);
55} 52}
56 53
57static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus, 54static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
58 unsigned int devfn, int where) 55 unsigned int devfn, int where)
59{ 56{
60 struct cns3xxx_pcie *cnspci = pbus_to_cnspci(bus); 57 struct cns3xxx_pcie *cnspci = pbus_to_cnspci(bus);
61 int busno = bus->number; 58 int busno = bus->number;
@@ -91,55 +88,22 @@ static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus,
91static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, 88static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
92 int where, int size, u32 *val) 89 int where, int size, u32 *val)
93{ 90{
94 u32 v; 91 int ret;
95 void __iomem *base;
96 u32 mask = (0x1ull << (size * 8)) - 1; 92 u32 mask = (0x1ull << (size * 8)) - 1;
97 int shift = (where % 4) * 8; 93 int shift = (where % 4) * 8;
98 94
99 base = cns3xxx_pci_cfg_base(bus, devfn, where); 95 ret = pci_generic_config_read32(bus, devfn, where, size, val);
100 if (!base) {
101 *val = 0xffffffff;
102 return PCIBIOS_SUCCESSFUL;
103 }
104
105 v = __raw_readl(base);
106 96
107 if (bus->number == 0 && devfn == 0 && 97 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
108 (where & 0xffc) == PCI_CLASS_REVISION) { 98 (where & 0xffc) == PCI_CLASS_REVISION)
109 /* 99 /*
110 * RC's class is 0xb, but Linux PCI driver needs 0x604 100 * RC's class is 0xb, but Linux PCI driver needs 0x604
111 * for a PCIe bridge. So we must fixup the class code 101 * for a PCIe bridge. So we must fixup the class code
112 * to 0x604 here. 102 * to 0x604 here.
113 */ 103 */
114 v &= 0xff; 104 *val = ((((*val << shift) & 0xff) | (0x604 << 16)) >> shift) & mask;
115 v |= 0x604 << 16;
116 }
117 105
118 *val = (v >> shift) & mask; 106 return ret;
119
120 return PCIBIOS_SUCCESSFUL;
121}
122
123static int cns3xxx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
124 int where, int size, u32 val)
125{
126 u32 v;
127 void __iomem *base;
128 u32 mask = (0x1ull << (size * 8)) - 1;
129 int shift = (where % 4) * 8;
130
131 base = cns3xxx_pci_cfg_base(bus, devfn, where);
132 if (!base)
133 return PCIBIOS_SUCCESSFUL;
134
135 v = __raw_readl(base);
136
137 v &= ~(mask << shift);
138 v |= (val & mask) << shift;
139
140 __raw_writel(v, base);
141
142 return PCIBIOS_SUCCESSFUL;
143} 107}
144 108
145static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys) 109static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
@@ -158,8 +122,9 @@ static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
158} 122}
159 123
160static struct pci_ops cns3xxx_pcie_ops = { 124static struct pci_ops cns3xxx_pcie_ops = {
125 .map_bus = cns3xxx_pci_map_bus,
161 .read = cns3xxx_pci_read_config, 126 .read = cns3xxx_pci_read_config,
162 .write = cns3xxx_pci_write_config, 127 .write = pci_generic_config_write,
163}; 128};
164 129
165static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 130static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -192,13 +157,7 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
192 .flags = IORESOURCE_MEM, 157 .flags = IORESOURCE_MEM,
193 }, 158 },
194 .irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, }, 159 .irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
195 .hw_pci = { 160 .port = 0,
196 .domain = 0,
197 .nr_controllers = 1,
198 .ops = &cns3xxx_pcie_ops,
199 .setup = cns3xxx_pci_setup,
200 .map_irq = cns3xxx_pcie_map_irq,
201 },
202 }, 161 },
203 [1] = { 162 [1] = {
204 .host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT, 163 .host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT,
@@ -217,19 +176,13 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
217 .flags = IORESOURCE_MEM, 176 .flags = IORESOURCE_MEM,
218 }, 177 },
219 .irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, }, 178 .irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
220 .hw_pci = { 179 .port = 1,
221 .domain = 1,
222 .nr_controllers = 1,
223 .ops = &cns3xxx_pcie_ops,
224 .setup = cns3xxx_pci_setup,
225 .map_irq = cns3xxx_pcie_map_irq,
226 },
227 }, 180 },
228}; 181};
229 182
230static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci) 183static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
231{ 184{
232 int port = cnspci->hw_pci.domain; 185 int port = cnspci->port;
233 u32 reg; 186 u32 reg;
234 unsigned long time; 187 unsigned long time;
235 188
@@ -260,9 +213,9 @@ static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
260 213
261static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci) 214static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
262{ 215{
263 int port = cnspci->hw_pci.domain; 216 int port = cnspci->port;
264 struct pci_sys_data sd = { 217 struct pci_sys_data sd = {
265 .domain = port, 218 .private_data = cnspci,
266 }; 219 };
267 struct pci_bus bus = { 220 struct pci_bus bus = {
268 .number = 0, 221 .number = 0,
@@ -323,6 +276,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
323void __init cns3xxx_pcie_init_late(void) 276void __init cns3xxx_pcie_init_late(void)
324{ 277{
325 int i; 278 int i;
279 void *private_data;
280 struct hw_pci hw_pci = {
281 .nr_controllers = 1,
282 .ops = &cns3xxx_pcie_ops,
283 .setup = cns3xxx_pci_setup,
284 .map_irq = cns3xxx_pcie_map_irq,
285 .private_data = &private_data,
286 };
326 287
327 pcibios_min_io = 0; 288 pcibios_min_io = 0;
328 pcibios_min_mem = 0; 289 pcibios_min_mem = 0;
@@ -335,7 +296,8 @@ void __init cns3xxx_pcie_init_late(void)
335 cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i)); 296 cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
336 cns3xxx_pcie_check_link(&cns3xxx_pcie[i]); 297 cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
337 cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]); 298 cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
338 pci_common_init(&cns3xxx_pcie[i].hw_pci); 299 private_data = &cns3xxx_pcie[i];
300 pci_common_init(&hw_pci);
339 } 301 }
340 302
341 pci_assign_unassigned_resources(); 303 pci_assign_unassigned_resources();
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index c186a17c2cff..2565f0e7b5cf 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -356,7 +356,6 @@ static u64 pre_mem_pci_sz;
356 * 7:2 register number 356 * 7:2 register number
357 * 357 *
358 */ 358 */
359static DEFINE_RAW_SPINLOCK(v3_lock);
360 359
361#undef V3_LB_BASE_PREFETCH 360#undef V3_LB_BASE_PREFETCH
362#define V3_LB_BASE_PREFETCH 0 361#define V3_LB_BASE_PREFETCH 0
@@ -457,67 +456,21 @@ static void v3_close_config_window(void)
457static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where, 456static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
458 int size, u32 *val) 457 int size, u32 *val)
459{ 458{
460 void __iomem *addr; 459 int ret = pci_generic_config_read(bus, devfn, where, size, val);
461 unsigned long flags;
462 u32 v;
463
464 raw_spin_lock_irqsave(&v3_lock, flags);
465 addr = v3_open_config_window(bus, devfn, where);
466
467 switch (size) {
468 case 1:
469 v = __raw_readb(addr);
470 break;
471
472 case 2:
473 v = __raw_readw(addr);
474 break;
475
476 default:
477 v = __raw_readl(addr);
478 break;
479 }
480
481 v3_close_config_window(); 460 v3_close_config_window();
482 raw_spin_unlock_irqrestore(&v3_lock, flags); 461 return ret;
483
484 *val = v;
485 return PCIBIOS_SUCCESSFUL;
486} 462}
487 463
488static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where, 464static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
489 int size, u32 val) 465 int size, u32 val)
490{ 466{
491 void __iomem *addr; 467 int ret = pci_generic_config_write(bus, devfn, where, size, val);
492 unsigned long flags;
493
494 raw_spin_lock_irqsave(&v3_lock, flags);
495 addr = v3_open_config_window(bus, devfn, where);
496
497 switch (size) {
498 case 1:
499 __raw_writeb((u8)val, addr);
500 __raw_readb(addr);
501 break;
502
503 case 2:
504 __raw_writew((u16)val, addr);
505 __raw_readw(addr);
506 break;
507
508 case 4:
509 __raw_writel(val, addr);
510 __raw_readl(addr);
511 break;
512 }
513
514 v3_close_config_window(); 468 v3_close_config_window();
515 raw_spin_unlock_irqrestore(&v3_lock, flags); 469 return ret;
516
517 return PCIBIOS_SUCCESSFUL;
518} 470}
519 471
520static struct pci_ops pci_v3_ops = { 472static struct pci_ops pci_v3_ops = {
473 .map_bus = v3_open_config_window,
521 .read = v3_read_config, 474 .read = v3_read_config,
522 .write = v3_write_config, 475 .write = v3_write_config,
523}; 476};
@@ -658,7 +611,6 @@ static int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
658 */ 611 */
659static void __init pci_v3_preinit(void) 612static void __init pci_v3_preinit(void)
660{ 613{
661 unsigned long flags;
662 unsigned int temp; 614 unsigned int temp;
663 phys_addr_t io_address = pci_pio_to_address(io_mem.start); 615 phys_addr_t io_address = pci_pio_to_address(io_mem.start);
664 616
@@ -672,8 +624,6 @@ static void __init pci_v3_preinit(void)
672 hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); 624 hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
673 hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); 625 hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
674 626
675 raw_spin_lock_irqsave(&v3_lock, flags);
676
677 /* 627 /*
678 * Unlock V3 registers, but only if they were previously locked. 628 * Unlock V3 registers, but only if they were previously locked.
679 */ 629 */
@@ -736,8 +686,6 @@ static void __init pci_v3_preinit(void)
736 v3_writew(V3_LB_CFG, v3_readw(V3_LB_CFG) | (1 << 10)); 686 v3_writew(V3_LB_CFG, v3_readw(V3_LB_CFG) | (1 << 10));
737 v3_writeb(V3_LB_IMASK, 0x28); 687 v3_writeb(V3_LB_IMASK, 0x28);
738 __raw_writel(3, ap_syscon_base + INTEGRATOR_SC_PCIENABLE_OFFSET); 688 __raw_writel(3, ap_syscon_base + INTEGRATOR_SC_PCIENABLE_OFFSET);
739
740 raw_spin_unlock_irqrestore(&v3_lock, flags);
741} 689}
742 690
743static void __init pci_v3_postinit(void) 691static void __init pci_v3_postinit(void)
diff --git a/arch/arm/mach-ks8695/pci.c b/arch/arm/mach-ks8695/pci.c
index bb18193b4bac..c1bc4c3716ed 100644
--- a/arch/arm/mach-ks8695/pci.c
+++ b/arch/arm/mach-ks8695/pci.c
@@ -38,8 +38,6 @@
38 38
39 39
40static int pci_dbg; 40static int pci_dbg;
41static int pci_cfg_dbg;
42
43 41
44static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsigned int where) 42static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsigned int where)
45{ 43{
@@ -59,75 +57,11 @@ static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsi
59 } 57 }
60} 58}
61 59
62 60static void __iomem *ks8695_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
63/* 61 int where)
64 * The KS8695 datasheet prohibits anything other than 32bit accesses
65 * to the IO registers, so all our configuration must be done with
66 * 32bit operations, and the correct bit masking and shifting.
67 */
68
69static int ks8695_pci_readconfig(struct pci_bus *bus,
70 unsigned int devfn, int where, int size, u32 *value)
71{
72 ks8695_pci_setupconfig(bus->number, devfn, where);
73
74 *value = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
75
76 switch (size) {
77 case 4:
78 break;
79 case 2:
80 *value = *value >> ((where & 2) * 8);
81 *value &= 0xffff;
82 break;
83 case 1:
84 *value = *value >> ((where & 3) * 8);
85 *value &= 0xff;
86 break;
87 }
88
89 if (pci_cfg_dbg) {
90 printk("read: %d,%08x,%02x,%d: %08x (%08x)\n",
91 bus->number, devfn, where, size, *value,
92 __raw_readl(KS8695_PCI_VA + KS8695_PBCD));
93 }
94
95 return PCIBIOS_SUCCESSFUL;
96}
97
98static int ks8695_pci_writeconfig(struct pci_bus *bus,
99 unsigned int devfn, int where, int size, u32 value)
100{ 62{
101 unsigned long tmp;
102
103 if (pci_cfg_dbg) {
104 printk("write: %d,%08x,%02x,%d: %08x\n",
105 bus->number, devfn, where, size, value);
106 }
107
108 ks8695_pci_setupconfig(bus->number, devfn, where); 63 ks8695_pci_setupconfig(bus->number, devfn, where);
109 64 return KS8695_PCI_VA + KS8695_PBCD;
110 switch (size) {
111 case 4:
112 __raw_writel(value, KS8695_PCI_VA + KS8695_PBCD);
113 break;
114 case 2:
115 tmp = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
116 tmp &= ~(0xffff << ((where & 2) * 8));
117 tmp |= value << ((where & 2) * 8);
118
119 __raw_writel(tmp, KS8695_PCI_VA + KS8695_PBCD);
120 break;
121 case 1:
122 tmp = __raw_readl(KS8695_PCI_VA + KS8695_PBCD);
123 tmp &= ~(0xff << ((where & 3) * 8));
124 tmp |= value << ((where & 3) * 8);
125
126 __raw_writel(tmp, KS8695_PCI_VA + KS8695_PBCD);
127 break;
128 }
129
130 return PCIBIOS_SUCCESSFUL;
131} 65}
132 66
133static void ks8695_local_writeconfig(int where, u32 value) 67static void ks8695_local_writeconfig(int where, u32 value)
@@ -137,8 +71,9 @@ static void ks8695_local_writeconfig(int where, u32 value)
137} 71}
138 72
139static struct pci_ops ks8695_pci_ops = { 73static struct pci_ops ks8695_pci_ops = {
140 .read = ks8695_pci_readconfig, 74 .map_bus = ks8695_pci_map_bus,
141 .write = ks8695_pci_writeconfig, 75 .read = pci_generic_config_read32,
76 .write = pci_generic_config_write32,
142}; 77};
143 78
144static struct resource pci_mem = { 79static struct resource pci_mem = {
diff --git a/arch/arm/mach-sa1100/pci-nanoengine.c b/arch/arm/mach-sa1100/pci-nanoengine.c
index b704433c529c..d7ae8d50f6d8 100644
--- a/arch/arm/mach-sa1100/pci-nanoengine.c
+++ b/arch/arm/mach-sa1100/pci-nanoengine.c
@@ -22,7 +22,6 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/spinlock.h>
26 25
27#include <asm/mach/pci.h> 26#include <asm/mach/pci.h>
28#include <asm/mach-types.h> 27#include <asm/mach-types.h>
@@ -30,97 +29,20 @@
30#include <mach/nanoengine.h> 29#include <mach/nanoengine.h>
31#include <mach/hardware.h> 30#include <mach/hardware.h>
32 31
33static DEFINE_SPINLOCK(nano_lock); 32static void __iomem *nanoengine_pci_map_bus(struct pci_bus *bus,
34 33 unsigned int devfn, int where)
35static int nanoengine_get_pci_address(struct pci_bus *bus,
36 unsigned int devfn, int where, void __iomem **address)
37{ 34{
38 int ret = PCIBIOS_DEVICE_NOT_FOUND; 35 if (bus->number != 0 || (devfn >> 3) != 0)
39 unsigned int busnr = bus->number; 36 return NULL;
40 37
41 *address = (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT + 38 return (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT +
42 ((bus->number << 16) | (devfn << 8) | (where & ~3)); 39 ((bus->number << 16) | (devfn << 8) | (where & ~3));
43
44 ret = (busnr > 255 || devfn > 255 || where > 255) ?
45 PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
46
47 return ret;
48}
49
50static int nanoengine_read_config(struct pci_bus *bus, unsigned int devfn, int where,
51 int size, u32 *val)
52{
53 int ret;
54 void __iomem *address;
55 unsigned long flags;
56 u32 v;
57
58 /* nanoEngine PCI bridge does not return -1 for a non-existing
59 * device. We must fake the answer. We know that the only valid
60 * device is device zero at bus 0, which is the network chip. */
61 if (bus->number != 0 || (devfn >> 3) != 0) {
62 v = -1;
63 nanoengine_get_pci_address(bus, devfn, where, &address);
64 goto exit_function;
65 }
66
67 spin_lock_irqsave(&nano_lock, flags);
68
69 ret = nanoengine_get_pci_address(bus, devfn, where, &address);
70 if (ret != PCIBIOS_SUCCESSFUL)
71 return ret;
72 v = __raw_readl(address);
73
74 spin_unlock_irqrestore(&nano_lock, flags);
75
76 v >>= ((where & 3) * 8);
77 v &= (unsigned long)(-1) >> ((4 - size) * 8);
78
79exit_function:
80 *val = v;
81 return PCIBIOS_SUCCESSFUL;
82}
83
84static int nanoengine_write_config(struct pci_bus *bus, unsigned int devfn, int where,
85 int size, u32 val)
86{
87 int ret;
88 void __iomem *address;
89 unsigned long flags;
90 unsigned shift;
91 u32 v;
92
93 shift = (where & 3) * 8;
94
95 spin_lock_irqsave(&nano_lock, flags);
96
97 ret = nanoengine_get_pci_address(bus, devfn, where, &address);
98 if (ret != PCIBIOS_SUCCESSFUL)
99 return ret;
100 v = __raw_readl(address);
101 switch (size) {
102 case 1:
103 v &= ~(0xFF << shift);
104 v |= val << shift;
105 break;
106 case 2:
107 v &= ~(0xFFFF << shift);
108 v |= val << shift;
109 break;
110 case 4:
111 v = val;
112 break;
113 }
114 __raw_writel(v, address);
115
116 spin_unlock_irqrestore(&nano_lock, flags);
117
118 return PCIBIOS_SUCCESSFUL;
119} 40}
120 41
121static struct pci_ops pci_nano_ops = { 42static struct pci_ops pci_nano_ops = {
122 .read = nanoengine_read_config, 43 .map_bus = nanoengine_pci_map_bus,
123 .write = nanoengine_write_config, 44 .read = pci_generic_config_read32,
45 .write = pci_generic_config_write32,
124}; 46};
125 47
126static int __init pci_nanoengine_map_irq(const struct pci_dev *dev, u8 slot, 48static int __init pci_nanoengine_map_irq(const struct pci_dev *dev, u8 slot,
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c7ca936ebd99..263a2044c65b 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -29,10 +29,10 @@
29 29
30struct start_info _xen_start_info; 30struct start_info _xen_start_info;
31struct start_info *xen_start_info = &_xen_start_info; 31struct start_info *xen_start_info = &_xen_start_info;
32EXPORT_SYMBOL_GPL(xen_start_info); 32EXPORT_SYMBOL(xen_start_info);
33 33
34enum xen_domain_type xen_domain_type = XEN_NATIVE; 34enum xen_domain_type xen_domain_type = XEN_NATIVE;
35EXPORT_SYMBOL_GPL(xen_domain_type); 35EXPORT_SYMBOL(xen_domain_type);
36 36
37struct shared_info xen_dummy_shared_info; 37struct shared_info xen_dummy_shared_info;
38struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info; 38struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 351b24a979d4..793551d15f1d 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -149,7 +149,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
149EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); 149EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
150 150
151struct dma_map_ops *xen_dma_ops; 151struct dma_map_ops *xen_dma_ops;
152EXPORT_SYMBOL_GPL(xen_dma_ops); 152EXPORT_SYMBOL(xen_dma_ops);
153 153
154static struct dma_map_ops xen_swiotlb_dma_ops = { 154static struct dma_map_ops xen_swiotlb_dma_ops = {
155 .mapping_error = xen_swiotlb_dma_mapping_error, 155 .mapping_error = xen_swiotlb_dma_mapping_error,
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 054857776254..cb7a14c5cd69 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -102,7 +102,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
102EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); 102EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
103 103
104int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 104int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
105 struct gnttab_map_grant_ref *kmap_ops, 105 struct gnttab_unmap_grant_ref *kunmap_ops,
106 struct page **pages, unsigned int count) 106 struct page **pages, unsigned int count)
107{ 107{
108 int i; 108 int i;
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index d27dd982ff26..f5374065ad53 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,13 +13,13 @@
13#include <asm/efi.h> 13#include <asm/efi.h>
14#include <asm/sections.h> 14#include <asm/sections.h>
15 15
16efi_status_t handle_kernel_image(efi_system_table_t *sys_table, 16efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
17 unsigned long *image_addr, 17 unsigned long *image_addr,
18 unsigned long *image_size, 18 unsigned long *image_size,
19 unsigned long *reserve_addr, 19 unsigned long *reserve_addr,
20 unsigned long *reserve_size, 20 unsigned long *reserve_size,
21 unsigned long dram_base, 21 unsigned long dram_base,
22 efi_loaded_image_t *image) 22 efi_loaded_image_t *image)
23{ 23{
24 efi_status_t status; 24 efi_status_t status;
25 unsigned long kernel_size, kernel_memsize = 0; 25 unsigned long kernel_size, kernel_memsize = 0;
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index ce5836c14ec1..6f93c24ca801 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -46,25 +46,3 @@ int pcibios_add_device(struct pci_dev *dev)
46 46
47 return 0; 47 return 0;
48} 48}
49
50
51#ifdef CONFIG_PCI_DOMAINS_GENERIC
52static bool dt_domain_found = false;
53
54void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
55{
56 int domain = of_get_pci_domain_nr(parent->of_node);
57
58 if (domain >= 0) {
59 dt_domain_found = true;
60 } else if (dt_domain_found == true) {
61 dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
62 parent->of_node->full_name);
63 return;
64 } else {
65 domain = pci_get_new_domain_nr();
66 }
67
68 bus->domain_nr = domain;
69}
70#endif
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8ba85e9ea388..b334084d3675 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
26 select KVM_ARM_HOST 26 select KVM_ARM_HOST
27 select KVM_ARM_VGIC 27 select KVM_ARM_VGIC
28 select KVM_ARM_TIMER 28 select KVM_ARM_TIMER
29 select SRCU
29 ---help--- 30 ---help---
30 Support hosting virtualized guest machines. 31 Support hosting virtualized guest machines.
31 32
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index efa5d65b0007..b073f4d771a5 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -168,8 +168,8 @@ static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int whe
168} 168}
169 169
170static struct pci_ops pci_direct_frv = { 170static struct pci_ops pci_direct_frv = {
171 pci_frv_read_config, 171 .read = pci_frv_read_config,
172 pci_frv_write_config, 172 .write = pci_frv_write_config,
173}; 173};
174 174
175/* 175/*
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 8b9318d311a0..bd09bf74f187 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -69,10 +69,10 @@ static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
69 status = acpi_resource_to_address64(resource, &addr); 69 status = acpi_resource_to_address64(resource, &addr);
70 if (ACPI_SUCCESS(status) && 70 if (ACPI_SUCCESS(status) &&
71 addr.resource_type == ACPI_MEMORY_RANGE && 71 addr.resource_type == ACPI_MEMORY_RANGE &&
72 addr.address_length && 72 addr.address.address_length &&
73 addr.producer_consumer == ACPI_CONSUMER) { 73 addr.producer_consumer == ACPI_CONSUMER) {
74 space->base = addr.minimum; 74 space->base = addr.address.minimum;
75 space->length = addr.address_length; 75 space->length = addr.address.address_length;
76 return AE_CTRL_TERMINATE; 76 return AE_CTRL_TERMINATE;
77 } 77 }
78 return AE_OK; /* keep looking */ 78 return AE_OK; /* keep looking */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index e795cb848154..2c4498919d3c 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -380,9 +380,6 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
380 380
381static int __init acpi_parse_madt(struct acpi_table_header *table) 381static int __init acpi_parse_madt(struct acpi_table_header *table)
382{ 382{
383 if (!table)
384 return -EINVAL;
385
386 acpi_madt = (struct acpi_table_madt *)table; 383 acpi_madt = (struct acpi_table_madt *)table;
387 384
388 acpi_madt_rev = acpi_madt->header.revision; 385 acpi_madt_rev = acpi_madt->header.revision;
@@ -645,9 +642,6 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
645 struct acpi_table_header *fadt_header; 642 struct acpi_table_header *fadt_header;
646 struct acpi_table_fadt *fadt; 643 struct acpi_table_fadt *fadt;
647 644
648 if (!table)
649 return -EINVAL;
650
651 fadt_header = (struct acpi_table_header *)table; 645 fadt_header = (struct acpi_table_header *)table;
652 if (fadt_header->revision != 3) 646 if (fadt_header->revision != 3)
653 return -ENODEV; /* Only deal with ACPI 2.0 FADT */ 647 return -ENODEV; /* Only deal with ACPI 2.0 FADT */
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 900cc93e5409..48cc65705db4 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -188,12 +188,12 @@ static u64 add_io_space(struct pci_root_info *info,
188 188
189 name = (char *)(iospace + 1); 189 name = (char *)(iospace + 1);
190 190
191 min = addr->minimum; 191 min = addr->address.minimum;
192 max = min + addr->address_length - 1; 192 max = min + addr->address.address_length - 1;
193 if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION) 193 if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
194 sparse = 1; 194 sparse = 1;
195 195
196 space_nr = new_space(addr->translation_offset, sparse); 196 space_nr = new_space(addr->address.translation_offset, sparse);
197 if (space_nr == ~0) 197 if (space_nr == ~0)
198 goto free_resource; 198 goto free_resource;
199 199
@@ -247,7 +247,7 @@ static acpi_status resource_to_window(struct acpi_resource *resource,
247 if (ACPI_SUCCESS(status) && 247 if (ACPI_SUCCESS(status) &&
248 (addr->resource_type == ACPI_MEMORY_RANGE || 248 (addr->resource_type == ACPI_MEMORY_RANGE ||
249 addr->resource_type == ACPI_IO_RANGE) && 249 addr->resource_type == ACPI_IO_RANGE) &&
250 addr->address_length && 250 addr->address.address_length &&
251 addr->producer_consumer == ACPI_PRODUCER) 251 addr->producer_consumer == ACPI_PRODUCER)
252 return AE_OK; 252 return AE_OK;
253 253
@@ -284,7 +284,7 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
284 if (addr.resource_type == ACPI_MEMORY_RANGE) { 284 if (addr.resource_type == ACPI_MEMORY_RANGE) {
285 flags = IORESOURCE_MEM; 285 flags = IORESOURCE_MEM;
286 root = &iomem_resource; 286 root = &iomem_resource;
287 offset = addr.translation_offset; 287 offset = addr.address.translation_offset;
288 } else if (addr.resource_type == ACPI_IO_RANGE) { 288 } else if (addr.resource_type == ACPI_IO_RANGE) {
289 flags = IORESOURCE_IO; 289 flags = IORESOURCE_IO;
290 root = &ioport_resource; 290 root = &ioport_resource;
@@ -297,8 +297,8 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
297 resource = &info->res[info->res_num]; 297 resource = &info->res[info->res_num];
298 resource->name = info->name; 298 resource->name = info->name;
299 resource->flags = flags; 299 resource->flags = flags;
300 resource->start = addr.minimum + offset; 300 resource->start = addr.address.minimum + offset;
301 resource->end = resource->start + addr.address_length - 1; 301 resource->end = resource->start + addr.address.address_length - 1;
302 info->res_offset[info->res_num] = offset; 302 info->res_offset[info->res_num] = offset;
303 303
304 if (insert_resource(root, resource)) { 304 if (insert_resource(root, resource)) {
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 95022b04b62d..264db1126803 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -170,7 +170,6 @@ repeat:
170 if (acia_stat & ACIA_RDRF) { 170 if (acia_stat & ACIA_RDRF) {
171 /* received a character */ 171 /* received a character */
172 scancode = acia.key_data; /* get it or reset the ACIA, I'll get it! */ 172 scancode = acia.key_data; /* get it or reset the ACIA, I'll get it! */
173 tasklet_schedule(&keyboard_tasklet);
174 interpret_scancode: 173 interpret_scancode:
175 switch (kb_state.state) { 174 switch (kb_state.state) {
176 case KEYBOARD: 175 case KEYBOARD:
@@ -430,14 +429,6 @@ void ikbd_mouse_y0_top(void)
430} 429}
431EXPORT_SYMBOL(ikbd_mouse_y0_top); 430EXPORT_SYMBOL(ikbd_mouse_y0_top);
432 431
433/* Resume */
434void ikbd_resume(void)
435{
436 static const char cmd[1] = { 0x11 };
437
438 ikbd_write(cmd, 1);
439}
440
441/* Disable mouse */ 432/* Disable mouse */
442void ikbd_mouse_disable(void) 433void ikbd_mouse_disable(void)
443{ 434{
@@ -447,14 +438,6 @@ void ikbd_mouse_disable(void)
447} 438}
448EXPORT_SYMBOL(ikbd_mouse_disable); 439EXPORT_SYMBOL(ikbd_mouse_disable);
449 440
450/* Pause output */
451void ikbd_pause(void)
452{
453 static const char cmd[1] = { 0x13 };
454
455 ikbd_write(cmd, 1);
456}
457
458/* Set joystick event reporting */ 441/* Set joystick event reporting */
459void ikbd_joystick_event_on(void) 442void ikbd_joystick_event_on(void)
460{ 443{
@@ -502,56 +485,6 @@ void ikbd_joystick_disable(void)
502 ikbd_write(cmd, 1); 485 ikbd_write(cmd, 1);
503} 486}
504 487
505/* Time-of-day clock set */
506void ikbd_clock_set(int year, int month, int day, int hour, int minute, int second)
507{
508 char cmd[7] = { 0x1B, year, month, day, hour, minute, second };
509
510 ikbd_write(cmd, 7);
511}
512
513/* Interrogate time-of-day clock */
514void ikbd_clock_get(int *year, int *month, int *day, int *hour, int *minute, int second)
515{
516 static const char cmd[1] = { 0x1C };
517
518 ikbd_write(cmd, 1);
519}
520
521/* Memory load */
522void ikbd_mem_write(int address, int size, char *data)
523{
524 panic("Attempt to write data into keyboard memory");
525}
526
527/* Memory read */
528void ikbd_mem_read(int address, char data[6])
529{
530 char cmd[3] = { 0x21, address>>8, address&0xFF };
531
532 ikbd_write(cmd, 3);
533
534 /* receive data and put it in data */
535}
536
537/* Controller execute */
538void ikbd_exec(int address)
539{
540 char cmd[3] = { 0x22, address>>8, address&0xFF };
541
542 ikbd_write(cmd, 3);
543}
544
545/* Status inquiries (0x87-0x9A) not yet implemented */
546
547/* Set the state of the caps lock led. */
548void atari_kbd_leds(unsigned int leds)
549{
550 char cmd[6] = {32, 0, 4, 1, 254 + ((leds & 4) != 0), 0};
551
552 ikbd_write(cmd, 6);
553}
554
555/* 488/*
556 * The original code sometimes left the interrupt line of 489 * The original code sometimes left the interrupt line of
557 * the ACIAs low forever. I hope, it is fixed now. 490 * the ACIAs low forever. I hope, it is fixed now.
@@ -571,9 +504,8 @@ int atari_keyb_init(void)
571 kb_state.state = KEYBOARD; 504 kb_state.state = KEYBOARD;
572 kb_state.len = 0; 505 kb_state.len = 0;
573 506
574 error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 507 error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 0,
575 IRQ_TYPE_SLOW, "keyboard,mouse,MIDI", 508 "keyboard,mouse,MIDI", atari_keyboard_interrupt);
576 atari_keyboard_interrupt);
577 if (error) 509 if (error)
578 return error; 510 return error;
579 511
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index e5a66596b116..ba65f942d0c7 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(stdma_islocked);
198void __init stdma_init(void) 198void __init stdma_init(void)
199{ 199{
200 stdma_isr = NULL; 200 stdma_isr = NULL;
201 if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, 201 if (request_irq(IRQ_MFP_FDC, stdma_int, IRQF_SHARED,
202 "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) 202 "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int))
203 pr_err("Couldn't register ST-DMA interrupt\n"); 203 pr_err("Couldn't register ST-DMA interrupt\n");
204} 204}
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index da8f981c36d6..c549b48174ec 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -32,8 +32,7 @@ atari_sched_init(irq_handler_t timer_routine)
32 /* start timer C, div = 1:100 */ 32 /* start timer C, div = 1:100 */
33 st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60; 33 st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
34 /* install interrupt service routine for MFP Timer C */ 34 /* install interrupt service routine for MFP Timer C */
35 if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW, 35 if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
36 "timer", timer_routine))
37 pr_err("Couldn't register timer interrupt\n"); 36 pr_err("Couldn't register timer interrupt\n");
38} 37}
39 38
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 399df883c8bb..1a10a08ebec7 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -36,6 +36,7 @@ CONFIG_AMIGA_PCMCIA=y
36CONFIG_ZORRO_NAMES=y 36CONFIG_ZORRO_NAMES=y
37# CONFIG_COMPACTION is not set 37# CONFIG_COMPACTION is not set
38CONFIG_CLEANCACHE=y 38CONFIG_CLEANCACHE=y
39CONFIG_ZPOOL=m
39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 40# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
40CONFIG_BINFMT_AOUT=m 41CONFIG_BINFMT_AOUT=m
41CONFIG_BINFMT_MISC=m 42CONFIG_BINFMT_MISC=m
@@ -55,6 +56,8 @@ CONFIG_NET_IPIP=m
55CONFIG_NET_IPGRE_DEMUX=m 56CONFIG_NET_IPGRE_DEMUX=m
56CONFIG_NET_IPGRE=m 57CONFIG_NET_IPGRE=m
57CONFIG_NET_IPVTI=m 58CONFIG_NET_IPVTI=m
59CONFIG_NET_FOU_IP_TUNNELS=y
60CONFIG_GENEVE=m
58CONFIG_INET_AH=m 61CONFIG_INET_AH=m
59CONFIG_INET_ESP=m 62CONFIG_INET_ESP=m
60CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
@@ -96,6 +99,8 @@ CONFIG_NFT_HASH=m
96CONFIG_NFT_COUNTER=m 99CONFIG_NFT_COUNTER=m
97CONFIG_NFT_LOG=m 100CONFIG_NFT_LOG=m
98CONFIG_NFT_LIMIT=m 101CONFIG_NFT_LIMIT=m
102CONFIG_NFT_MASQ=m
103CONFIG_NFT_REDIR=m
99CONFIG_NFT_NAT=m 104CONFIG_NFT_NAT=m
100CONFIG_NFT_QUEUE=m 105CONFIG_NFT_QUEUE=m
101CONFIG_NFT_REJECT=m 106CONFIG_NFT_REJECT=m
@@ -142,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
142CONFIG_NETFILTER_XT_MATCH_OSF=m 147CONFIG_NETFILTER_XT_MATCH_OSF=m
143CONFIG_NETFILTER_XT_MATCH_OWNER=m 148CONFIG_NETFILTER_XT_MATCH_OWNER=m
144CONFIG_NETFILTER_XT_MATCH_POLICY=m 149CONFIG_NETFILTER_XT_MATCH_POLICY=m
150CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
145CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 151CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
146CONFIG_NETFILTER_XT_MATCH_QUOTA=m 152CONFIG_NETFILTER_XT_MATCH_QUOTA=m
147CONFIG_NETFILTER_XT_MATCH_RATEEST=m 153CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -163,6 +169,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
163CONFIG_IP_SET_HASH_IPPORT=m 169CONFIG_IP_SET_HASH_IPPORT=m
164CONFIG_IP_SET_HASH_IPPORTIP=m 170CONFIG_IP_SET_HASH_IPPORTIP=m
165CONFIG_IP_SET_HASH_IPPORTNET=m 171CONFIG_IP_SET_HASH_IPPORTNET=m
172CONFIG_IP_SET_HASH_MAC=m
166CONFIG_IP_SET_HASH_NETPORTNET=m 173CONFIG_IP_SET_HASH_NETPORTNET=m
167CONFIG_IP_SET_HASH_NET=m 174CONFIG_IP_SET_HASH_NET=m
168CONFIG_IP_SET_HASH_NETNET=m 175CONFIG_IP_SET_HASH_NETNET=m
@@ -170,9 +177,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
170CONFIG_IP_SET_HASH_NETIFACE=m 177CONFIG_IP_SET_HASH_NETIFACE=m
171CONFIG_IP_SET_LIST_SET=m 178CONFIG_IP_SET_LIST_SET=m
172CONFIG_NF_CONNTRACK_IPV4=m 179CONFIG_NF_CONNTRACK_IPV4=m
180CONFIG_NF_LOG_ARP=m
173CONFIG_NFT_CHAIN_ROUTE_IPV4=m 181CONFIG_NFT_CHAIN_ROUTE_IPV4=m
174CONFIG_NFT_CHAIN_NAT_IPV4=m
175CONFIG_NF_TABLES_ARP=m 182CONFIG_NF_TABLES_ARP=m
183CONFIG_NFT_CHAIN_NAT_IPV4=m
184CONFIG_NFT_MASQ_IPV4=m
185CONFIG_NFT_REDIR_IPV4=m
176CONFIG_IP_NF_IPTABLES=m 186CONFIG_IP_NF_IPTABLES=m
177CONFIG_IP_NF_MATCH_AH=m 187CONFIG_IP_NF_MATCH_AH=m
178CONFIG_IP_NF_MATCH_ECN=m 188CONFIG_IP_NF_MATCH_ECN=m
@@ -181,8 +191,7 @@ CONFIG_IP_NF_MATCH_TTL=m
181CONFIG_IP_NF_FILTER=m 191CONFIG_IP_NF_FILTER=m
182CONFIG_IP_NF_TARGET_REJECT=m 192CONFIG_IP_NF_TARGET_REJECT=m
183CONFIG_IP_NF_TARGET_SYNPROXY=m 193CONFIG_IP_NF_TARGET_SYNPROXY=m
184CONFIG_IP_NF_TARGET_ULOG=m 194CONFIG_IP_NF_NAT=m
185CONFIG_NF_NAT_IPV4=m
186CONFIG_IP_NF_TARGET_MASQUERADE=m 195CONFIG_IP_NF_TARGET_MASQUERADE=m
187CONFIG_IP_NF_TARGET_NETMAP=m 196CONFIG_IP_NF_TARGET_NETMAP=m
188CONFIG_IP_NF_TARGET_REDIRECT=m 197CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -197,6 +206,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
197CONFIG_NF_CONNTRACK_IPV6=m 206CONFIG_NF_CONNTRACK_IPV6=m
198CONFIG_NFT_CHAIN_ROUTE_IPV6=m 207CONFIG_NFT_CHAIN_ROUTE_IPV6=m
199CONFIG_NFT_CHAIN_NAT_IPV6=m 208CONFIG_NFT_CHAIN_NAT_IPV6=m
209CONFIG_NFT_MASQ_IPV6=m
210CONFIG_NFT_REDIR_IPV6=m
200CONFIG_IP6_NF_IPTABLES=m 211CONFIG_IP6_NF_IPTABLES=m
201CONFIG_IP6_NF_MATCH_AH=m 212CONFIG_IP6_NF_MATCH_AH=m
202CONFIG_IP6_NF_MATCH_EUI64=m 213CONFIG_IP6_NF_MATCH_EUI64=m
@@ -213,17 +224,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
213CONFIG_IP6_NF_TARGET_SYNPROXY=m 224CONFIG_IP6_NF_TARGET_SYNPROXY=m
214CONFIG_IP6_NF_MANGLE=m 225CONFIG_IP6_NF_MANGLE=m
215CONFIG_IP6_NF_RAW=m 226CONFIG_IP6_NF_RAW=m
216CONFIG_NF_NAT_IPV6=m 227CONFIG_IP6_NF_NAT=m
217CONFIG_IP6_NF_TARGET_MASQUERADE=m 228CONFIG_IP6_NF_TARGET_MASQUERADE=m
218CONFIG_IP6_NF_TARGET_NPT=m 229CONFIG_IP6_NF_TARGET_NPT=m
219CONFIG_NF_TABLES_BRIDGE=m 230CONFIG_NF_TABLES_BRIDGE=m
231CONFIG_NFT_BRIDGE_META=m
232CONFIG_NFT_BRIDGE_REJECT=m
233CONFIG_NF_LOG_BRIDGE=m
234CONFIG_BRIDGE_NF_EBTABLES=m
235CONFIG_BRIDGE_EBT_BROUTE=m
236CONFIG_BRIDGE_EBT_T_FILTER=m
237CONFIG_BRIDGE_EBT_T_NAT=m
238CONFIG_BRIDGE_EBT_802_3=m
239CONFIG_BRIDGE_EBT_AMONG=m
240CONFIG_BRIDGE_EBT_ARP=m
241CONFIG_BRIDGE_EBT_IP=m
242CONFIG_BRIDGE_EBT_IP6=m
243CONFIG_BRIDGE_EBT_LIMIT=m
244CONFIG_BRIDGE_EBT_MARK=m
245CONFIG_BRIDGE_EBT_PKTTYPE=m
246CONFIG_BRIDGE_EBT_STP=m
247CONFIG_BRIDGE_EBT_VLAN=m
248CONFIG_BRIDGE_EBT_ARPREPLY=m
249CONFIG_BRIDGE_EBT_DNAT=m
250CONFIG_BRIDGE_EBT_MARK_T=m
251CONFIG_BRIDGE_EBT_REDIRECT=m
252CONFIG_BRIDGE_EBT_SNAT=m
253CONFIG_BRIDGE_EBT_LOG=m
254CONFIG_BRIDGE_EBT_NFLOG=m
220CONFIG_IP_DCCP=m 255CONFIG_IP_DCCP=m
221# CONFIG_IP_DCCP_CCID3 is not set 256# CONFIG_IP_DCCP_CCID3 is not set
222CONFIG_SCTP_COOKIE_HMAC_SHA1=y 257CONFIG_SCTP_COOKIE_HMAC_SHA1=y
223CONFIG_RDS=m 258CONFIG_RDS=m
224CONFIG_RDS_TCP=m 259CONFIG_RDS_TCP=m
225CONFIG_L2TP=m 260CONFIG_L2TP=m
261CONFIG_BRIDGE=m
226CONFIG_ATALK=m 262CONFIG_ATALK=m
263CONFIG_6LOWPAN=m
227CONFIG_DNS_RESOLVER=y 264CONFIG_DNS_RESOLVER=y
228CONFIG_BATMAN_ADV=m 265CONFIG_BATMAN_ADV=m
229CONFIG_BATMAN_ADV_DAT=y 266CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
232CONFIG_NETLINK_DIAG=m 269CONFIG_NETLINK_DIAG=m
233CONFIG_NET_MPLS_GSO=m 270CONFIG_NET_MPLS_GSO=m
234# CONFIG_WIRELESS is not set 271# CONFIG_WIRELESS is not set
272# CONFIG_UEVENT_HELPER is not set
235CONFIG_DEVTMPFS=y 273CONFIG_DEVTMPFS=y
274CONFIG_DEVTMPFS_MOUNT=y
236# CONFIG_FIRMWARE_IN_KERNEL is not set 275# CONFIG_FIRMWARE_IN_KERNEL is not set
237# CONFIG_FW_LOADER_USER_HELPER is not set
238CONFIG_CONNECTOR=m 276CONFIG_CONNECTOR=m
239CONFIG_PARPORT=m 277CONFIG_PARPORT=m
240CONFIG_PARPORT_AMIGA=m 278CONFIG_PARPORT_AMIGA=m
@@ -299,6 +337,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
299CONFIG_NET_TEAM_MODE_RANDOM=m 337CONFIG_NET_TEAM_MODE_RANDOM=m
300CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 338CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
301CONFIG_NET_TEAM_MODE_LOADBALANCE=m 339CONFIG_NET_TEAM_MODE_LOADBALANCE=m
340CONFIG_MACVLAN=m
341CONFIG_MACVTAP=m
342CONFIG_IPVLAN=m
302CONFIG_VXLAN=m 343CONFIG_VXLAN=m
303CONFIG_NETCONSOLE=m 344CONFIG_NETCONSOLE=m
304CONFIG_NETCONSOLE_DYNAMIC=y 345CONFIG_NETCONSOLE_DYNAMIC=y
@@ -316,6 +357,8 @@ CONFIG_ARIADNE=y
316CONFIG_HYDRA=y 357CONFIG_HYDRA=y
317CONFIG_APNE=y 358CONFIG_APNE=y
318CONFIG_ZORRO8390=y 359CONFIG_ZORRO8390=y
360# CONFIG_NET_VENDOR_QUALCOMM is not set
361# CONFIG_NET_VENDOR_ROCKER is not set
319# CONFIG_NET_VENDOR_SAMSUNG is not set 362# CONFIG_NET_VENDOR_SAMSUNG is not set
320# CONFIG_NET_VENDOR_SEEQ is not set 363# CONFIG_NET_VENDOR_SEEQ is not set
321# CONFIG_NET_VENDOR_SMSC is not set 364# CONFIG_NET_VENDOR_SMSC is not set
@@ -371,6 +414,7 @@ CONFIG_HID=m
371CONFIG_HIDRAW=y 414CONFIG_HIDRAW=y
372CONFIG_UHID=m 415CONFIG_UHID=m
373# CONFIG_HID_GENERIC is not set 416# CONFIG_HID_GENERIC is not set
417# CONFIG_HID_PLANTRONICS is not set
374# CONFIG_USB_SUPPORT is not set 418# CONFIG_USB_SUPPORT is not set
375CONFIG_RTC_CLASS=y 419CONFIG_RTC_CLASS=y
376CONFIG_RTC_DRV_MSM6242=m 420CONFIG_RTC_DRV_MSM6242=m
@@ -392,6 +436,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
392CONFIG_AUTOFS4_FS=m 436CONFIG_AUTOFS4_FS=m
393CONFIG_FUSE_FS=m 437CONFIG_FUSE_FS=m
394CONFIG_CUSE=m 438CONFIG_CUSE=m
439CONFIG_OVERLAY_FS=m
395CONFIG_ISO9660_FS=y 440CONFIG_ISO9660_FS=y
396CONFIG_JOLIET=y 441CONFIG_JOLIET=y
397CONFIG_ZISOFS=y 442CONFIG_ZISOFS=y
@@ -407,6 +452,7 @@ CONFIG_HFS_FS=m
407CONFIG_HFSPLUS_FS=m 452CONFIG_HFSPLUS_FS=m
408CONFIG_CRAMFS=m 453CONFIG_CRAMFS=m
409CONFIG_SQUASHFS=m 454CONFIG_SQUASHFS=m
455CONFIG_SQUASHFS_LZ4=y
410CONFIG_SQUASHFS_LZO=y 456CONFIG_SQUASHFS_LZO=y
411CONFIG_MINIX_FS=m 457CONFIG_MINIX_FS=m
412CONFIG_OMFS_FS=m 458CONFIG_OMFS_FS=m
@@ -476,10 +522,18 @@ CONFIG_DLM=m
476CONFIG_MAGIC_SYSRQ=y 522CONFIG_MAGIC_SYSRQ=y
477CONFIG_ASYNC_RAID6_TEST=m 523CONFIG_ASYNC_RAID6_TEST=m
478CONFIG_TEST_STRING_HELPERS=m 524CONFIG_TEST_STRING_HELPERS=m
525CONFIG_TEST_KSTRTOX=m
526CONFIG_TEST_LKM=m
527CONFIG_TEST_USER_COPY=m
528CONFIG_TEST_BPF=m
529CONFIG_TEST_FIRMWARE=m
530CONFIG_TEST_UDELAY=m
531CONFIG_EARLY_PRINTK=y
479CONFIG_ENCRYPTED_KEYS=m 532CONFIG_ENCRYPTED_KEYS=m
480CONFIG_CRYPTO_MANAGER=y 533CONFIG_CRYPTO_MANAGER=y
481CONFIG_CRYPTO_USER=m 534CONFIG_CRYPTO_USER=m
482CONFIG_CRYPTO_CRYPTD=m 535CONFIG_CRYPTO_CRYPTD=m
536CONFIG_CRYPTO_MCRYPTD=m
483CONFIG_CRYPTO_TEST=m 537CONFIG_CRYPTO_TEST=m
484CONFIG_CRYPTO_CCM=m 538CONFIG_CRYPTO_CCM=m
485CONFIG_CRYPTO_GCM=m 539CONFIG_CRYPTO_GCM=m
@@ -514,13 +568,10 @@ CONFIG_CRYPTO_LZO=m
514CONFIG_CRYPTO_LZ4=m 568CONFIG_CRYPTO_LZ4=m
515CONFIG_CRYPTO_LZ4HC=m 569CONFIG_CRYPTO_LZ4HC=m
516# CONFIG_CRYPTO_ANSI_CPRNG is not set 570# CONFIG_CRYPTO_ANSI_CPRNG is not set
571CONFIG_CRYPTO_DRBG_MENU=m
572CONFIG_CRYPTO_DRBG_HASH=y
573CONFIG_CRYPTO_DRBG_CTR=y
517CONFIG_CRYPTO_USER_API_HASH=m 574CONFIG_CRYPTO_USER_API_HASH=m
518CONFIG_CRYPTO_USER_API_SKCIPHER=m 575CONFIG_CRYPTO_USER_API_SKCIPHER=m
519# CONFIG_CRYPTO_HW is not set 576# CONFIG_CRYPTO_HW is not set
520CONFIG_XZ_DEC_X86=y
521CONFIG_XZ_DEC_POWERPC=y
522CONFIG_XZ_DEC_IA64=y
523CONFIG_XZ_DEC_ARM=y
524CONFIG_XZ_DEC_ARMTHUMB=y
525CONFIG_XZ_DEC_SPARC=y
526CONFIG_XZ_DEC_TEST=m 577CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index be16740c0749..7859a738c81e 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -34,6 +34,7 @@ CONFIG_M68060=y
34CONFIG_APOLLO=y 34CONFIG_APOLLO=y
35# CONFIG_COMPACTION is not set 35# CONFIG_COMPACTION is not set
36CONFIG_CLEANCACHE=y 36CONFIG_CLEANCACHE=y
37CONFIG_ZPOOL=m
37# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
38CONFIG_BINFMT_AOUT=m 39CONFIG_BINFMT_AOUT=m
39CONFIG_BINFMT_MISC=m 40CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
53CONFIG_NET_IPGRE_DEMUX=m 54CONFIG_NET_IPGRE_DEMUX=m
54CONFIG_NET_IPGRE=m 55CONFIG_NET_IPGRE=m
55CONFIG_NET_IPVTI=m 56CONFIG_NET_IPVTI=m
57CONFIG_NET_FOU_IP_TUNNELS=y
58CONFIG_GENEVE=m
56CONFIG_INET_AH=m 59CONFIG_INET_AH=m
57CONFIG_INET_ESP=m 60CONFIG_INET_ESP=m
58CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
94CONFIG_NFT_COUNTER=m 97CONFIG_NFT_COUNTER=m
95CONFIG_NFT_LOG=m 98CONFIG_NFT_LOG=m
96CONFIG_NFT_LIMIT=m 99CONFIG_NFT_LIMIT=m
100CONFIG_NFT_MASQ=m
101CONFIG_NFT_REDIR=m
97CONFIG_NFT_NAT=m 102CONFIG_NFT_NAT=m
98CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
99CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
140CONFIG_NETFILTER_XT_MATCH_OSF=m 145CONFIG_NETFILTER_XT_MATCH_OSF=m
141CONFIG_NETFILTER_XT_MATCH_OWNER=m 146CONFIG_NETFILTER_XT_MATCH_OWNER=m
142CONFIG_NETFILTER_XT_MATCH_POLICY=m 147CONFIG_NETFILTER_XT_MATCH_POLICY=m
148CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
143CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 149CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
144CONFIG_NETFILTER_XT_MATCH_QUOTA=m 150CONFIG_NETFILTER_XT_MATCH_QUOTA=m
145CONFIG_NETFILTER_XT_MATCH_RATEEST=m 151CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
161CONFIG_IP_SET_HASH_IPPORT=m 167CONFIG_IP_SET_HASH_IPPORT=m
162CONFIG_IP_SET_HASH_IPPORTIP=m 168CONFIG_IP_SET_HASH_IPPORTIP=m
163CONFIG_IP_SET_HASH_IPPORTNET=m 169CONFIG_IP_SET_HASH_IPPORTNET=m
170CONFIG_IP_SET_HASH_MAC=m
164CONFIG_IP_SET_HASH_NETPORTNET=m 171CONFIG_IP_SET_HASH_NETPORTNET=m
165CONFIG_IP_SET_HASH_NET=m 172CONFIG_IP_SET_HASH_NET=m
166CONFIG_IP_SET_HASH_NETNET=m 173CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
168CONFIG_IP_SET_HASH_NETIFACE=m 175CONFIG_IP_SET_HASH_NETIFACE=m
169CONFIG_IP_SET_LIST_SET=m 176CONFIG_IP_SET_LIST_SET=m
170CONFIG_NF_CONNTRACK_IPV4=m 177CONFIG_NF_CONNTRACK_IPV4=m
178CONFIG_NF_LOG_ARP=m
171CONFIG_NFT_CHAIN_ROUTE_IPV4=m 179CONFIG_NFT_CHAIN_ROUTE_IPV4=m
172CONFIG_NFT_CHAIN_NAT_IPV4=m
173CONFIG_NF_TABLES_ARP=m 180CONFIG_NF_TABLES_ARP=m
181CONFIG_NFT_CHAIN_NAT_IPV4=m
182CONFIG_NFT_MASQ_IPV4=m
183CONFIG_NFT_REDIR_IPV4=m
174CONFIG_IP_NF_IPTABLES=m 184CONFIG_IP_NF_IPTABLES=m
175CONFIG_IP_NF_MATCH_AH=m 185CONFIG_IP_NF_MATCH_AH=m
176CONFIG_IP_NF_MATCH_ECN=m 186CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
179CONFIG_IP_NF_FILTER=m 189CONFIG_IP_NF_FILTER=m
180CONFIG_IP_NF_TARGET_REJECT=m 190CONFIG_IP_NF_TARGET_REJECT=m
181CONFIG_IP_NF_TARGET_SYNPROXY=m 191CONFIG_IP_NF_TARGET_SYNPROXY=m
182CONFIG_IP_NF_TARGET_ULOG=m 192CONFIG_IP_NF_NAT=m
183CONFIG_NF_NAT_IPV4=m
184CONFIG_IP_NF_TARGET_MASQUERADE=m 193CONFIG_IP_NF_TARGET_MASQUERADE=m
185CONFIG_IP_NF_TARGET_NETMAP=m 194CONFIG_IP_NF_TARGET_NETMAP=m
186CONFIG_IP_NF_TARGET_REDIRECT=m 195CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
195CONFIG_NF_CONNTRACK_IPV6=m 204CONFIG_NF_CONNTRACK_IPV6=m
196CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205CONFIG_NFT_CHAIN_ROUTE_IPV6=m
197CONFIG_NFT_CHAIN_NAT_IPV6=m 206CONFIG_NFT_CHAIN_NAT_IPV6=m
207CONFIG_NFT_MASQ_IPV6=m
208CONFIG_NFT_REDIR_IPV6=m
198CONFIG_IP6_NF_IPTABLES=m 209CONFIG_IP6_NF_IPTABLES=m
199CONFIG_IP6_NF_MATCH_AH=m 210CONFIG_IP6_NF_MATCH_AH=m
200CONFIG_IP6_NF_MATCH_EUI64=m 211CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
211CONFIG_IP6_NF_TARGET_SYNPROXY=m 222CONFIG_IP6_NF_TARGET_SYNPROXY=m
212CONFIG_IP6_NF_MANGLE=m 223CONFIG_IP6_NF_MANGLE=m
213CONFIG_IP6_NF_RAW=m 224CONFIG_IP6_NF_RAW=m
214CONFIG_NF_NAT_IPV6=m 225CONFIG_IP6_NF_NAT=m
215CONFIG_IP6_NF_TARGET_MASQUERADE=m 226CONFIG_IP6_NF_TARGET_MASQUERADE=m
216CONFIG_IP6_NF_TARGET_NPT=m 227CONFIG_IP6_NF_TARGET_NPT=m
217CONFIG_NF_TABLES_BRIDGE=m 228CONFIG_NF_TABLES_BRIDGE=m
229CONFIG_NFT_BRIDGE_META=m
230CONFIG_NFT_BRIDGE_REJECT=m
231CONFIG_NF_LOG_BRIDGE=m
232CONFIG_BRIDGE_NF_EBTABLES=m
233CONFIG_BRIDGE_EBT_BROUTE=m
234CONFIG_BRIDGE_EBT_T_FILTER=m
235CONFIG_BRIDGE_EBT_T_NAT=m
236CONFIG_BRIDGE_EBT_802_3=m
237CONFIG_BRIDGE_EBT_AMONG=m
238CONFIG_BRIDGE_EBT_ARP=m
239CONFIG_BRIDGE_EBT_IP=m
240CONFIG_BRIDGE_EBT_IP6=m
241CONFIG_BRIDGE_EBT_LIMIT=m
242CONFIG_BRIDGE_EBT_MARK=m
243CONFIG_BRIDGE_EBT_PKTTYPE=m
244CONFIG_BRIDGE_EBT_STP=m
245CONFIG_BRIDGE_EBT_VLAN=m
246CONFIG_BRIDGE_EBT_ARPREPLY=m
247CONFIG_BRIDGE_EBT_DNAT=m
248CONFIG_BRIDGE_EBT_MARK_T=m
249CONFIG_BRIDGE_EBT_REDIRECT=m
250CONFIG_BRIDGE_EBT_SNAT=m
251CONFIG_BRIDGE_EBT_LOG=m
252CONFIG_BRIDGE_EBT_NFLOG=m
218CONFIG_IP_DCCP=m 253CONFIG_IP_DCCP=m
219# CONFIG_IP_DCCP_CCID3 is not set 254# CONFIG_IP_DCCP_CCID3 is not set
220CONFIG_SCTP_COOKIE_HMAC_SHA1=y 255CONFIG_SCTP_COOKIE_HMAC_SHA1=y
221CONFIG_RDS=m 256CONFIG_RDS=m
222CONFIG_RDS_TCP=m 257CONFIG_RDS_TCP=m
223CONFIG_L2TP=m 258CONFIG_L2TP=m
259CONFIG_BRIDGE=m
224CONFIG_ATALK=m 260CONFIG_ATALK=m
261CONFIG_6LOWPAN=m
225CONFIG_DNS_RESOLVER=y 262CONFIG_DNS_RESOLVER=y
226CONFIG_BATMAN_ADV=m 263CONFIG_BATMAN_ADV=m
227CONFIG_BATMAN_ADV_DAT=y 264CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
230CONFIG_NETLINK_DIAG=m 267CONFIG_NETLINK_DIAG=m
231CONFIG_NET_MPLS_GSO=m 268CONFIG_NET_MPLS_GSO=m
232# CONFIG_WIRELESS is not set 269# CONFIG_WIRELESS is not set
270# CONFIG_UEVENT_HELPER is not set
233CONFIG_DEVTMPFS=y 271CONFIG_DEVTMPFS=y
272CONFIG_DEVTMPFS_MOUNT=y
234# CONFIG_FIRMWARE_IN_KERNEL is not set 273# CONFIG_FIRMWARE_IN_KERNEL is not set
235# CONFIG_FW_LOADER_USER_HELPER is not set
236CONFIG_CONNECTOR=m 274CONFIG_CONNECTOR=m
237CONFIG_BLK_DEV_LOOP=y 275CONFIG_BLK_DEV_LOOP=y
238CONFIG_BLK_DEV_CRYPTOLOOP=m 276CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
281CONFIG_NET_TEAM_MODE_RANDOM=m 319CONFIG_NET_TEAM_MODE_RANDOM=m
282CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 320CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
283CONFIG_NET_TEAM_MODE_LOADBALANCE=m 321CONFIG_NET_TEAM_MODE_LOADBALANCE=m
322CONFIG_MACVLAN=m
323CONFIG_MACVTAP=m
324CONFIG_IPVLAN=m
284CONFIG_VXLAN=m 325CONFIG_VXLAN=m
285CONFIG_NETCONSOLE=m 326CONFIG_NETCONSOLE=m
286CONFIG_NETCONSOLE_DYNAMIC=y 327CONFIG_NETCONSOLE_DYNAMIC=y
@@ -291,6 +332,8 @@ CONFIG_VETH=m
291# CONFIG_NET_VENDOR_MARVELL is not set 332# CONFIG_NET_VENDOR_MARVELL is not set
292# CONFIG_NET_VENDOR_MICREL is not set 333# CONFIG_NET_VENDOR_MICREL is not set
293# CONFIG_NET_VENDOR_NATSEMI is not set 334# CONFIG_NET_VENDOR_NATSEMI is not set
335# CONFIG_NET_VENDOR_QUALCOMM is not set
336# CONFIG_NET_VENDOR_ROCKER is not set
294# CONFIG_NET_VENDOR_SAMSUNG is not set 337# CONFIG_NET_VENDOR_SAMSUNG is not set
295# CONFIG_NET_VENDOR_SEEQ is not set 338# CONFIG_NET_VENDOR_SEEQ is not set
296# CONFIG_NET_VENDOR_STMICRO is not set 339# CONFIG_NET_VENDOR_STMICRO is not set
@@ -332,6 +375,7 @@ CONFIG_HID=m
332CONFIG_HIDRAW=y 375CONFIG_HIDRAW=y
333CONFIG_UHID=m 376CONFIG_UHID=m
334# CONFIG_HID_GENERIC is not set 377# CONFIG_HID_GENERIC is not set
378# CONFIG_HID_PLANTRONICS is not set
335# CONFIG_USB_SUPPORT is not set 379# CONFIG_USB_SUPPORT is not set
336CONFIG_RTC_CLASS=y 380CONFIG_RTC_CLASS=y
337CONFIG_RTC_DRV_GENERIC=m 381CONFIG_RTC_DRV_GENERIC=m
@@ -350,6 +394,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
350CONFIG_AUTOFS4_FS=m 394CONFIG_AUTOFS4_FS=m
351CONFIG_FUSE_FS=m 395CONFIG_FUSE_FS=m
352CONFIG_CUSE=m 396CONFIG_CUSE=m
397CONFIG_OVERLAY_FS=m
353CONFIG_ISO9660_FS=y 398CONFIG_ISO9660_FS=y
354CONFIG_JOLIET=y 399CONFIG_JOLIET=y
355CONFIG_ZISOFS=y 400CONFIG_ZISOFS=y
@@ -365,6 +410,7 @@ CONFIG_HFS_FS=m
365CONFIG_HFSPLUS_FS=m 410CONFIG_HFSPLUS_FS=m
366CONFIG_CRAMFS=m 411CONFIG_CRAMFS=m
367CONFIG_SQUASHFS=m 412CONFIG_SQUASHFS=m
413CONFIG_SQUASHFS_LZ4=y
368CONFIG_SQUASHFS_LZO=y 414CONFIG_SQUASHFS_LZO=y
369CONFIG_MINIX_FS=m 415CONFIG_MINIX_FS=m
370CONFIG_OMFS_FS=m 416CONFIG_OMFS_FS=m
@@ -434,10 +480,18 @@ CONFIG_DLM=m
434CONFIG_MAGIC_SYSRQ=y 480CONFIG_MAGIC_SYSRQ=y
435CONFIG_ASYNC_RAID6_TEST=m 481CONFIG_ASYNC_RAID6_TEST=m
436CONFIG_TEST_STRING_HELPERS=m 482CONFIG_TEST_STRING_HELPERS=m
483CONFIG_TEST_KSTRTOX=m
484CONFIG_TEST_LKM=m
485CONFIG_TEST_USER_COPY=m
486CONFIG_TEST_BPF=m
487CONFIG_TEST_FIRMWARE=m
488CONFIG_TEST_UDELAY=m
489CONFIG_EARLY_PRINTK=y
437CONFIG_ENCRYPTED_KEYS=m 490CONFIG_ENCRYPTED_KEYS=m
438CONFIG_CRYPTO_MANAGER=y 491CONFIG_CRYPTO_MANAGER=y
439CONFIG_CRYPTO_USER=m 492CONFIG_CRYPTO_USER=m
440CONFIG_CRYPTO_CRYPTD=m 493CONFIG_CRYPTO_CRYPTD=m
494CONFIG_CRYPTO_MCRYPTD=m
441CONFIG_CRYPTO_TEST=m 495CONFIG_CRYPTO_TEST=m
442CONFIG_CRYPTO_CCM=m 496CONFIG_CRYPTO_CCM=m
443CONFIG_CRYPTO_GCM=m 497CONFIG_CRYPTO_GCM=m
@@ -472,13 +526,10 @@ CONFIG_CRYPTO_LZO=m
472CONFIG_CRYPTO_LZ4=m 526CONFIG_CRYPTO_LZ4=m
473CONFIG_CRYPTO_LZ4HC=m 527CONFIG_CRYPTO_LZ4HC=m
474# CONFIG_CRYPTO_ANSI_CPRNG is not set 528# CONFIG_CRYPTO_ANSI_CPRNG is not set
529CONFIG_CRYPTO_DRBG_MENU=m
530CONFIG_CRYPTO_DRBG_HASH=y
531CONFIG_CRYPTO_DRBG_CTR=y
475CONFIG_CRYPTO_USER_API_HASH=m 532CONFIG_CRYPTO_USER_API_HASH=m
476CONFIG_CRYPTO_USER_API_SKCIPHER=m 533CONFIG_CRYPTO_USER_API_SKCIPHER=m
477# CONFIG_CRYPTO_HW is not set 534# CONFIG_CRYPTO_HW is not set
478CONFIG_XZ_DEC_X86=y
479CONFIG_XZ_DEC_POWERPC=y
480CONFIG_XZ_DEC_IA64=y
481CONFIG_XZ_DEC_ARM=y
482CONFIG_XZ_DEC_ARMTHUMB=y
483CONFIG_XZ_DEC_SPARC=y
484CONFIG_XZ_DEC_TEST=m 535CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 391e185d73be..372593a3b398 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -31,8 +31,10 @@ CONFIG_M68030=y
31CONFIG_M68040=y 31CONFIG_M68040=y
32CONFIG_M68060=y 32CONFIG_M68060=y
33CONFIG_ATARI=y 33CONFIG_ATARI=y
34CONFIG_ATARI_ROM_ISA=y
34# CONFIG_COMPACTION is not set 35# CONFIG_COMPACTION is not set
35CONFIG_CLEANCACHE=y 36CONFIG_CLEANCACHE=y
37CONFIG_ZPOOL=m
36# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
37CONFIG_BINFMT_AOUT=m 39CONFIG_BINFMT_AOUT=m
38CONFIG_BINFMT_MISC=m 40CONFIG_BINFMT_MISC=m
@@ -52,6 +54,8 @@ CONFIG_NET_IPIP=m
52CONFIG_NET_IPGRE_DEMUX=m 54CONFIG_NET_IPGRE_DEMUX=m
53CONFIG_NET_IPGRE=m 55CONFIG_NET_IPGRE=m
54CONFIG_NET_IPVTI=m 56CONFIG_NET_IPVTI=m
57CONFIG_NET_FOU_IP_TUNNELS=y
58CONFIG_GENEVE=m
55CONFIG_INET_AH=m 59CONFIG_INET_AH=m
56CONFIG_INET_ESP=m 60CONFIG_INET_ESP=m
57CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
@@ -93,6 +97,8 @@ CONFIG_NFT_HASH=m
93CONFIG_NFT_COUNTER=m 97CONFIG_NFT_COUNTER=m
94CONFIG_NFT_LOG=m 98CONFIG_NFT_LOG=m
95CONFIG_NFT_LIMIT=m 99CONFIG_NFT_LIMIT=m
100CONFIG_NFT_MASQ=m
101CONFIG_NFT_REDIR=m
96CONFIG_NFT_NAT=m 102CONFIG_NFT_NAT=m
97CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
98CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
@@ -139,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
139CONFIG_NETFILTER_XT_MATCH_OSF=m 145CONFIG_NETFILTER_XT_MATCH_OSF=m
140CONFIG_NETFILTER_XT_MATCH_OWNER=m 146CONFIG_NETFILTER_XT_MATCH_OWNER=m
141CONFIG_NETFILTER_XT_MATCH_POLICY=m 147CONFIG_NETFILTER_XT_MATCH_POLICY=m
148CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
142CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 149CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
143CONFIG_NETFILTER_XT_MATCH_QUOTA=m 150CONFIG_NETFILTER_XT_MATCH_QUOTA=m
144CONFIG_NETFILTER_XT_MATCH_RATEEST=m 151CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
160CONFIG_IP_SET_HASH_IPPORT=m 167CONFIG_IP_SET_HASH_IPPORT=m
161CONFIG_IP_SET_HASH_IPPORTIP=m 168CONFIG_IP_SET_HASH_IPPORTIP=m
162CONFIG_IP_SET_HASH_IPPORTNET=m 169CONFIG_IP_SET_HASH_IPPORTNET=m
170CONFIG_IP_SET_HASH_MAC=m
163CONFIG_IP_SET_HASH_NETPORTNET=m 171CONFIG_IP_SET_HASH_NETPORTNET=m
164CONFIG_IP_SET_HASH_NET=m 172CONFIG_IP_SET_HASH_NET=m
165CONFIG_IP_SET_HASH_NETNET=m 173CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
167CONFIG_IP_SET_HASH_NETIFACE=m 175CONFIG_IP_SET_HASH_NETIFACE=m
168CONFIG_IP_SET_LIST_SET=m 176CONFIG_IP_SET_LIST_SET=m
169CONFIG_NF_CONNTRACK_IPV4=m 177CONFIG_NF_CONNTRACK_IPV4=m
178CONFIG_NF_LOG_ARP=m
170CONFIG_NFT_CHAIN_ROUTE_IPV4=m 179CONFIG_NFT_CHAIN_ROUTE_IPV4=m
171CONFIG_NFT_CHAIN_NAT_IPV4=m
172CONFIG_NF_TABLES_ARP=m 180CONFIG_NF_TABLES_ARP=m
181CONFIG_NFT_CHAIN_NAT_IPV4=m
182CONFIG_NFT_MASQ_IPV4=m
183CONFIG_NFT_REDIR_IPV4=m
173CONFIG_IP_NF_IPTABLES=m 184CONFIG_IP_NF_IPTABLES=m
174CONFIG_IP_NF_MATCH_AH=m 185CONFIG_IP_NF_MATCH_AH=m
175CONFIG_IP_NF_MATCH_ECN=m 186CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
178CONFIG_IP_NF_FILTER=m 189CONFIG_IP_NF_FILTER=m
179CONFIG_IP_NF_TARGET_REJECT=m 190CONFIG_IP_NF_TARGET_REJECT=m
180CONFIG_IP_NF_TARGET_SYNPROXY=m 191CONFIG_IP_NF_TARGET_SYNPROXY=m
181CONFIG_IP_NF_TARGET_ULOG=m 192CONFIG_IP_NF_NAT=m
182CONFIG_NF_NAT_IPV4=m
183CONFIG_IP_NF_TARGET_MASQUERADE=m 193CONFIG_IP_NF_TARGET_MASQUERADE=m
184CONFIG_IP_NF_TARGET_NETMAP=m 194CONFIG_IP_NF_TARGET_NETMAP=m
185CONFIG_IP_NF_TARGET_REDIRECT=m 195CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
194CONFIG_NF_CONNTRACK_IPV6=m 204CONFIG_NF_CONNTRACK_IPV6=m
195CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205CONFIG_NFT_CHAIN_ROUTE_IPV6=m
196CONFIG_NFT_CHAIN_NAT_IPV6=m 206CONFIG_NFT_CHAIN_NAT_IPV6=m
207CONFIG_NFT_MASQ_IPV6=m
208CONFIG_NFT_REDIR_IPV6=m
197CONFIG_IP6_NF_IPTABLES=m 209CONFIG_IP6_NF_IPTABLES=m
198CONFIG_IP6_NF_MATCH_AH=m 210CONFIG_IP6_NF_MATCH_AH=m
199CONFIG_IP6_NF_MATCH_EUI64=m 211CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
210CONFIG_IP6_NF_TARGET_SYNPROXY=m 222CONFIG_IP6_NF_TARGET_SYNPROXY=m
211CONFIG_IP6_NF_MANGLE=m 223CONFIG_IP6_NF_MANGLE=m
212CONFIG_IP6_NF_RAW=m 224CONFIG_IP6_NF_RAW=m
213CONFIG_NF_NAT_IPV6=m 225CONFIG_IP6_NF_NAT=m
214CONFIG_IP6_NF_TARGET_MASQUERADE=m 226CONFIG_IP6_NF_TARGET_MASQUERADE=m
215CONFIG_IP6_NF_TARGET_NPT=m 227CONFIG_IP6_NF_TARGET_NPT=m
216CONFIG_NF_TABLES_BRIDGE=m 228CONFIG_NF_TABLES_BRIDGE=m
229CONFIG_NFT_BRIDGE_META=m
230CONFIG_NFT_BRIDGE_REJECT=m
231CONFIG_NF_LOG_BRIDGE=m
232CONFIG_BRIDGE_NF_EBTABLES=m
233CONFIG_BRIDGE_EBT_BROUTE=m
234CONFIG_BRIDGE_EBT_T_FILTER=m
235CONFIG_BRIDGE_EBT_T_NAT=m
236CONFIG_BRIDGE_EBT_802_3=m
237CONFIG_BRIDGE_EBT_AMONG=m
238CONFIG_BRIDGE_EBT_ARP=m
239CONFIG_BRIDGE_EBT_IP=m
240CONFIG_BRIDGE_EBT_IP6=m
241CONFIG_BRIDGE_EBT_LIMIT=m
242CONFIG_BRIDGE_EBT_MARK=m
243CONFIG_BRIDGE_EBT_PKTTYPE=m
244CONFIG_BRIDGE_EBT_STP=m
245CONFIG_BRIDGE_EBT_VLAN=m
246CONFIG_BRIDGE_EBT_ARPREPLY=m
247CONFIG_BRIDGE_EBT_DNAT=m
248CONFIG_BRIDGE_EBT_MARK_T=m
249CONFIG_BRIDGE_EBT_REDIRECT=m
250CONFIG_BRIDGE_EBT_SNAT=m
251CONFIG_BRIDGE_EBT_LOG=m
252CONFIG_BRIDGE_EBT_NFLOG=m
217CONFIG_IP_DCCP=m 253CONFIG_IP_DCCP=m
218# CONFIG_IP_DCCP_CCID3 is not set 254# CONFIG_IP_DCCP_CCID3 is not set
219CONFIG_SCTP_COOKIE_HMAC_SHA1=y 255CONFIG_SCTP_COOKIE_HMAC_SHA1=y
220CONFIG_RDS=m 256CONFIG_RDS=m
221CONFIG_RDS_TCP=m 257CONFIG_RDS_TCP=m
222CONFIG_L2TP=m 258CONFIG_L2TP=m
259CONFIG_BRIDGE=m
223CONFIG_ATALK=m 260CONFIG_ATALK=m
261CONFIG_6LOWPAN=m
224CONFIG_DNS_RESOLVER=y 262CONFIG_DNS_RESOLVER=y
225CONFIG_BATMAN_ADV=m 263CONFIG_BATMAN_ADV=m
226CONFIG_BATMAN_ADV_DAT=y 264CONFIG_BATMAN_ADV_DAT=y
@@ -229,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
229CONFIG_NETLINK_DIAG=m 267CONFIG_NETLINK_DIAG=m
230CONFIG_NET_MPLS_GSO=m 268CONFIG_NET_MPLS_GSO=m
231# CONFIG_WIRELESS is not set 269# CONFIG_WIRELESS is not set
270# CONFIG_UEVENT_HELPER is not set
232CONFIG_DEVTMPFS=y 271CONFIG_DEVTMPFS=y
272CONFIG_DEVTMPFS_MOUNT=y
233# CONFIG_FIRMWARE_IN_KERNEL is not set 273# CONFIG_FIRMWARE_IN_KERNEL is not set
234# CONFIG_FW_LOADER_USER_HELPER is not set
235CONFIG_CONNECTOR=m 274CONFIG_CONNECTOR=m
236CONFIG_PARPORT=m 275CONFIG_PARPORT=m
237CONFIG_PARPORT_ATARI=m 276CONFIG_PARPORT_ATARI=m
@@ -289,6 +328,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
289CONFIG_NET_TEAM_MODE_RANDOM=m 328CONFIG_NET_TEAM_MODE_RANDOM=m
290CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 329CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
291CONFIG_NET_TEAM_MODE_LOADBALANCE=m 330CONFIG_NET_TEAM_MODE_LOADBALANCE=m
331CONFIG_MACVLAN=m
332CONFIG_MACVTAP=m
333CONFIG_IPVLAN=m
292CONFIG_VXLAN=m 334CONFIG_VXLAN=m
293CONFIG_NETCONSOLE=m 335CONFIG_NETCONSOLE=m
294CONFIG_NETCONSOLE_DYNAMIC=y 336CONFIG_NETCONSOLE_DYNAMIC=y
@@ -299,8 +341,12 @@ CONFIG_ATARILANCE=y
299# CONFIG_NET_VENDOR_INTEL is not set 341# CONFIG_NET_VENDOR_INTEL is not set
300# CONFIG_NET_VENDOR_MARVELL is not set 342# CONFIG_NET_VENDOR_MARVELL is not set
301# CONFIG_NET_VENDOR_MICREL is not set 343# CONFIG_NET_VENDOR_MICREL is not set
344CONFIG_NE2000=y
345# CONFIG_NET_VENDOR_QUALCOMM is not set
346# CONFIG_NET_VENDOR_ROCKER is not set
302# CONFIG_NET_VENDOR_SAMSUNG is not set 347# CONFIG_NET_VENDOR_SAMSUNG is not set
303# CONFIG_NET_VENDOR_SEEQ is not set 348# CONFIG_NET_VENDOR_SEEQ is not set
349CONFIG_SMC91X=y
304# CONFIG_NET_VENDOR_STMICRO is not set 350# CONFIG_NET_VENDOR_STMICRO is not set
305# CONFIG_NET_VENDOR_VIA is not set 351# CONFIG_NET_VENDOR_VIA is not set
306# CONFIG_NET_VENDOR_WIZNET is not set 352# CONFIG_NET_VENDOR_WIZNET is not set
@@ -345,6 +391,7 @@ CONFIG_DMASOUND_ATARI=m
345CONFIG_HID=m 391CONFIG_HID=m
346CONFIG_HIDRAW=y 392CONFIG_HIDRAW=y
347CONFIG_UHID=m 393CONFIG_UHID=m
394# CONFIG_HID_PLANTRONICS is not set
348CONFIG_RTC_CLASS=y 395CONFIG_RTC_CLASS=y
349CONFIG_RTC_DRV_GENERIC=m 396CONFIG_RTC_DRV_GENERIC=m
350# CONFIG_IOMMU_SUPPORT is not set 397# CONFIG_IOMMU_SUPPORT is not set
@@ -354,6 +401,8 @@ CONFIG_NATFEAT=y
354CONFIG_NFBLOCK=y 401CONFIG_NFBLOCK=y
355CONFIG_NFCON=y 402CONFIG_NFCON=y
356CONFIG_NFETH=y 403CONFIG_NFETH=y
404CONFIG_ATARI_ETHERNAT=y
405CONFIG_ATARI_ETHERNEC=y
357CONFIG_ATARI_DSP56K=m 406CONFIG_ATARI_DSP56K=m
358CONFIG_EXT4_FS=y 407CONFIG_EXT4_FS=y
359CONFIG_REISERFS_FS=m 408CONFIG_REISERFS_FS=m
@@ -367,6 +416,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
367CONFIG_AUTOFS4_FS=m 416CONFIG_AUTOFS4_FS=m
368CONFIG_FUSE_FS=m 417CONFIG_FUSE_FS=m
369CONFIG_CUSE=m 418CONFIG_CUSE=m
419CONFIG_OVERLAY_FS=m
370CONFIG_ISO9660_FS=y 420CONFIG_ISO9660_FS=y
371CONFIG_JOLIET=y 421CONFIG_JOLIET=y
372CONFIG_ZISOFS=y 422CONFIG_ZISOFS=y
@@ -382,6 +432,7 @@ CONFIG_HFS_FS=m
382CONFIG_HFSPLUS_FS=m 432CONFIG_HFSPLUS_FS=m
383CONFIG_CRAMFS=m 433CONFIG_CRAMFS=m
384CONFIG_SQUASHFS=m 434CONFIG_SQUASHFS=m
435CONFIG_SQUASHFS_LZ4=y
385CONFIG_SQUASHFS_LZO=y 436CONFIG_SQUASHFS_LZO=y
386CONFIG_MINIX_FS=m 437CONFIG_MINIX_FS=m
387CONFIG_OMFS_FS=m 438CONFIG_OMFS_FS=m
@@ -451,10 +502,18 @@ CONFIG_DLM=m
451CONFIG_MAGIC_SYSRQ=y 502CONFIG_MAGIC_SYSRQ=y
452CONFIG_ASYNC_RAID6_TEST=m 503CONFIG_ASYNC_RAID6_TEST=m
453CONFIG_TEST_STRING_HELPERS=m 504CONFIG_TEST_STRING_HELPERS=m
505CONFIG_TEST_KSTRTOX=m
506CONFIG_TEST_LKM=m
507CONFIG_TEST_USER_COPY=m
508CONFIG_TEST_BPF=m
509CONFIG_TEST_FIRMWARE=m
510CONFIG_TEST_UDELAY=m
511CONFIG_EARLY_PRINTK=y
454CONFIG_ENCRYPTED_KEYS=m 512CONFIG_ENCRYPTED_KEYS=m
455CONFIG_CRYPTO_MANAGER=y 513CONFIG_CRYPTO_MANAGER=y
456CONFIG_CRYPTO_USER=m 514CONFIG_CRYPTO_USER=m
457CONFIG_CRYPTO_CRYPTD=m 515CONFIG_CRYPTO_CRYPTD=m
516CONFIG_CRYPTO_MCRYPTD=m
458CONFIG_CRYPTO_TEST=m 517CONFIG_CRYPTO_TEST=m
459CONFIG_CRYPTO_CCM=m 518CONFIG_CRYPTO_CCM=m
460CONFIG_CRYPTO_GCM=m 519CONFIG_CRYPTO_GCM=m
@@ -489,13 +548,10 @@ CONFIG_CRYPTO_LZO=m
489CONFIG_CRYPTO_LZ4=m 548CONFIG_CRYPTO_LZ4=m
490CONFIG_CRYPTO_LZ4HC=m 549CONFIG_CRYPTO_LZ4HC=m
491# CONFIG_CRYPTO_ANSI_CPRNG is not set 550# CONFIG_CRYPTO_ANSI_CPRNG is not set
551CONFIG_CRYPTO_DRBG_MENU=m
552CONFIG_CRYPTO_DRBG_HASH=y
553CONFIG_CRYPTO_DRBG_CTR=y
492CONFIG_CRYPTO_USER_API_HASH=m 554CONFIG_CRYPTO_USER_API_HASH=m
493CONFIG_CRYPTO_USER_API_SKCIPHER=m 555CONFIG_CRYPTO_USER_API_SKCIPHER=m
494# CONFIG_CRYPTO_HW is not set 556# CONFIG_CRYPTO_HW is not set
495CONFIG_XZ_DEC_X86=y
496CONFIG_XZ_DEC_POWERPC=y
497CONFIG_XZ_DEC_IA64=y
498CONFIG_XZ_DEC_ARM=y
499CONFIG_XZ_DEC_ARMTHUMB=y
500CONFIG_XZ_DEC_SPARC=y
501CONFIG_XZ_DEC_TEST=m 557CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index d0e705d1a063..f3bd35e76ea4 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -32,6 +32,7 @@ CONFIG_VME=y
32CONFIG_BVME6000=y 32CONFIG_BVME6000=y
33# CONFIG_COMPACTION is not set 33# CONFIG_COMPACTION is not set
34CONFIG_CLEANCACHE=y 34CONFIG_CLEANCACHE=y
35CONFIG_ZPOOL=m
35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 36# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
36CONFIG_BINFMT_AOUT=m 37CONFIG_BINFMT_AOUT=m
37CONFIG_BINFMT_MISC=m 38CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
51CONFIG_NET_IPGRE_DEMUX=m 52CONFIG_NET_IPGRE_DEMUX=m
52CONFIG_NET_IPGRE=m 53CONFIG_NET_IPGRE=m
53CONFIG_NET_IPVTI=m 54CONFIG_NET_IPVTI=m
55CONFIG_NET_FOU_IP_TUNNELS=y
56CONFIG_GENEVE=m
54CONFIG_INET_AH=m 57CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 58CONFIG_INET_ESP=m
56CONFIG_INET_IPCOMP=m 59CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
92CONFIG_NFT_COUNTER=m 95CONFIG_NFT_COUNTER=m
93CONFIG_NFT_LOG=m 96CONFIG_NFT_LOG=m
94CONFIG_NFT_LIMIT=m 97CONFIG_NFT_LIMIT=m
98CONFIG_NFT_MASQ=m
99CONFIG_NFT_REDIR=m
95CONFIG_NFT_NAT=m 100CONFIG_NFT_NAT=m
96CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
97CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
138CONFIG_NETFILTER_XT_MATCH_OSF=m 143CONFIG_NETFILTER_XT_MATCH_OSF=m
139CONFIG_NETFILTER_XT_MATCH_OWNER=m 144CONFIG_NETFILTER_XT_MATCH_OWNER=m
140CONFIG_NETFILTER_XT_MATCH_POLICY=m 145CONFIG_NETFILTER_XT_MATCH_POLICY=m
146CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
141CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 147CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
142CONFIG_NETFILTER_XT_MATCH_QUOTA=m 148CONFIG_NETFILTER_XT_MATCH_QUOTA=m
143CONFIG_NETFILTER_XT_MATCH_RATEEST=m 149CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
159CONFIG_IP_SET_HASH_IPPORT=m 165CONFIG_IP_SET_HASH_IPPORT=m
160CONFIG_IP_SET_HASH_IPPORTIP=m 166CONFIG_IP_SET_HASH_IPPORTIP=m
161CONFIG_IP_SET_HASH_IPPORTNET=m 167CONFIG_IP_SET_HASH_IPPORTNET=m
168CONFIG_IP_SET_HASH_MAC=m
162CONFIG_IP_SET_HASH_NETPORTNET=m 169CONFIG_IP_SET_HASH_NETPORTNET=m
163CONFIG_IP_SET_HASH_NET=m 170CONFIG_IP_SET_HASH_NET=m
164CONFIG_IP_SET_HASH_NETNET=m 171CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
166CONFIG_IP_SET_HASH_NETIFACE=m 173CONFIG_IP_SET_HASH_NETIFACE=m
167CONFIG_IP_SET_LIST_SET=m 174CONFIG_IP_SET_LIST_SET=m
168CONFIG_NF_CONNTRACK_IPV4=m 175CONFIG_NF_CONNTRACK_IPV4=m
176CONFIG_NF_LOG_ARP=m
169CONFIG_NFT_CHAIN_ROUTE_IPV4=m 177CONFIG_NFT_CHAIN_ROUTE_IPV4=m
170CONFIG_NFT_CHAIN_NAT_IPV4=m
171CONFIG_NF_TABLES_ARP=m 178CONFIG_NF_TABLES_ARP=m
179CONFIG_NFT_CHAIN_NAT_IPV4=m
180CONFIG_NFT_MASQ_IPV4=m
181CONFIG_NFT_REDIR_IPV4=m
172CONFIG_IP_NF_IPTABLES=m 182CONFIG_IP_NF_IPTABLES=m
173CONFIG_IP_NF_MATCH_AH=m 183CONFIG_IP_NF_MATCH_AH=m
174CONFIG_IP_NF_MATCH_ECN=m 184CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
177CONFIG_IP_NF_FILTER=m 187CONFIG_IP_NF_FILTER=m
178CONFIG_IP_NF_TARGET_REJECT=m 188CONFIG_IP_NF_TARGET_REJECT=m
179CONFIG_IP_NF_TARGET_SYNPROXY=m 189CONFIG_IP_NF_TARGET_SYNPROXY=m
180CONFIG_IP_NF_TARGET_ULOG=m 190CONFIG_IP_NF_NAT=m
181CONFIG_NF_NAT_IPV4=m
182CONFIG_IP_NF_TARGET_MASQUERADE=m 191CONFIG_IP_NF_TARGET_MASQUERADE=m
183CONFIG_IP_NF_TARGET_NETMAP=m 192CONFIG_IP_NF_TARGET_NETMAP=m
184CONFIG_IP_NF_TARGET_REDIRECT=m 193CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
193CONFIG_NF_CONNTRACK_IPV6=m 202CONFIG_NF_CONNTRACK_IPV6=m
194CONFIG_NFT_CHAIN_ROUTE_IPV6=m 203CONFIG_NFT_CHAIN_ROUTE_IPV6=m
195CONFIG_NFT_CHAIN_NAT_IPV6=m 204CONFIG_NFT_CHAIN_NAT_IPV6=m
205CONFIG_NFT_MASQ_IPV6=m
206CONFIG_NFT_REDIR_IPV6=m
196CONFIG_IP6_NF_IPTABLES=m 207CONFIG_IP6_NF_IPTABLES=m
197CONFIG_IP6_NF_MATCH_AH=m 208CONFIG_IP6_NF_MATCH_AH=m
198CONFIG_IP6_NF_MATCH_EUI64=m 209CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
209CONFIG_IP6_NF_TARGET_SYNPROXY=m 220CONFIG_IP6_NF_TARGET_SYNPROXY=m
210CONFIG_IP6_NF_MANGLE=m 221CONFIG_IP6_NF_MANGLE=m
211CONFIG_IP6_NF_RAW=m 222CONFIG_IP6_NF_RAW=m
212CONFIG_NF_NAT_IPV6=m 223CONFIG_IP6_NF_NAT=m
213CONFIG_IP6_NF_TARGET_MASQUERADE=m 224CONFIG_IP6_NF_TARGET_MASQUERADE=m
214CONFIG_IP6_NF_TARGET_NPT=m 225CONFIG_IP6_NF_TARGET_NPT=m
215CONFIG_NF_TABLES_BRIDGE=m 226CONFIG_NF_TABLES_BRIDGE=m
227CONFIG_NFT_BRIDGE_META=m
228CONFIG_NFT_BRIDGE_REJECT=m
229CONFIG_NF_LOG_BRIDGE=m
230CONFIG_BRIDGE_NF_EBTABLES=m
231CONFIG_BRIDGE_EBT_BROUTE=m
232CONFIG_BRIDGE_EBT_T_FILTER=m
233CONFIG_BRIDGE_EBT_T_NAT=m
234CONFIG_BRIDGE_EBT_802_3=m
235CONFIG_BRIDGE_EBT_AMONG=m
236CONFIG_BRIDGE_EBT_ARP=m
237CONFIG_BRIDGE_EBT_IP=m
238CONFIG_BRIDGE_EBT_IP6=m
239CONFIG_BRIDGE_EBT_LIMIT=m
240CONFIG_BRIDGE_EBT_MARK=m
241CONFIG_BRIDGE_EBT_PKTTYPE=m
242CONFIG_BRIDGE_EBT_STP=m
243CONFIG_BRIDGE_EBT_VLAN=m
244CONFIG_BRIDGE_EBT_ARPREPLY=m
245CONFIG_BRIDGE_EBT_DNAT=m
246CONFIG_BRIDGE_EBT_MARK_T=m
247CONFIG_BRIDGE_EBT_REDIRECT=m
248CONFIG_BRIDGE_EBT_SNAT=m
249CONFIG_BRIDGE_EBT_LOG=m
250CONFIG_BRIDGE_EBT_NFLOG=m
216CONFIG_IP_DCCP=m 251CONFIG_IP_DCCP=m
217# CONFIG_IP_DCCP_CCID3 is not set 252# CONFIG_IP_DCCP_CCID3 is not set
218CONFIG_SCTP_COOKIE_HMAC_SHA1=y 253CONFIG_SCTP_COOKIE_HMAC_SHA1=y
219CONFIG_RDS=m 254CONFIG_RDS=m
220CONFIG_RDS_TCP=m 255CONFIG_RDS_TCP=m
221CONFIG_L2TP=m 256CONFIG_L2TP=m
257CONFIG_BRIDGE=m
222CONFIG_ATALK=m 258CONFIG_ATALK=m
259CONFIG_6LOWPAN=m
223CONFIG_DNS_RESOLVER=y 260CONFIG_DNS_RESOLVER=y
224CONFIG_BATMAN_ADV=m 261CONFIG_BATMAN_ADV=m
225CONFIG_BATMAN_ADV_DAT=y 262CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
228CONFIG_NETLINK_DIAG=m 265CONFIG_NETLINK_DIAG=m
229CONFIG_NET_MPLS_GSO=m 266CONFIG_NET_MPLS_GSO=m
230# CONFIG_WIRELESS is not set 267# CONFIG_WIRELESS is not set
268# CONFIG_UEVENT_HELPER is not set
231CONFIG_DEVTMPFS=y 269CONFIG_DEVTMPFS=y
270CONFIG_DEVTMPFS_MOUNT=y
232# CONFIG_FIRMWARE_IN_KERNEL is not set 271# CONFIG_FIRMWARE_IN_KERNEL is not set
233# CONFIG_FW_LOADER_USER_HELPER is not set
234CONFIG_CONNECTOR=m 272CONFIG_CONNECTOR=m
235CONFIG_BLK_DEV_LOOP=y 273CONFIG_BLK_DEV_LOOP=y
236CONFIG_BLK_DEV_CRYPTOLOOP=m 274CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
280CONFIG_NET_TEAM_MODE_RANDOM=m 318CONFIG_NET_TEAM_MODE_RANDOM=m
281CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 319CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
282CONFIG_NET_TEAM_MODE_LOADBALANCE=m 320CONFIG_NET_TEAM_MODE_LOADBALANCE=m
321CONFIG_MACVLAN=m
322CONFIG_MACVTAP=m
323CONFIG_IPVLAN=m
283CONFIG_VXLAN=m 324CONFIG_VXLAN=m
284CONFIG_NETCONSOLE=m 325CONFIG_NETCONSOLE=m
285CONFIG_NETCONSOLE_DYNAMIC=y 326CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_BVME6000_NET=y
290# CONFIG_NET_VENDOR_MARVELL is not set 331# CONFIG_NET_VENDOR_MARVELL is not set
291# CONFIG_NET_VENDOR_MICREL is not set 332# CONFIG_NET_VENDOR_MICREL is not set
292# CONFIG_NET_VENDOR_NATSEMI is not set 333# CONFIG_NET_VENDOR_NATSEMI is not set
334# CONFIG_NET_VENDOR_QUALCOMM is not set
335# CONFIG_NET_VENDOR_ROCKER is not set
293# CONFIG_NET_VENDOR_SAMSUNG is not set 336# CONFIG_NET_VENDOR_SAMSUNG is not set
294# CONFIG_NET_VENDOR_SEEQ is not set 337# CONFIG_NET_VENDOR_SEEQ is not set
295# CONFIG_NET_VENDOR_STMICRO is not set 338# CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
326CONFIG_HIDRAW=y 369CONFIG_HIDRAW=y
327CONFIG_UHID=m 370CONFIG_UHID=m
328# CONFIG_HID_GENERIC is not set 371# CONFIG_HID_GENERIC is not set
372# CONFIG_HID_PLANTRONICS is not set
329# CONFIG_USB_SUPPORT is not set 373# CONFIG_USB_SUPPORT is not set
330CONFIG_RTC_CLASS=y 374CONFIG_RTC_CLASS=y
331CONFIG_RTC_DRV_GENERIC=m 375CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
343CONFIG_AUTOFS4_FS=m 387CONFIG_AUTOFS4_FS=m
344CONFIG_FUSE_FS=m 388CONFIG_FUSE_FS=m
345CONFIG_CUSE=m 389CONFIG_CUSE=m
390CONFIG_OVERLAY_FS=m
346CONFIG_ISO9660_FS=y 391CONFIG_ISO9660_FS=y
347CONFIG_JOLIET=y 392CONFIG_JOLIET=y
348CONFIG_ZISOFS=y 393CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
358CONFIG_HFSPLUS_FS=m 403CONFIG_HFSPLUS_FS=m
359CONFIG_CRAMFS=m 404CONFIG_CRAMFS=m
360CONFIG_SQUASHFS=m 405CONFIG_SQUASHFS=m
406CONFIG_SQUASHFS_LZ4=y
361CONFIG_SQUASHFS_LZO=y 407CONFIG_SQUASHFS_LZO=y
362CONFIG_MINIX_FS=m 408CONFIG_MINIX_FS=m
363CONFIG_OMFS_FS=m 409CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
427CONFIG_MAGIC_SYSRQ=y 473CONFIG_MAGIC_SYSRQ=y
428CONFIG_ASYNC_RAID6_TEST=m 474CONFIG_ASYNC_RAID6_TEST=m
429CONFIG_TEST_STRING_HELPERS=m 475CONFIG_TEST_STRING_HELPERS=m
476CONFIG_TEST_KSTRTOX=m
477CONFIG_TEST_LKM=m
478CONFIG_TEST_USER_COPY=m
479CONFIG_TEST_BPF=m
480CONFIG_TEST_FIRMWARE=m
481CONFIG_TEST_UDELAY=m
482CONFIG_EARLY_PRINTK=y
430CONFIG_ENCRYPTED_KEYS=m 483CONFIG_ENCRYPTED_KEYS=m
431CONFIG_CRYPTO_MANAGER=y 484CONFIG_CRYPTO_MANAGER=y
432CONFIG_CRYPTO_USER=m 485CONFIG_CRYPTO_USER=m
433CONFIG_CRYPTO_CRYPTD=m 486CONFIG_CRYPTO_CRYPTD=m
487CONFIG_CRYPTO_MCRYPTD=m
434CONFIG_CRYPTO_TEST=m 488CONFIG_CRYPTO_TEST=m
435CONFIG_CRYPTO_CCM=m 489CONFIG_CRYPTO_CCM=m
436CONFIG_CRYPTO_GCM=m 490CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
465CONFIG_CRYPTO_LZ4=m 519CONFIG_CRYPTO_LZ4=m
466CONFIG_CRYPTO_LZ4HC=m 520CONFIG_CRYPTO_LZ4HC=m
467# CONFIG_CRYPTO_ANSI_CPRNG is not set 521# CONFIG_CRYPTO_ANSI_CPRNG is not set
522CONFIG_CRYPTO_DRBG_MENU=m
523CONFIG_CRYPTO_DRBG_HASH=y
524CONFIG_CRYPTO_DRBG_CTR=y
468CONFIG_CRYPTO_USER_API_HASH=m 525CONFIG_CRYPTO_USER_API_HASH=m
469CONFIG_CRYPTO_USER_API_SKCIPHER=m 526CONFIG_CRYPTO_USER_API_SKCIPHER=m
470# CONFIG_CRYPTO_HW is not set 527# CONFIG_CRYPTO_HW is not set
471CONFIG_XZ_DEC_X86=y
472CONFIG_XZ_DEC_POWERPC=y
473CONFIG_XZ_DEC_IA64=y
474CONFIG_XZ_DEC_ARM=y
475CONFIG_XZ_DEC_ARMTHUMB=y
476CONFIG_XZ_DEC_SPARC=y
477CONFIG_XZ_DEC_TEST=m 528CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index fdc7e9672249..9f9793fb2b73 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -34,6 +34,7 @@ CONFIG_M68060=y
34CONFIG_HP300=y 34CONFIG_HP300=y
35# CONFIG_COMPACTION is not set 35# CONFIG_COMPACTION is not set
36CONFIG_CLEANCACHE=y 36CONFIG_CLEANCACHE=y
37CONFIG_ZPOOL=m
37# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
38CONFIG_BINFMT_AOUT=m 39CONFIG_BINFMT_AOUT=m
39CONFIG_BINFMT_MISC=m 40CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
53CONFIG_NET_IPGRE_DEMUX=m 54CONFIG_NET_IPGRE_DEMUX=m
54CONFIG_NET_IPGRE=m 55CONFIG_NET_IPGRE=m
55CONFIG_NET_IPVTI=m 56CONFIG_NET_IPVTI=m
57CONFIG_NET_FOU_IP_TUNNELS=y
58CONFIG_GENEVE=m
56CONFIG_INET_AH=m 59CONFIG_INET_AH=m
57CONFIG_INET_ESP=m 60CONFIG_INET_ESP=m
58CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
94CONFIG_NFT_COUNTER=m 97CONFIG_NFT_COUNTER=m
95CONFIG_NFT_LOG=m 98CONFIG_NFT_LOG=m
96CONFIG_NFT_LIMIT=m 99CONFIG_NFT_LIMIT=m
100CONFIG_NFT_MASQ=m
101CONFIG_NFT_REDIR=m
97CONFIG_NFT_NAT=m 102CONFIG_NFT_NAT=m
98CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
99CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
140CONFIG_NETFILTER_XT_MATCH_OSF=m 145CONFIG_NETFILTER_XT_MATCH_OSF=m
141CONFIG_NETFILTER_XT_MATCH_OWNER=m 146CONFIG_NETFILTER_XT_MATCH_OWNER=m
142CONFIG_NETFILTER_XT_MATCH_POLICY=m 147CONFIG_NETFILTER_XT_MATCH_POLICY=m
148CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
143CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 149CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
144CONFIG_NETFILTER_XT_MATCH_QUOTA=m 150CONFIG_NETFILTER_XT_MATCH_QUOTA=m
145CONFIG_NETFILTER_XT_MATCH_RATEEST=m 151CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
161CONFIG_IP_SET_HASH_IPPORT=m 167CONFIG_IP_SET_HASH_IPPORT=m
162CONFIG_IP_SET_HASH_IPPORTIP=m 168CONFIG_IP_SET_HASH_IPPORTIP=m
163CONFIG_IP_SET_HASH_IPPORTNET=m 169CONFIG_IP_SET_HASH_IPPORTNET=m
170CONFIG_IP_SET_HASH_MAC=m
164CONFIG_IP_SET_HASH_NETPORTNET=m 171CONFIG_IP_SET_HASH_NETPORTNET=m
165CONFIG_IP_SET_HASH_NET=m 172CONFIG_IP_SET_HASH_NET=m
166CONFIG_IP_SET_HASH_NETNET=m 173CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
168CONFIG_IP_SET_HASH_NETIFACE=m 175CONFIG_IP_SET_HASH_NETIFACE=m
169CONFIG_IP_SET_LIST_SET=m 176CONFIG_IP_SET_LIST_SET=m
170CONFIG_NF_CONNTRACK_IPV4=m 177CONFIG_NF_CONNTRACK_IPV4=m
178CONFIG_NF_LOG_ARP=m
171CONFIG_NFT_CHAIN_ROUTE_IPV4=m 179CONFIG_NFT_CHAIN_ROUTE_IPV4=m
172CONFIG_NFT_CHAIN_NAT_IPV4=m
173CONFIG_NF_TABLES_ARP=m 180CONFIG_NF_TABLES_ARP=m
181CONFIG_NFT_CHAIN_NAT_IPV4=m
182CONFIG_NFT_MASQ_IPV4=m
183CONFIG_NFT_REDIR_IPV4=m
174CONFIG_IP_NF_IPTABLES=m 184CONFIG_IP_NF_IPTABLES=m
175CONFIG_IP_NF_MATCH_AH=m 185CONFIG_IP_NF_MATCH_AH=m
176CONFIG_IP_NF_MATCH_ECN=m 186CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
179CONFIG_IP_NF_FILTER=m 189CONFIG_IP_NF_FILTER=m
180CONFIG_IP_NF_TARGET_REJECT=m 190CONFIG_IP_NF_TARGET_REJECT=m
181CONFIG_IP_NF_TARGET_SYNPROXY=m 191CONFIG_IP_NF_TARGET_SYNPROXY=m
182CONFIG_IP_NF_TARGET_ULOG=m 192CONFIG_IP_NF_NAT=m
183CONFIG_NF_NAT_IPV4=m
184CONFIG_IP_NF_TARGET_MASQUERADE=m 193CONFIG_IP_NF_TARGET_MASQUERADE=m
185CONFIG_IP_NF_TARGET_NETMAP=m 194CONFIG_IP_NF_TARGET_NETMAP=m
186CONFIG_IP_NF_TARGET_REDIRECT=m 195CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
195CONFIG_NF_CONNTRACK_IPV6=m 204CONFIG_NF_CONNTRACK_IPV6=m
196CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205CONFIG_NFT_CHAIN_ROUTE_IPV6=m
197CONFIG_NFT_CHAIN_NAT_IPV6=m 206CONFIG_NFT_CHAIN_NAT_IPV6=m
207CONFIG_NFT_MASQ_IPV6=m
208CONFIG_NFT_REDIR_IPV6=m
198CONFIG_IP6_NF_IPTABLES=m 209CONFIG_IP6_NF_IPTABLES=m
199CONFIG_IP6_NF_MATCH_AH=m 210CONFIG_IP6_NF_MATCH_AH=m
200CONFIG_IP6_NF_MATCH_EUI64=m 211CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
211CONFIG_IP6_NF_TARGET_SYNPROXY=m 222CONFIG_IP6_NF_TARGET_SYNPROXY=m
212CONFIG_IP6_NF_MANGLE=m 223CONFIG_IP6_NF_MANGLE=m
213CONFIG_IP6_NF_RAW=m 224CONFIG_IP6_NF_RAW=m
214CONFIG_NF_NAT_IPV6=m 225CONFIG_IP6_NF_NAT=m
215CONFIG_IP6_NF_TARGET_MASQUERADE=m 226CONFIG_IP6_NF_TARGET_MASQUERADE=m
216CONFIG_IP6_NF_TARGET_NPT=m 227CONFIG_IP6_NF_TARGET_NPT=m
217CONFIG_NF_TABLES_BRIDGE=m 228CONFIG_NF_TABLES_BRIDGE=m
229CONFIG_NFT_BRIDGE_META=m
230CONFIG_NFT_BRIDGE_REJECT=m
231CONFIG_NF_LOG_BRIDGE=m
232CONFIG_BRIDGE_NF_EBTABLES=m
233CONFIG_BRIDGE_EBT_BROUTE=m
234CONFIG_BRIDGE_EBT_T_FILTER=m
235CONFIG_BRIDGE_EBT_T_NAT=m
236CONFIG_BRIDGE_EBT_802_3=m
237CONFIG_BRIDGE_EBT_AMONG=m
238CONFIG_BRIDGE_EBT_ARP=m
239CONFIG_BRIDGE_EBT_IP=m
240CONFIG_BRIDGE_EBT_IP6=m
241CONFIG_BRIDGE_EBT_LIMIT=m
242CONFIG_BRIDGE_EBT_MARK=m
243CONFIG_BRIDGE_EBT_PKTTYPE=m
244CONFIG_BRIDGE_EBT_STP=m
245CONFIG_BRIDGE_EBT_VLAN=m
246CONFIG_BRIDGE_EBT_ARPREPLY=m
247CONFIG_BRIDGE_EBT_DNAT=m
248CONFIG_BRIDGE_EBT_MARK_T=m
249CONFIG_BRIDGE_EBT_REDIRECT=m
250CONFIG_BRIDGE_EBT_SNAT=m
251CONFIG_BRIDGE_EBT_LOG=m
252CONFIG_BRIDGE_EBT_NFLOG=m
218CONFIG_IP_DCCP=m 253CONFIG_IP_DCCP=m
219# CONFIG_IP_DCCP_CCID3 is not set 254# CONFIG_IP_DCCP_CCID3 is not set
220CONFIG_SCTP_COOKIE_HMAC_SHA1=y 255CONFIG_SCTP_COOKIE_HMAC_SHA1=y
221CONFIG_RDS=m 256CONFIG_RDS=m
222CONFIG_RDS_TCP=m 257CONFIG_RDS_TCP=m
223CONFIG_L2TP=m 258CONFIG_L2TP=m
259CONFIG_BRIDGE=m
224CONFIG_ATALK=m 260CONFIG_ATALK=m
261CONFIG_6LOWPAN=m
225CONFIG_DNS_RESOLVER=y 262CONFIG_DNS_RESOLVER=y
226CONFIG_BATMAN_ADV=m 263CONFIG_BATMAN_ADV=m
227CONFIG_BATMAN_ADV_DAT=y 264CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
230CONFIG_NETLINK_DIAG=m 267CONFIG_NETLINK_DIAG=m
231CONFIG_NET_MPLS_GSO=m 268CONFIG_NET_MPLS_GSO=m
232# CONFIG_WIRELESS is not set 269# CONFIG_WIRELESS is not set
270# CONFIG_UEVENT_HELPER is not set
233CONFIG_DEVTMPFS=y 271CONFIG_DEVTMPFS=y
272CONFIG_DEVTMPFS_MOUNT=y
234# CONFIG_FIRMWARE_IN_KERNEL is not set 273# CONFIG_FIRMWARE_IN_KERNEL is not set
235# CONFIG_FW_LOADER_USER_HELPER is not set
236CONFIG_CONNECTOR=m 274CONFIG_CONNECTOR=m
237CONFIG_BLK_DEV_LOOP=y 275CONFIG_BLK_DEV_LOOP=y
238CONFIG_BLK_DEV_CRYPTOLOOP=m 276CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
281CONFIG_NET_TEAM_MODE_RANDOM=m 319CONFIG_NET_TEAM_MODE_RANDOM=m
282CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 320CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
283CONFIG_NET_TEAM_MODE_LOADBALANCE=m 321CONFIG_NET_TEAM_MODE_LOADBALANCE=m
322CONFIG_MACVLAN=m
323CONFIG_MACVTAP=m
324CONFIG_IPVLAN=m
284CONFIG_VXLAN=m 325CONFIG_VXLAN=m
285CONFIG_NETCONSOLE=m 326CONFIG_NETCONSOLE=m
286CONFIG_NETCONSOLE_DYNAMIC=y 327CONFIG_NETCONSOLE_DYNAMIC=y
@@ -292,6 +333,8 @@ CONFIG_HPLANCE=y
292# CONFIG_NET_VENDOR_MARVELL is not set 333# CONFIG_NET_VENDOR_MARVELL is not set
293# CONFIG_NET_VENDOR_MICREL is not set 334# CONFIG_NET_VENDOR_MICREL is not set
294# CONFIG_NET_VENDOR_NATSEMI is not set 335# CONFIG_NET_VENDOR_NATSEMI is not set
336# CONFIG_NET_VENDOR_QUALCOMM is not set
337# CONFIG_NET_VENDOR_ROCKER is not set
295# CONFIG_NET_VENDOR_SAMSUNG is not set 338# CONFIG_NET_VENDOR_SAMSUNG is not set
296# CONFIG_NET_VENDOR_SEEQ is not set 339# CONFIG_NET_VENDOR_SEEQ is not set
297# CONFIG_NET_VENDOR_STMICRO is not set 340# CONFIG_NET_VENDOR_STMICRO is not set
@@ -335,6 +378,7 @@ CONFIG_HID=m
335CONFIG_HIDRAW=y 378CONFIG_HIDRAW=y
336CONFIG_UHID=m 379CONFIG_UHID=m
337# CONFIG_HID_GENERIC is not set 380# CONFIG_HID_GENERIC is not set
381# CONFIG_HID_PLANTRONICS is not set
338# CONFIG_USB_SUPPORT is not set 382# CONFIG_USB_SUPPORT is not set
339CONFIG_RTC_CLASS=y 383CONFIG_RTC_CLASS=y
340CONFIG_RTC_DRV_GENERIC=m 384CONFIG_RTC_DRV_GENERIC=m
@@ -352,6 +396,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
352CONFIG_AUTOFS4_FS=m 396CONFIG_AUTOFS4_FS=m
353CONFIG_FUSE_FS=m 397CONFIG_FUSE_FS=m
354CONFIG_CUSE=m 398CONFIG_CUSE=m
399CONFIG_OVERLAY_FS=m
355CONFIG_ISO9660_FS=y 400CONFIG_ISO9660_FS=y
356CONFIG_JOLIET=y 401CONFIG_JOLIET=y
357CONFIG_ZISOFS=y 402CONFIG_ZISOFS=y
@@ -367,6 +412,7 @@ CONFIG_HFS_FS=m
367CONFIG_HFSPLUS_FS=m 412CONFIG_HFSPLUS_FS=m
368CONFIG_CRAMFS=m 413CONFIG_CRAMFS=m
369CONFIG_SQUASHFS=m 414CONFIG_SQUASHFS=m
415CONFIG_SQUASHFS_LZ4=y
370CONFIG_SQUASHFS_LZO=y 416CONFIG_SQUASHFS_LZO=y
371CONFIG_MINIX_FS=m 417CONFIG_MINIX_FS=m
372CONFIG_OMFS_FS=m 418CONFIG_OMFS_FS=m
@@ -436,10 +482,18 @@ CONFIG_DLM=m
436CONFIG_MAGIC_SYSRQ=y 482CONFIG_MAGIC_SYSRQ=y
437CONFIG_ASYNC_RAID6_TEST=m 483CONFIG_ASYNC_RAID6_TEST=m
438CONFIG_TEST_STRING_HELPERS=m 484CONFIG_TEST_STRING_HELPERS=m
485CONFIG_TEST_KSTRTOX=m
486CONFIG_TEST_LKM=m
487CONFIG_TEST_USER_COPY=m
488CONFIG_TEST_BPF=m
489CONFIG_TEST_FIRMWARE=m
490CONFIG_TEST_UDELAY=m
491CONFIG_EARLY_PRINTK=y
439CONFIG_ENCRYPTED_KEYS=m 492CONFIG_ENCRYPTED_KEYS=m
440CONFIG_CRYPTO_MANAGER=y 493CONFIG_CRYPTO_MANAGER=y
441CONFIG_CRYPTO_USER=m 494CONFIG_CRYPTO_USER=m
442CONFIG_CRYPTO_CRYPTD=m 495CONFIG_CRYPTO_CRYPTD=m
496CONFIG_CRYPTO_MCRYPTD=m
443CONFIG_CRYPTO_TEST=m 497CONFIG_CRYPTO_TEST=m
444CONFIG_CRYPTO_CCM=m 498CONFIG_CRYPTO_CCM=m
445CONFIG_CRYPTO_GCM=m 499CONFIG_CRYPTO_GCM=m
@@ -474,13 +528,10 @@ CONFIG_CRYPTO_LZO=m
474CONFIG_CRYPTO_LZ4=m 528CONFIG_CRYPTO_LZ4=m
475CONFIG_CRYPTO_LZ4HC=m 529CONFIG_CRYPTO_LZ4HC=m
476# CONFIG_CRYPTO_ANSI_CPRNG is not set 530# CONFIG_CRYPTO_ANSI_CPRNG is not set
531CONFIG_CRYPTO_DRBG_MENU=m
532CONFIG_CRYPTO_DRBG_HASH=y
533CONFIG_CRYPTO_DRBG_CTR=y
477CONFIG_CRYPTO_USER_API_HASH=m 534CONFIG_CRYPTO_USER_API_HASH=m
478CONFIG_CRYPTO_USER_API_SKCIPHER=m 535CONFIG_CRYPTO_USER_API_SKCIPHER=m
479# CONFIG_CRYPTO_HW is not set 536# CONFIG_CRYPTO_HW is not set
480CONFIG_XZ_DEC_X86=y
481CONFIG_XZ_DEC_POWERPC=y
482CONFIG_XZ_DEC_IA64=y
483CONFIG_XZ_DEC_ARM=y
484CONFIG_XZ_DEC_ARMTHUMB=y
485CONFIG_XZ_DEC_SPARC=y
486CONFIG_XZ_DEC_TEST=m 537CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3d345641d5a0..89f225c01a0b 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -33,6 +33,7 @@ CONFIG_M68KFPU_EMU=y
33CONFIG_MAC=y 33CONFIG_MAC=y
34# CONFIG_COMPACTION is not set 34# CONFIG_COMPACTION is not set
35CONFIG_CLEANCACHE=y 35CONFIG_CLEANCACHE=y
36CONFIG_ZPOOL=m
36# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 37# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
37CONFIG_BINFMT_AOUT=m 38CONFIG_BINFMT_AOUT=m
38CONFIG_BINFMT_MISC=m 39CONFIG_BINFMT_MISC=m
@@ -52,6 +53,8 @@ CONFIG_NET_IPIP=m
52CONFIG_NET_IPGRE_DEMUX=m 53CONFIG_NET_IPGRE_DEMUX=m
53CONFIG_NET_IPGRE=m 54CONFIG_NET_IPGRE=m
54CONFIG_NET_IPVTI=m 55CONFIG_NET_IPVTI=m
56CONFIG_NET_FOU_IP_TUNNELS=y
57CONFIG_GENEVE=m
55CONFIG_INET_AH=m 58CONFIG_INET_AH=m
56CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
57CONFIG_INET_IPCOMP=m 60CONFIG_INET_IPCOMP=m
@@ -93,6 +96,8 @@ CONFIG_NFT_HASH=m
93CONFIG_NFT_COUNTER=m 96CONFIG_NFT_COUNTER=m
94CONFIG_NFT_LOG=m 97CONFIG_NFT_LOG=m
95CONFIG_NFT_LIMIT=m 98CONFIG_NFT_LIMIT=m
99CONFIG_NFT_MASQ=m
100CONFIG_NFT_REDIR=m
96CONFIG_NFT_NAT=m 101CONFIG_NFT_NAT=m
97CONFIG_NFT_QUEUE=m 102CONFIG_NFT_QUEUE=m
98CONFIG_NFT_REJECT=m 103CONFIG_NFT_REJECT=m
@@ -139,6 +144,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
139CONFIG_NETFILTER_XT_MATCH_OSF=m 144CONFIG_NETFILTER_XT_MATCH_OSF=m
140CONFIG_NETFILTER_XT_MATCH_OWNER=m 145CONFIG_NETFILTER_XT_MATCH_OWNER=m
141CONFIG_NETFILTER_XT_MATCH_POLICY=m 146CONFIG_NETFILTER_XT_MATCH_POLICY=m
147CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
142CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 148CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
143CONFIG_NETFILTER_XT_MATCH_QUOTA=m 149CONFIG_NETFILTER_XT_MATCH_QUOTA=m
144CONFIG_NETFILTER_XT_MATCH_RATEEST=m 150CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +166,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
160CONFIG_IP_SET_HASH_IPPORT=m 166CONFIG_IP_SET_HASH_IPPORT=m
161CONFIG_IP_SET_HASH_IPPORTIP=m 167CONFIG_IP_SET_HASH_IPPORTIP=m
162CONFIG_IP_SET_HASH_IPPORTNET=m 168CONFIG_IP_SET_HASH_IPPORTNET=m
169CONFIG_IP_SET_HASH_MAC=m
163CONFIG_IP_SET_HASH_NETPORTNET=m 170CONFIG_IP_SET_HASH_NETPORTNET=m
164CONFIG_IP_SET_HASH_NET=m 171CONFIG_IP_SET_HASH_NET=m
165CONFIG_IP_SET_HASH_NETNET=m 172CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +174,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
167CONFIG_IP_SET_HASH_NETIFACE=m 174CONFIG_IP_SET_HASH_NETIFACE=m
168CONFIG_IP_SET_LIST_SET=m 175CONFIG_IP_SET_LIST_SET=m
169CONFIG_NF_CONNTRACK_IPV4=m 176CONFIG_NF_CONNTRACK_IPV4=m
177CONFIG_NF_LOG_ARP=m
170CONFIG_NFT_CHAIN_ROUTE_IPV4=m 178CONFIG_NFT_CHAIN_ROUTE_IPV4=m
171CONFIG_NFT_CHAIN_NAT_IPV4=m
172CONFIG_NF_TABLES_ARP=m 179CONFIG_NF_TABLES_ARP=m
180CONFIG_NFT_CHAIN_NAT_IPV4=m
181CONFIG_NFT_MASQ_IPV4=m
182CONFIG_NFT_REDIR_IPV4=m
173CONFIG_IP_NF_IPTABLES=m 183CONFIG_IP_NF_IPTABLES=m
174CONFIG_IP_NF_MATCH_AH=m 184CONFIG_IP_NF_MATCH_AH=m
175CONFIG_IP_NF_MATCH_ECN=m 185CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +188,7 @@ CONFIG_IP_NF_MATCH_TTL=m
178CONFIG_IP_NF_FILTER=m 188CONFIG_IP_NF_FILTER=m
179CONFIG_IP_NF_TARGET_REJECT=m 189CONFIG_IP_NF_TARGET_REJECT=m
180CONFIG_IP_NF_TARGET_SYNPROXY=m 190CONFIG_IP_NF_TARGET_SYNPROXY=m
181CONFIG_IP_NF_TARGET_ULOG=m 191CONFIG_IP_NF_NAT=m
182CONFIG_NF_NAT_IPV4=m
183CONFIG_IP_NF_TARGET_MASQUERADE=m 192CONFIG_IP_NF_TARGET_MASQUERADE=m
184CONFIG_IP_NF_TARGET_NETMAP=m 193CONFIG_IP_NF_TARGET_NETMAP=m
185CONFIG_IP_NF_TARGET_REDIRECT=m 194CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +203,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
194CONFIG_NF_CONNTRACK_IPV6=m 203CONFIG_NF_CONNTRACK_IPV6=m
195CONFIG_NFT_CHAIN_ROUTE_IPV6=m 204CONFIG_NFT_CHAIN_ROUTE_IPV6=m
196CONFIG_NFT_CHAIN_NAT_IPV6=m 205CONFIG_NFT_CHAIN_NAT_IPV6=m
206CONFIG_NFT_MASQ_IPV6=m
207CONFIG_NFT_REDIR_IPV6=m
197CONFIG_IP6_NF_IPTABLES=m 208CONFIG_IP6_NF_IPTABLES=m
198CONFIG_IP6_NF_MATCH_AH=m 209CONFIG_IP6_NF_MATCH_AH=m
199CONFIG_IP6_NF_MATCH_EUI64=m 210CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,20 +221,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
210CONFIG_IP6_NF_TARGET_SYNPROXY=m 221CONFIG_IP6_NF_TARGET_SYNPROXY=m
211CONFIG_IP6_NF_MANGLE=m 222CONFIG_IP6_NF_MANGLE=m
212CONFIG_IP6_NF_RAW=m 223CONFIG_IP6_NF_RAW=m
213CONFIG_NF_NAT_IPV6=m 224CONFIG_IP6_NF_NAT=m
214CONFIG_IP6_NF_TARGET_MASQUERADE=m 225CONFIG_IP6_NF_TARGET_MASQUERADE=m
215CONFIG_IP6_NF_TARGET_NPT=m 226CONFIG_IP6_NF_TARGET_NPT=m
216CONFIG_NF_TABLES_BRIDGE=m 227CONFIG_NF_TABLES_BRIDGE=m
228CONFIG_NFT_BRIDGE_META=m
229CONFIG_NFT_BRIDGE_REJECT=m
230CONFIG_NF_LOG_BRIDGE=m
231CONFIG_BRIDGE_NF_EBTABLES=m
232CONFIG_BRIDGE_EBT_BROUTE=m
233CONFIG_BRIDGE_EBT_T_FILTER=m
234CONFIG_BRIDGE_EBT_T_NAT=m
235CONFIG_BRIDGE_EBT_802_3=m
236CONFIG_BRIDGE_EBT_AMONG=m
237CONFIG_BRIDGE_EBT_ARP=m
238CONFIG_BRIDGE_EBT_IP=m
239CONFIG_BRIDGE_EBT_IP6=m
240CONFIG_BRIDGE_EBT_LIMIT=m
241CONFIG_BRIDGE_EBT_MARK=m
242CONFIG_BRIDGE_EBT_PKTTYPE=m
243CONFIG_BRIDGE_EBT_STP=m
244CONFIG_BRIDGE_EBT_VLAN=m
245CONFIG_BRIDGE_EBT_ARPREPLY=m
246CONFIG_BRIDGE_EBT_DNAT=m
247CONFIG_BRIDGE_EBT_MARK_T=m
248CONFIG_BRIDGE_EBT_REDIRECT=m
249CONFIG_BRIDGE_EBT_SNAT=m
250CONFIG_BRIDGE_EBT_LOG=m
251CONFIG_BRIDGE_EBT_NFLOG=m
217CONFIG_IP_DCCP=m 252CONFIG_IP_DCCP=m
218# CONFIG_IP_DCCP_CCID3 is not set 253# CONFIG_IP_DCCP_CCID3 is not set
219CONFIG_SCTP_COOKIE_HMAC_SHA1=y 254CONFIG_SCTP_COOKIE_HMAC_SHA1=y
220CONFIG_RDS=m 255CONFIG_RDS=m
221CONFIG_RDS_TCP=m 256CONFIG_RDS_TCP=m
222CONFIG_L2TP=m 257CONFIG_L2TP=m
258CONFIG_BRIDGE=m
223CONFIG_ATALK=m 259CONFIG_ATALK=m
224CONFIG_DEV_APPLETALK=m 260CONFIG_DEV_APPLETALK=m
225CONFIG_IPDDP=m 261CONFIG_IPDDP=m
226CONFIG_IPDDP_ENCAP=y 262CONFIG_IPDDP_ENCAP=y
263CONFIG_6LOWPAN=m
227CONFIG_DNS_RESOLVER=y 264CONFIG_DNS_RESOLVER=y
228CONFIG_BATMAN_ADV=m 265CONFIG_BATMAN_ADV=m
229CONFIG_BATMAN_ADV_DAT=y 266CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
232CONFIG_NETLINK_DIAG=m 269CONFIG_NETLINK_DIAG=m
233CONFIG_NET_MPLS_GSO=m 270CONFIG_NET_MPLS_GSO=m
234# CONFIG_WIRELESS is not set 271# CONFIG_WIRELESS is not set
272# CONFIG_UEVENT_HELPER is not set
235CONFIG_DEVTMPFS=y 273CONFIG_DEVTMPFS=y
274CONFIG_DEVTMPFS_MOUNT=y
236# CONFIG_FIRMWARE_IN_KERNEL is not set 275# CONFIG_FIRMWARE_IN_KERNEL is not set
237# CONFIG_FW_LOADER_USER_HELPER is not set
238CONFIG_CONNECTOR=m 276CONFIG_CONNECTOR=m
239CONFIG_BLK_DEV_SWIM=m 277CONFIG_BLK_DEV_SWIM=m
240CONFIG_BLK_DEV_LOOP=y 278CONFIG_BLK_DEV_LOOP=y
@@ -297,6 +335,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
297CONFIG_NET_TEAM_MODE_RANDOM=m 335CONFIG_NET_TEAM_MODE_RANDOM=m
298CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 336CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
299CONFIG_NET_TEAM_MODE_LOADBALANCE=m 337CONFIG_NET_TEAM_MODE_LOADBALANCE=m
338CONFIG_MACVLAN=m
339CONFIG_MACVTAP=m
340CONFIG_IPVLAN=m
300CONFIG_VXLAN=m 341CONFIG_VXLAN=m
301CONFIG_NETCONSOLE=m 342CONFIG_NETCONSOLE=m
302CONFIG_NETCONSOLE_DYNAMIC=y 343CONFIG_NETCONSOLE_DYNAMIC=y
@@ -310,6 +351,8 @@ CONFIG_MAC89x0=y
310# CONFIG_NET_VENDOR_MICREL is not set 351# CONFIG_NET_VENDOR_MICREL is not set
311CONFIG_MACSONIC=y 352CONFIG_MACSONIC=y
312CONFIG_MAC8390=y 353CONFIG_MAC8390=y
354# CONFIG_NET_VENDOR_QUALCOMM is not set
355# CONFIG_NET_VENDOR_ROCKER is not set
313# CONFIG_NET_VENDOR_SAMSUNG is not set 356# CONFIG_NET_VENDOR_SAMSUNG is not set
314# CONFIG_NET_VENDOR_SEEQ is not set 357# CONFIG_NET_VENDOR_SEEQ is not set
315# CONFIG_NET_VENDOR_SMSC is not set 358# CONFIG_NET_VENDOR_SMSC is not set
@@ -357,6 +400,7 @@ CONFIG_HID=m
357CONFIG_HIDRAW=y 400CONFIG_HIDRAW=y
358CONFIG_UHID=m 401CONFIG_UHID=m
359# CONFIG_HID_GENERIC is not set 402# CONFIG_HID_GENERIC is not set
403# CONFIG_HID_PLANTRONICS is not set
360# CONFIG_USB_SUPPORT is not set 404# CONFIG_USB_SUPPORT is not set
361CONFIG_RTC_CLASS=y 405CONFIG_RTC_CLASS=y
362CONFIG_RTC_DRV_GENERIC=m 406CONFIG_RTC_DRV_GENERIC=m
@@ -374,6 +418,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
374CONFIG_AUTOFS4_FS=m 418CONFIG_AUTOFS4_FS=m
375CONFIG_FUSE_FS=m 419CONFIG_FUSE_FS=m
376CONFIG_CUSE=m 420CONFIG_CUSE=m
421CONFIG_OVERLAY_FS=m
377CONFIG_ISO9660_FS=y 422CONFIG_ISO9660_FS=y
378CONFIG_JOLIET=y 423CONFIG_JOLIET=y
379CONFIG_ZISOFS=y 424CONFIG_ZISOFS=y
@@ -389,6 +434,7 @@ CONFIG_HFS_FS=m
389CONFIG_HFSPLUS_FS=m 434CONFIG_HFSPLUS_FS=m
390CONFIG_CRAMFS=m 435CONFIG_CRAMFS=m
391CONFIG_SQUASHFS=m 436CONFIG_SQUASHFS=m
437CONFIG_SQUASHFS_LZ4=y
392CONFIG_SQUASHFS_LZO=y 438CONFIG_SQUASHFS_LZO=y
393CONFIG_MINIX_FS=m 439CONFIG_MINIX_FS=m
394CONFIG_OMFS_FS=m 440CONFIG_OMFS_FS=m
@@ -458,11 +504,18 @@ CONFIG_DLM=m
458CONFIG_MAGIC_SYSRQ=y 504CONFIG_MAGIC_SYSRQ=y
459CONFIG_ASYNC_RAID6_TEST=m 505CONFIG_ASYNC_RAID6_TEST=m
460CONFIG_TEST_STRING_HELPERS=m 506CONFIG_TEST_STRING_HELPERS=m
507CONFIG_TEST_KSTRTOX=m
508CONFIG_TEST_LKM=m
509CONFIG_TEST_USER_COPY=m
510CONFIG_TEST_BPF=m
511CONFIG_TEST_FIRMWARE=m
512CONFIG_TEST_UDELAY=m
461CONFIG_EARLY_PRINTK=y 513CONFIG_EARLY_PRINTK=y
462CONFIG_ENCRYPTED_KEYS=m 514CONFIG_ENCRYPTED_KEYS=m
463CONFIG_CRYPTO_MANAGER=y 515CONFIG_CRYPTO_MANAGER=y
464CONFIG_CRYPTO_USER=m 516CONFIG_CRYPTO_USER=m
465CONFIG_CRYPTO_CRYPTD=m 517CONFIG_CRYPTO_CRYPTD=m
518CONFIG_CRYPTO_MCRYPTD=m
466CONFIG_CRYPTO_TEST=m 519CONFIG_CRYPTO_TEST=m
467CONFIG_CRYPTO_CCM=m 520CONFIG_CRYPTO_CCM=m
468CONFIG_CRYPTO_GCM=m 521CONFIG_CRYPTO_GCM=m
@@ -497,13 +550,10 @@ CONFIG_CRYPTO_LZO=m
497CONFIG_CRYPTO_LZ4=m 550CONFIG_CRYPTO_LZ4=m
498CONFIG_CRYPTO_LZ4HC=m 551CONFIG_CRYPTO_LZ4HC=m
499# CONFIG_CRYPTO_ANSI_CPRNG is not set 552# CONFIG_CRYPTO_ANSI_CPRNG is not set
553CONFIG_CRYPTO_DRBG_MENU=m
554CONFIG_CRYPTO_DRBG_HASH=y
555CONFIG_CRYPTO_DRBG_CTR=y
500CONFIG_CRYPTO_USER_API_HASH=m 556CONFIG_CRYPTO_USER_API_HASH=m
501CONFIG_CRYPTO_USER_API_SKCIPHER=m 557CONFIG_CRYPTO_USER_API_SKCIPHER=m
502# CONFIG_CRYPTO_HW is not set 558# CONFIG_CRYPTO_HW is not set
503CONFIG_XZ_DEC_X86=y
504CONFIG_XZ_DEC_POWERPC=y
505CONFIG_XZ_DEC_IA64=y
506CONFIG_XZ_DEC_ARM=y
507CONFIG_XZ_DEC_ARMTHUMB=y
508CONFIG_XZ_DEC_SPARC=y
509CONFIG_XZ_DEC_TEST=m 559CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 59aa42096000..d3cdb5447a2c 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -39,9 +39,11 @@ CONFIG_SUN3X=y
39CONFIG_Q40=y 39CONFIG_Q40=y
40CONFIG_ZORRO=y 40CONFIG_ZORRO=y
41CONFIG_AMIGA_PCMCIA=y 41CONFIG_AMIGA_PCMCIA=y
42CONFIG_ATARI_ROM_ISA=y
42CONFIG_ZORRO_NAMES=y 43CONFIG_ZORRO_NAMES=y
43# CONFIG_COMPACTION is not set 44# CONFIG_COMPACTION is not set
44CONFIG_CLEANCACHE=y 45CONFIG_CLEANCACHE=y
46CONFIG_ZPOOL=m
45# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 47# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
46CONFIG_BINFMT_AOUT=m 48CONFIG_BINFMT_AOUT=m
47CONFIG_BINFMT_MISC=m 49CONFIG_BINFMT_MISC=m
@@ -61,6 +63,8 @@ CONFIG_NET_IPIP=m
61CONFIG_NET_IPGRE_DEMUX=m 63CONFIG_NET_IPGRE_DEMUX=m
62CONFIG_NET_IPGRE=m 64CONFIG_NET_IPGRE=m
63CONFIG_NET_IPVTI=m 65CONFIG_NET_IPVTI=m
66CONFIG_NET_FOU_IP_TUNNELS=y
67CONFIG_GENEVE=m
64CONFIG_INET_AH=m 68CONFIG_INET_AH=m
65CONFIG_INET_ESP=m 69CONFIG_INET_ESP=m
66CONFIG_INET_IPCOMP=m 70CONFIG_INET_IPCOMP=m
@@ -102,6 +106,8 @@ CONFIG_NFT_HASH=m
102CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
103CONFIG_NFT_LOG=m 107CONFIG_NFT_LOG=m
104CONFIG_NFT_LIMIT=m 108CONFIG_NFT_LIMIT=m
109CONFIG_NFT_MASQ=m
110CONFIG_NFT_REDIR=m
105CONFIG_NFT_NAT=m 111CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 112CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 113CONFIG_NFT_REJECT=m
@@ -148,6 +154,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
148CONFIG_NETFILTER_XT_MATCH_OSF=m 154CONFIG_NETFILTER_XT_MATCH_OSF=m
149CONFIG_NETFILTER_XT_MATCH_OWNER=m 155CONFIG_NETFILTER_XT_MATCH_OWNER=m
150CONFIG_NETFILTER_XT_MATCH_POLICY=m 156CONFIG_NETFILTER_XT_MATCH_POLICY=m
157CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
151CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 158CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
152CONFIG_NETFILTER_XT_MATCH_QUOTA=m 159CONFIG_NETFILTER_XT_MATCH_QUOTA=m
153CONFIG_NETFILTER_XT_MATCH_RATEEST=m 160CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -169,6 +176,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
169CONFIG_IP_SET_HASH_IPPORT=m 176CONFIG_IP_SET_HASH_IPPORT=m
170CONFIG_IP_SET_HASH_IPPORTIP=m 177CONFIG_IP_SET_HASH_IPPORTIP=m
171CONFIG_IP_SET_HASH_IPPORTNET=m 178CONFIG_IP_SET_HASH_IPPORTNET=m
179CONFIG_IP_SET_HASH_MAC=m
172CONFIG_IP_SET_HASH_NETPORTNET=m 180CONFIG_IP_SET_HASH_NETPORTNET=m
173CONFIG_IP_SET_HASH_NET=m 181CONFIG_IP_SET_HASH_NET=m
174CONFIG_IP_SET_HASH_NETNET=m 182CONFIG_IP_SET_HASH_NETNET=m
@@ -176,9 +184,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
176CONFIG_IP_SET_HASH_NETIFACE=m 184CONFIG_IP_SET_HASH_NETIFACE=m
177CONFIG_IP_SET_LIST_SET=m 185CONFIG_IP_SET_LIST_SET=m
178CONFIG_NF_CONNTRACK_IPV4=m 186CONFIG_NF_CONNTRACK_IPV4=m
187CONFIG_NF_LOG_ARP=m
179CONFIG_NFT_CHAIN_ROUTE_IPV4=m 188CONFIG_NFT_CHAIN_ROUTE_IPV4=m
180CONFIG_NFT_CHAIN_NAT_IPV4=m
181CONFIG_NF_TABLES_ARP=m 189CONFIG_NF_TABLES_ARP=m
190CONFIG_NFT_CHAIN_NAT_IPV4=m
191CONFIG_NFT_MASQ_IPV4=m
192CONFIG_NFT_REDIR_IPV4=m
182CONFIG_IP_NF_IPTABLES=m 193CONFIG_IP_NF_IPTABLES=m
183CONFIG_IP_NF_MATCH_AH=m 194CONFIG_IP_NF_MATCH_AH=m
184CONFIG_IP_NF_MATCH_ECN=m 195CONFIG_IP_NF_MATCH_ECN=m
@@ -187,8 +198,7 @@ CONFIG_IP_NF_MATCH_TTL=m
187CONFIG_IP_NF_FILTER=m 198CONFIG_IP_NF_FILTER=m
188CONFIG_IP_NF_TARGET_REJECT=m 199CONFIG_IP_NF_TARGET_REJECT=m
189CONFIG_IP_NF_TARGET_SYNPROXY=m 200CONFIG_IP_NF_TARGET_SYNPROXY=m
190CONFIG_IP_NF_TARGET_ULOG=m 201CONFIG_IP_NF_NAT=m
191CONFIG_NF_NAT_IPV4=m
192CONFIG_IP_NF_TARGET_MASQUERADE=m 202CONFIG_IP_NF_TARGET_MASQUERADE=m
193CONFIG_IP_NF_TARGET_NETMAP=m 203CONFIG_IP_NF_TARGET_NETMAP=m
194CONFIG_IP_NF_TARGET_REDIRECT=m 204CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -203,6 +213,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
203CONFIG_NF_CONNTRACK_IPV6=m 213CONFIG_NF_CONNTRACK_IPV6=m
204CONFIG_NFT_CHAIN_ROUTE_IPV6=m 214CONFIG_NFT_CHAIN_ROUTE_IPV6=m
205CONFIG_NFT_CHAIN_NAT_IPV6=m 215CONFIG_NFT_CHAIN_NAT_IPV6=m
216CONFIG_NFT_MASQ_IPV6=m
217CONFIG_NFT_REDIR_IPV6=m
206CONFIG_IP6_NF_IPTABLES=m 218CONFIG_IP6_NF_IPTABLES=m
207CONFIG_IP6_NF_MATCH_AH=m 219CONFIG_IP6_NF_MATCH_AH=m
208CONFIG_IP6_NF_MATCH_EUI64=m 220CONFIG_IP6_NF_MATCH_EUI64=m
@@ -219,20 +231,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
219CONFIG_IP6_NF_TARGET_SYNPROXY=m 231CONFIG_IP6_NF_TARGET_SYNPROXY=m
220CONFIG_IP6_NF_MANGLE=m 232CONFIG_IP6_NF_MANGLE=m
221CONFIG_IP6_NF_RAW=m 233CONFIG_IP6_NF_RAW=m
222CONFIG_NF_NAT_IPV6=m 234CONFIG_IP6_NF_NAT=m
223CONFIG_IP6_NF_TARGET_MASQUERADE=m 235CONFIG_IP6_NF_TARGET_MASQUERADE=m
224CONFIG_IP6_NF_TARGET_NPT=m 236CONFIG_IP6_NF_TARGET_NPT=m
225CONFIG_NF_TABLES_BRIDGE=m 237CONFIG_NF_TABLES_BRIDGE=m
238CONFIG_NFT_BRIDGE_META=m
239CONFIG_NFT_BRIDGE_REJECT=m
240CONFIG_NF_LOG_BRIDGE=m
241CONFIG_BRIDGE_NF_EBTABLES=m
242CONFIG_BRIDGE_EBT_BROUTE=m
243CONFIG_BRIDGE_EBT_T_FILTER=m
244CONFIG_BRIDGE_EBT_T_NAT=m
245CONFIG_BRIDGE_EBT_802_3=m
246CONFIG_BRIDGE_EBT_AMONG=m
247CONFIG_BRIDGE_EBT_ARP=m
248CONFIG_BRIDGE_EBT_IP=m
249CONFIG_BRIDGE_EBT_IP6=m
250CONFIG_BRIDGE_EBT_LIMIT=m
251CONFIG_BRIDGE_EBT_MARK=m
252CONFIG_BRIDGE_EBT_PKTTYPE=m
253CONFIG_BRIDGE_EBT_STP=m
254CONFIG_BRIDGE_EBT_VLAN=m
255CONFIG_BRIDGE_EBT_ARPREPLY=m
256CONFIG_BRIDGE_EBT_DNAT=m
257CONFIG_BRIDGE_EBT_MARK_T=m
258CONFIG_BRIDGE_EBT_REDIRECT=m
259CONFIG_BRIDGE_EBT_SNAT=m
260CONFIG_BRIDGE_EBT_LOG=m
261CONFIG_BRIDGE_EBT_NFLOG=m
226CONFIG_IP_DCCP=m 262CONFIG_IP_DCCP=m
227# CONFIG_IP_DCCP_CCID3 is not set 263# CONFIG_IP_DCCP_CCID3 is not set
228CONFIG_SCTP_COOKIE_HMAC_SHA1=y 264CONFIG_SCTP_COOKIE_HMAC_SHA1=y
229CONFIG_RDS=m 265CONFIG_RDS=m
230CONFIG_RDS_TCP=m 266CONFIG_RDS_TCP=m
231CONFIG_L2TP=m 267CONFIG_L2TP=m
268CONFIG_BRIDGE=m
232CONFIG_ATALK=m 269CONFIG_ATALK=m
233CONFIG_DEV_APPLETALK=m 270CONFIG_DEV_APPLETALK=m
234CONFIG_IPDDP=m 271CONFIG_IPDDP=m
235CONFIG_IPDDP_ENCAP=y 272CONFIG_IPDDP_ENCAP=y
273CONFIG_6LOWPAN=m
236CONFIG_DNS_RESOLVER=y 274CONFIG_DNS_RESOLVER=y
237CONFIG_BATMAN_ADV=m 275CONFIG_BATMAN_ADV=m
238CONFIG_BATMAN_ADV_DAT=y 276CONFIG_BATMAN_ADV_DAT=y
@@ -241,9 +279,10 @@ CONFIG_BATMAN_ADV_MCAST=y
241CONFIG_NETLINK_DIAG=m 279CONFIG_NETLINK_DIAG=m
242CONFIG_NET_MPLS_GSO=m 280CONFIG_NET_MPLS_GSO=m
243# CONFIG_WIRELESS is not set 281# CONFIG_WIRELESS is not set
282# CONFIG_UEVENT_HELPER is not set
244CONFIG_DEVTMPFS=y 283CONFIG_DEVTMPFS=y
284CONFIG_DEVTMPFS_MOUNT=y
245# CONFIG_FIRMWARE_IN_KERNEL is not set 285# CONFIG_FIRMWARE_IN_KERNEL is not set
246# CONFIG_FW_LOADER_USER_HELPER is not set
247CONFIG_CONNECTOR=m 286CONFIG_CONNECTOR=m
248CONFIG_PARPORT=m 287CONFIG_PARPORT=m
249CONFIG_PARPORT_PC=m 288CONFIG_PARPORT_PC=m
@@ -329,6 +368,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
329CONFIG_NET_TEAM_MODE_RANDOM=m 368CONFIG_NET_TEAM_MODE_RANDOM=m
330CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 369CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
331CONFIG_NET_TEAM_MODE_LOADBALANCE=m 370CONFIG_NET_TEAM_MODE_LOADBALANCE=m
371CONFIG_MACVLAN=m
372CONFIG_MACVTAP=m
373CONFIG_IPVLAN=m
332CONFIG_VXLAN=m 374CONFIG_VXLAN=m
333CONFIG_NETCONSOLE=m 375CONFIG_NETCONSOLE=m
334CONFIG_NETCONSOLE_DYNAMIC=y 376CONFIG_NETCONSOLE_DYNAMIC=y
@@ -352,11 +394,14 @@ CONFIG_MVME16x_NET=y
352CONFIG_MACSONIC=y 394CONFIG_MACSONIC=y
353CONFIG_HYDRA=y 395CONFIG_HYDRA=y
354CONFIG_MAC8390=y 396CONFIG_MAC8390=y
355CONFIG_NE2000=m 397CONFIG_NE2000=y
356CONFIG_APNE=y 398CONFIG_APNE=y
357CONFIG_ZORRO8390=y 399CONFIG_ZORRO8390=y
400# CONFIG_NET_VENDOR_QUALCOMM is not set
401# CONFIG_NET_VENDOR_ROCKER is not set
358# CONFIG_NET_VENDOR_SAMSUNG is not set 402# CONFIG_NET_VENDOR_SAMSUNG is not set
359# CONFIG_NET_VENDOR_SEEQ is not set 403# CONFIG_NET_VENDOR_SEEQ is not set
404CONFIG_SMC91X=y
360# CONFIG_NET_VENDOR_STMICRO is not set 405# CONFIG_NET_VENDOR_STMICRO is not set
361# CONFIG_NET_VENDOR_VIA is not set 406# CONFIG_NET_VENDOR_VIA is not set
362# CONFIG_NET_VENDOR_WIZNET is not set 407# CONFIG_NET_VENDOR_WIZNET is not set
@@ -423,6 +468,7 @@ CONFIG_HID=m
423CONFIG_HIDRAW=y 468CONFIG_HIDRAW=y
424CONFIG_UHID=m 469CONFIG_UHID=m
425# CONFIG_HID_GENERIC is not set 470# CONFIG_HID_GENERIC is not set
471# CONFIG_HID_PLANTRONICS is not set
426# CONFIG_USB_SUPPORT is not set 472# CONFIG_USB_SUPPORT is not set
427CONFIG_RTC_CLASS=y 473CONFIG_RTC_CLASS=y
428CONFIG_RTC_DRV_MSM6242=m 474CONFIG_RTC_DRV_MSM6242=m
@@ -435,6 +481,8 @@ CONFIG_NATFEAT=y
435CONFIG_NFBLOCK=y 481CONFIG_NFBLOCK=y
436CONFIG_NFCON=y 482CONFIG_NFCON=y
437CONFIG_NFETH=y 483CONFIG_NFETH=y
484CONFIG_ATARI_ETHERNAT=y
485CONFIG_ATARI_ETHERNEC=y
438CONFIG_ATARI_DSP56K=m 486CONFIG_ATARI_DSP56K=m
439CONFIG_AMIGA_BUILTIN_SERIAL=y 487CONFIG_AMIGA_BUILTIN_SERIAL=y
440CONFIG_SERIAL_CONSOLE=y 488CONFIG_SERIAL_CONSOLE=y
@@ -450,6 +498,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
450CONFIG_AUTOFS4_FS=m 498CONFIG_AUTOFS4_FS=m
451CONFIG_FUSE_FS=m 499CONFIG_FUSE_FS=m
452CONFIG_CUSE=m 500CONFIG_CUSE=m
501CONFIG_OVERLAY_FS=m
453CONFIG_ISO9660_FS=y 502CONFIG_ISO9660_FS=y
454CONFIG_JOLIET=y 503CONFIG_JOLIET=y
455CONFIG_ZISOFS=y 504CONFIG_ZISOFS=y
@@ -465,6 +514,7 @@ CONFIG_HFS_FS=m
465CONFIG_HFSPLUS_FS=m 514CONFIG_HFSPLUS_FS=m
466CONFIG_CRAMFS=m 515CONFIG_CRAMFS=m
467CONFIG_SQUASHFS=m 516CONFIG_SQUASHFS=m
517CONFIG_SQUASHFS_LZ4=y
468CONFIG_SQUASHFS_LZO=y 518CONFIG_SQUASHFS_LZO=y
469CONFIG_MINIX_FS=m 519CONFIG_MINIX_FS=m
470CONFIG_OMFS_FS=m 520CONFIG_OMFS_FS=m
@@ -534,11 +584,18 @@ CONFIG_DLM=m
534CONFIG_MAGIC_SYSRQ=y 584CONFIG_MAGIC_SYSRQ=y
535CONFIG_ASYNC_RAID6_TEST=m 585CONFIG_ASYNC_RAID6_TEST=m
536CONFIG_TEST_STRING_HELPERS=m 586CONFIG_TEST_STRING_HELPERS=m
587CONFIG_TEST_KSTRTOX=m
588CONFIG_TEST_LKM=m
589CONFIG_TEST_USER_COPY=m
590CONFIG_TEST_BPF=m
591CONFIG_TEST_FIRMWARE=m
592CONFIG_TEST_UDELAY=m
537CONFIG_EARLY_PRINTK=y 593CONFIG_EARLY_PRINTK=y
538CONFIG_ENCRYPTED_KEYS=m 594CONFIG_ENCRYPTED_KEYS=m
539CONFIG_CRYPTO_MANAGER=y 595CONFIG_CRYPTO_MANAGER=y
540CONFIG_CRYPTO_USER=m 596CONFIG_CRYPTO_USER=m
541CONFIG_CRYPTO_CRYPTD=m 597CONFIG_CRYPTO_CRYPTD=m
598CONFIG_CRYPTO_MCRYPTD=m
542CONFIG_CRYPTO_TEST=m 599CONFIG_CRYPTO_TEST=m
543CONFIG_CRYPTO_CCM=m 600CONFIG_CRYPTO_CCM=m
544CONFIG_CRYPTO_GCM=m 601CONFIG_CRYPTO_GCM=m
@@ -573,13 +630,10 @@ CONFIG_CRYPTO_LZO=m
573CONFIG_CRYPTO_LZ4=m 630CONFIG_CRYPTO_LZ4=m
574CONFIG_CRYPTO_LZ4HC=m 631CONFIG_CRYPTO_LZ4HC=m
575# CONFIG_CRYPTO_ANSI_CPRNG is not set 632# CONFIG_CRYPTO_ANSI_CPRNG is not set
633CONFIG_CRYPTO_DRBG_MENU=m
634CONFIG_CRYPTO_DRBG_HASH=y
635CONFIG_CRYPTO_DRBG_CTR=y
576CONFIG_CRYPTO_USER_API_HASH=m 636CONFIG_CRYPTO_USER_API_HASH=m
577CONFIG_CRYPTO_USER_API_SKCIPHER=m 637CONFIG_CRYPTO_USER_API_SKCIPHER=m
578# CONFIG_CRYPTO_HW is not set 638# CONFIG_CRYPTO_HW is not set
579CONFIG_XZ_DEC_X86=y
580CONFIG_XZ_DEC_POWERPC=y
581CONFIG_XZ_DEC_IA64=y
582CONFIG_XZ_DEC_ARM=y
583CONFIG_XZ_DEC_ARMTHUMB=y
584CONFIG_XZ_DEC_SPARC=y
585CONFIG_XZ_DEC_TEST=m 639CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 066b24af095e..b4c76640973e 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -31,6 +31,7 @@ CONFIG_VME=y
31CONFIG_MVME147=y 31CONFIG_MVME147=y
32# CONFIG_COMPACTION is not set 32# CONFIG_COMPACTION is not set
33CONFIG_CLEANCACHE=y 33CONFIG_CLEANCACHE=y
34CONFIG_ZPOOL=m
34# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
35CONFIG_BINFMT_AOUT=m 36CONFIG_BINFMT_AOUT=m
36CONFIG_BINFMT_MISC=m 37CONFIG_BINFMT_MISC=m
@@ -50,6 +51,8 @@ CONFIG_NET_IPIP=m
50CONFIG_NET_IPGRE_DEMUX=m 51CONFIG_NET_IPGRE_DEMUX=m
51CONFIG_NET_IPGRE=m 52CONFIG_NET_IPGRE=m
52CONFIG_NET_IPVTI=m 53CONFIG_NET_IPVTI=m
54CONFIG_NET_FOU_IP_TUNNELS=y
55CONFIG_GENEVE=m
53CONFIG_INET_AH=m 56CONFIG_INET_AH=m
54CONFIG_INET_ESP=m 57CONFIG_INET_ESP=m
55CONFIG_INET_IPCOMP=m 58CONFIG_INET_IPCOMP=m
@@ -91,6 +94,8 @@ CONFIG_NFT_HASH=m
91CONFIG_NFT_COUNTER=m 94CONFIG_NFT_COUNTER=m
92CONFIG_NFT_LOG=m 95CONFIG_NFT_LOG=m
93CONFIG_NFT_LIMIT=m 96CONFIG_NFT_LIMIT=m
97CONFIG_NFT_MASQ=m
98CONFIG_NFT_REDIR=m
94CONFIG_NFT_NAT=m 99CONFIG_NFT_NAT=m
95CONFIG_NFT_QUEUE=m 100CONFIG_NFT_QUEUE=m
96CONFIG_NFT_REJECT=m 101CONFIG_NFT_REJECT=m
@@ -137,6 +142,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
137CONFIG_NETFILTER_XT_MATCH_OSF=m 142CONFIG_NETFILTER_XT_MATCH_OSF=m
138CONFIG_NETFILTER_XT_MATCH_OWNER=m 143CONFIG_NETFILTER_XT_MATCH_OWNER=m
139CONFIG_NETFILTER_XT_MATCH_POLICY=m 144CONFIG_NETFILTER_XT_MATCH_POLICY=m
145CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
140CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 146CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
141CONFIG_NETFILTER_XT_MATCH_QUOTA=m 147CONFIG_NETFILTER_XT_MATCH_QUOTA=m
142CONFIG_NETFILTER_XT_MATCH_RATEEST=m 148CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -158,6 +164,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
158CONFIG_IP_SET_HASH_IPPORT=m 164CONFIG_IP_SET_HASH_IPPORT=m
159CONFIG_IP_SET_HASH_IPPORTIP=m 165CONFIG_IP_SET_HASH_IPPORTIP=m
160CONFIG_IP_SET_HASH_IPPORTNET=m 166CONFIG_IP_SET_HASH_IPPORTNET=m
167CONFIG_IP_SET_HASH_MAC=m
161CONFIG_IP_SET_HASH_NETPORTNET=m 168CONFIG_IP_SET_HASH_NETPORTNET=m
162CONFIG_IP_SET_HASH_NET=m 169CONFIG_IP_SET_HASH_NET=m
163CONFIG_IP_SET_HASH_NETNET=m 170CONFIG_IP_SET_HASH_NETNET=m
@@ -165,9 +172,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
165CONFIG_IP_SET_HASH_NETIFACE=m 172CONFIG_IP_SET_HASH_NETIFACE=m
166CONFIG_IP_SET_LIST_SET=m 173CONFIG_IP_SET_LIST_SET=m
167CONFIG_NF_CONNTRACK_IPV4=m 174CONFIG_NF_CONNTRACK_IPV4=m
175CONFIG_NF_LOG_ARP=m
168CONFIG_NFT_CHAIN_ROUTE_IPV4=m 176CONFIG_NFT_CHAIN_ROUTE_IPV4=m
169CONFIG_NFT_CHAIN_NAT_IPV4=m
170CONFIG_NF_TABLES_ARP=m 177CONFIG_NF_TABLES_ARP=m
178CONFIG_NFT_CHAIN_NAT_IPV4=m
179CONFIG_NFT_MASQ_IPV4=m
180CONFIG_NFT_REDIR_IPV4=m
171CONFIG_IP_NF_IPTABLES=m 181CONFIG_IP_NF_IPTABLES=m
172CONFIG_IP_NF_MATCH_AH=m 182CONFIG_IP_NF_MATCH_AH=m
173CONFIG_IP_NF_MATCH_ECN=m 183CONFIG_IP_NF_MATCH_ECN=m
@@ -176,8 +186,7 @@ CONFIG_IP_NF_MATCH_TTL=m
176CONFIG_IP_NF_FILTER=m 186CONFIG_IP_NF_FILTER=m
177CONFIG_IP_NF_TARGET_REJECT=m 187CONFIG_IP_NF_TARGET_REJECT=m
178CONFIG_IP_NF_TARGET_SYNPROXY=m 188CONFIG_IP_NF_TARGET_SYNPROXY=m
179CONFIG_IP_NF_TARGET_ULOG=m 189CONFIG_IP_NF_NAT=m
180CONFIG_NF_NAT_IPV4=m
181CONFIG_IP_NF_TARGET_MASQUERADE=m 190CONFIG_IP_NF_TARGET_MASQUERADE=m
182CONFIG_IP_NF_TARGET_NETMAP=m 191CONFIG_IP_NF_TARGET_NETMAP=m
183CONFIG_IP_NF_TARGET_REDIRECT=m 192CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -192,6 +201,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
192CONFIG_NF_CONNTRACK_IPV6=m 201CONFIG_NF_CONNTRACK_IPV6=m
193CONFIG_NFT_CHAIN_ROUTE_IPV6=m 202CONFIG_NFT_CHAIN_ROUTE_IPV6=m
194CONFIG_NFT_CHAIN_NAT_IPV6=m 203CONFIG_NFT_CHAIN_NAT_IPV6=m
204CONFIG_NFT_MASQ_IPV6=m
205CONFIG_NFT_REDIR_IPV6=m
195CONFIG_IP6_NF_IPTABLES=m 206CONFIG_IP6_NF_IPTABLES=m
196CONFIG_IP6_NF_MATCH_AH=m 207CONFIG_IP6_NF_MATCH_AH=m
197CONFIG_IP6_NF_MATCH_EUI64=m 208CONFIG_IP6_NF_MATCH_EUI64=m
@@ -208,17 +219,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
208CONFIG_IP6_NF_TARGET_SYNPROXY=m 219CONFIG_IP6_NF_TARGET_SYNPROXY=m
209CONFIG_IP6_NF_MANGLE=m 220CONFIG_IP6_NF_MANGLE=m
210CONFIG_IP6_NF_RAW=m 221CONFIG_IP6_NF_RAW=m
211CONFIG_NF_NAT_IPV6=m 222CONFIG_IP6_NF_NAT=m
212CONFIG_IP6_NF_TARGET_MASQUERADE=m 223CONFIG_IP6_NF_TARGET_MASQUERADE=m
213CONFIG_IP6_NF_TARGET_NPT=m 224CONFIG_IP6_NF_TARGET_NPT=m
214CONFIG_NF_TABLES_BRIDGE=m 225CONFIG_NF_TABLES_BRIDGE=m
226CONFIG_NFT_BRIDGE_META=m
227CONFIG_NFT_BRIDGE_REJECT=m
228CONFIG_NF_LOG_BRIDGE=m
229CONFIG_BRIDGE_NF_EBTABLES=m
230CONFIG_BRIDGE_EBT_BROUTE=m
231CONFIG_BRIDGE_EBT_T_FILTER=m
232CONFIG_BRIDGE_EBT_T_NAT=m
233CONFIG_BRIDGE_EBT_802_3=m
234CONFIG_BRIDGE_EBT_AMONG=m
235CONFIG_BRIDGE_EBT_ARP=m
236CONFIG_BRIDGE_EBT_IP=m
237CONFIG_BRIDGE_EBT_IP6=m
238CONFIG_BRIDGE_EBT_LIMIT=m
239CONFIG_BRIDGE_EBT_MARK=m
240CONFIG_BRIDGE_EBT_PKTTYPE=m
241CONFIG_BRIDGE_EBT_STP=m
242CONFIG_BRIDGE_EBT_VLAN=m
243CONFIG_BRIDGE_EBT_ARPREPLY=m
244CONFIG_BRIDGE_EBT_DNAT=m
245CONFIG_BRIDGE_EBT_MARK_T=m
246CONFIG_BRIDGE_EBT_REDIRECT=m
247CONFIG_BRIDGE_EBT_SNAT=m
248CONFIG_BRIDGE_EBT_LOG=m
249CONFIG_BRIDGE_EBT_NFLOG=m
215CONFIG_IP_DCCP=m 250CONFIG_IP_DCCP=m
216# CONFIG_IP_DCCP_CCID3 is not set 251# CONFIG_IP_DCCP_CCID3 is not set
217CONFIG_SCTP_COOKIE_HMAC_SHA1=y 252CONFIG_SCTP_COOKIE_HMAC_SHA1=y
218CONFIG_RDS=m 253CONFIG_RDS=m
219CONFIG_RDS_TCP=m 254CONFIG_RDS_TCP=m
220CONFIG_L2TP=m 255CONFIG_L2TP=m
256CONFIG_BRIDGE=m
221CONFIG_ATALK=m 257CONFIG_ATALK=m
258CONFIG_6LOWPAN=m
222CONFIG_DNS_RESOLVER=y 259CONFIG_DNS_RESOLVER=y
223CONFIG_BATMAN_ADV=m 260CONFIG_BATMAN_ADV=m
224CONFIG_BATMAN_ADV_DAT=y 261CONFIG_BATMAN_ADV_DAT=y
@@ -227,9 +264,10 @@ CONFIG_BATMAN_ADV_MCAST=y
227CONFIG_NETLINK_DIAG=m 264CONFIG_NETLINK_DIAG=m
228CONFIG_NET_MPLS_GSO=m 265CONFIG_NET_MPLS_GSO=m
229# CONFIG_WIRELESS is not set 266# CONFIG_WIRELESS is not set
267# CONFIG_UEVENT_HELPER is not set
230CONFIG_DEVTMPFS=y 268CONFIG_DEVTMPFS=y
269CONFIG_DEVTMPFS_MOUNT=y
231# CONFIG_FIRMWARE_IN_KERNEL is not set 270# CONFIG_FIRMWARE_IN_KERNEL is not set
232# CONFIG_FW_LOADER_USER_HELPER is not set
233CONFIG_CONNECTOR=m 271CONFIG_CONNECTOR=m
234CONFIG_BLK_DEV_LOOP=y 272CONFIG_BLK_DEV_LOOP=y
235CONFIG_BLK_DEV_CRYPTOLOOP=m 273CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -279,6 +317,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
279CONFIG_NET_TEAM_MODE_RANDOM=m 317CONFIG_NET_TEAM_MODE_RANDOM=m
280CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 318CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
281CONFIG_NET_TEAM_MODE_LOADBALANCE=m 319CONFIG_NET_TEAM_MODE_LOADBALANCE=m
320CONFIG_MACVLAN=m
321CONFIG_MACVTAP=m
322CONFIG_IPVLAN=m
282CONFIG_VXLAN=m 323CONFIG_VXLAN=m
283CONFIG_NETCONSOLE=m 324CONFIG_NETCONSOLE=m
284CONFIG_NETCONSOLE_DYNAMIC=y 325CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME147_NET=y
290# CONFIG_NET_VENDOR_MARVELL is not set 331# CONFIG_NET_VENDOR_MARVELL is not set
291# CONFIG_NET_VENDOR_MICREL is not set 332# CONFIG_NET_VENDOR_MICREL is not set
292# CONFIG_NET_VENDOR_NATSEMI is not set 333# CONFIG_NET_VENDOR_NATSEMI is not set
334# CONFIG_NET_VENDOR_QUALCOMM is not set
335# CONFIG_NET_VENDOR_ROCKER is not set
293# CONFIG_NET_VENDOR_SAMSUNG is not set 336# CONFIG_NET_VENDOR_SAMSUNG is not set
294# CONFIG_NET_VENDOR_SEEQ is not set 337# CONFIG_NET_VENDOR_SEEQ is not set
295# CONFIG_NET_VENDOR_STMICRO is not set 338# CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
326CONFIG_HIDRAW=y 369CONFIG_HIDRAW=y
327CONFIG_UHID=m 370CONFIG_UHID=m
328# CONFIG_HID_GENERIC is not set 371# CONFIG_HID_GENERIC is not set
372# CONFIG_HID_PLANTRONICS is not set
329# CONFIG_USB_SUPPORT is not set 373# CONFIG_USB_SUPPORT is not set
330CONFIG_RTC_CLASS=y 374CONFIG_RTC_CLASS=y
331CONFIG_RTC_DRV_GENERIC=m 375CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
343CONFIG_AUTOFS4_FS=m 387CONFIG_AUTOFS4_FS=m
344CONFIG_FUSE_FS=m 388CONFIG_FUSE_FS=m
345CONFIG_CUSE=m 389CONFIG_CUSE=m
390CONFIG_OVERLAY_FS=m
346CONFIG_ISO9660_FS=y 391CONFIG_ISO9660_FS=y
347CONFIG_JOLIET=y 392CONFIG_JOLIET=y
348CONFIG_ZISOFS=y 393CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
358CONFIG_HFSPLUS_FS=m 403CONFIG_HFSPLUS_FS=m
359CONFIG_CRAMFS=m 404CONFIG_CRAMFS=m
360CONFIG_SQUASHFS=m 405CONFIG_SQUASHFS=m
406CONFIG_SQUASHFS_LZ4=y
361CONFIG_SQUASHFS_LZO=y 407CONFIG_SQUASHFS_LZO=y
362CONFIG_MINIX_FS=m 408CONFIG_MINIX_FS=m
363CONFIG_OMFS_FS=m 409CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
427CONFIG_MAGIC_SYSRQ=y 473CONFIG_MAGIC_SYSRQ=y
428CONFIG_ASYNC_RAID6_TEST=m 474CONFIG_ASYNC_RAID6_TEST=m
429CONFIG_TEST_STRING_HELPERS=m 475CONFIG_TEST_STRING_HELPERS=m
476CONFIG_TEST_KSTRTOX=m
477CONFIG_TEST_LKM=m
478CONFIG_TEST_USER_COPY=m
479CONFIG_TEST_BPF=m
480CONFIG_TEST_FIRMWARE=m
481CONFIG_TEST_UDELAY=m
482CONFIG_EARLY_PRINTK=y
430CONFIG_ENCRYPTED_KEYS=m 483CONFIG_ENCRYPTED_KEYS=m
431CONFIG_CRYPTO_MANAGER=y 484CONFIG_CRYPTO_MANAGER=y
432CONFIG_CRYPTO_USER=m 485CONFIG_CRYPTO_USER=m
433CONFIG_CRYPTO_CRYPTD=m 486CONFIG_CRYPTO_CRYPTD=m
487CONFIG_CRYPTO_MCRYPTD=m
434CONFIG_CRYPTO_TEST=m 488CONFIG_CRYPTO_TEST=m
435CONFIG_CRYPTO_CCM=m 489CONFIG_CRYPTO_CCM=m
436CONFIG_CRYPTO_GCM=m 490CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
465CONFIG_CRYPTO_LZ4=m 519CONFIG_CRYPTO_LZ4=m
466CONFIG_CRYPTO_LZ4HC=m 520CONFIG_CRYPTO_LZ4HC=m
467# CONFIG_CRYPTO_ANSI_CPRNG is not set 521# CONFIG_CRYPTO_ANSI_CPRNG is not set
522CONFIG_CRYPTO_DRBG_MENU=m
523CONFIG_CRYPTO_DRBG_HASH=y
524CONFIG_CRYPTO_DRBG_CTR=y
468CONFIG_CRYPTO_USER_API_HASH=m 525CONFIG_CRYPTO_USER_API_HASH=m
469CONFIG_CRYPTO_USER_API_SKCIPHER=m 526CONFIG_CRYPTO_USER_API_SKCIPHER=m
470# CONFIG_CRYPTO_HW is not set 527# CONFIG_CRYPTO_HW is not set
471CONFIG_XZ_DEC_X86=y
472CONFIG_XZ_DEC_POWERPC=y
473CONFIG_XZ_DEC_IA64=y
474CONFIG_XZ_DEC_ARM=y
475CONFIG_XZ_DEC_ARMTHUMB=y
476CONFIG_XZ_DEC_SPARC=y
477CONFIG_XZ_DEC_TEST=m 528CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9326ea664a5b..0d4a26f9b58c 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -32,6 +32,7 @@ CONFIG_VME=y
32CONFIG_MVME16x=y 32CONFIG_MVME16x=y
33# CONFIG_COMPACTION is not set 33# CONFIG_COMPACTION is not set
34CONFIG_CLEANCACHE=y 34CONFIG_CLEANCACHE=y
35CONFIG_ZPOOL=m
35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 36# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
36CONFIG_BINFMT_AOUT=m 37CONFIG_BINFMT_AOUT=m
37CONFIG_BINFMT_MISC=m 38CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
51CONFIG_NET_IPGRE_DEMUX=m 52CONFIG_NET_IPGRE_DEMUX=m
52CONFIG_NET_IPGRE=m 53CONFIG_NET_IPGRE=m
53CONFIG_NET_IPVTI=m 54CONFIG_NET_IPVTI=m
55CONFIG_NET_FOU_IP_TUNNELS=y
56CONFIG_GENEVE=m
54CONFIG_INET_AH=m 57CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 58CONFIG_INET_ESP=m
56CONFIG_INET_IPCOMP=m 59CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
92CONFIG_NFT_COUNTER=m 95CONFIG_NFT_COUNTER=m
93CONFIG_NFT_LOG=m 96CONFIG_NFT_LOG=m
94CONFIG_NFT_LIMIT=m 97CONFIG_NFT_LIMIT=m
98CONFIG_NFT_MASQ=m
99CONFIG_NFT_REDIR=m
95CONFIG_NFT_NAT=m 100CONFIG_NFT_NAT=m
96CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
97CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
138CONFIG_NETFILTER_XT_MATCH_OSF=m 143CONFIG_NETFILTER_XT_MATCH_OSF=m
139CONFIG_NETFILTER_XT_MATCH_OWNER=m 144CONFIG_NETFILTER_XT_MATCH_OWNER=m
140CONFIG_NETFILTER_XT_MATCH_POLICY=m 145CONFIG_NETFILTER_XT_MATCH_POLICY=m
146CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
141CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 147CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
142CONFIG_NETFILTER_XT_MATCH_QUOTA=m 148CONFIG_NETFILTER_XT_MATCH_QUOTA=m
143CONFIG_NETFILTER_XT_MATCH_RATEEST=m 149CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
159CONFIG_IP_SET_HASH_IPPORT=m 165CONFIG_IP_SET_HASH_IPPORT=m
160CONFIG_IP_SET_HASH_IPPORTIP=m 166CONFIG_IP_SET_HASH_IPPORTIP=m
161CONFIG_IP_SET_HASH_IPPORTNET=m 167CONFIG_IP_SET_HASH_IPPORTNET=m
168CONFIG_IP_SET_HASH_MAC=m
162CONFIG_IP_SET_HASH_NETPORTNET=m 169CONFIG_IP_SET_HASH_NETPORTNET=m
163CONFIG_IP_SET_HASH_NET=m 170CONFIG_IP_SET_HASH_NET=m
164CONFIG_IP_SET_HASH_NETNET=m 171CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
166CONFIG_IP_SET_HASH_NETIFACE=m 173CONFIG_IP_SET_HASH_NETIFACE=m
167CONFIG_IP_SET_LIST_SET=m 174CONFIG_IP_SET_LIST_SET=m
168CONFIG_NF_CONNTRACK_IPV4=m 175CONFIG_NF_CONNTRACK_IPV4=m
176CONFIG_NF_LOG_ARP=m
169CONFIG_NFT_CHAIN_ROUTE_IPV4=m 177CONFIG_NFT_CHAIN_ROUTE_IPV4=m
170CONFIG_NFT_CHAIN_NAT_IPV4=m
171CONFIG_NF_TABLES_ARP=m 178CONFIG_NF_TABLES_ARP=m
179CONFIG_NFT_CHAIN_NAT_IPV4=m
180CONFIG_NFT_MASQ_IPV4=m
181CONFIG_NFT_REDIR_IPV4=m
172CONFIG_IP_NF_IPTABLES=m 182CONFIG_IP_NF_IPTABLES=m
173CONFIG_IP_NF_MATCH_AH=m 183CONFIG_IP_NF_MATCH_AH=m
174CONFIG_IP_NF_MATCH_ECN=m 184CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
177CONFIG_IP_NF_FILTER=m 187CONFIG_IP_NF_FILTER=m
178CONFIG_IP_NF_TARGET_REJECT=m 188CONFIG_IP_NF_TARGET_REJECT=m
179CONFIG_IP_NF_TARGET_SYNPROXY=m 189CONFIG_IP_NF_TARGET_SYNPROXY=m
180CONFIG_IP_NF_TARGET_ULOG=m 190CONFIG_IP_NF_NAT=m
181CONFIG_NF_NAT_IPV4=m
182CONFIG_IP_NF_TARGET_MASQUERADE=m 191CONFIG_IP_NF_TARGET_MASQUERADE=m
183CONFIG_IP_NF_TARGET_NETMAP=m 192CONFIG_IP_NF_TARGET_NETMAP=m
184CONFIG_IP_NF_TARGET_REDIRECT=m 193CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
193CONFIG_NF_CONNTRACK_IPV6=m 202CONFIG_NF_CONNTRACK_IPV6=m
194CONFIG_NFT_CHAIN_ROUTE_IPV6=m 203CONFIG_NFT_CHAIN_ROUTE_IPV6=m
195CONFIG_NFT_CHAIN_NAT_IPV6=m 204CONFIG_NFT_CHAIN_NAT_IPV6=m
205CONFIG_NFT_MASQ_IPV6=m
206CONFIG_NFT_REDIR_IPV6=m
196CONFIG_IP6_NF_IPTABLES=m 207CONFIG_IP6_NF_IPTABLES=m
197CONFIG_IP6_NF_MATCH_AH=m 208CONFIG_IP6_NF_MATCH_AH=m
198CONFIG_IP6_NF_MATCH_EUI64=m 209CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
209CONFIG_IP6_NF_TARGET_SYNPROXY=m 220CONFIG_IP6_NF_TARGET_SYNPROXY=m
210CONFIG_IP6_NF_MANGLE=m 221CONFIG_IP6_NF_MANGLE=m
211CONFIG_IP6_NF_RAW=m 222CONFIG_IP6_NF_RAW=m
212CONFIG_NF_NAT_IPV6=m 223CONFIG_IP6_NF_NAT=m
213CONFIG_IP6_NF_TARGET_MASQUERADE=m 224CONFIG_IP6_NF_TARGET_MASQUERADE=m
214CONFIG_IP6_NF_TARGET_NPT=m 225CONFIG_IP6_NF_TARGET_NPT=m
215CONFIG_NF_TABLES_BRIDGE=m 226CONFIG_NF_TABLES_BRIDGE=m
227CONFIG_NFT_BRIDGE_META=m
228CONFIG_NFT_BRIDGE_REJECT=m
229CONFIG_NF_LOG_BRIDGE=m
230CONFIG_BRIDGE_NF_EBTABLES=m
231CONFIG_BRIDGE_EBT_BROUTE=m
232CONFIG_BRIDGE_EBT_T_FILTER=m
233CONFIG_BRIDGE_EBT_T_NAT=m
234CONFIG_BRIDGE_EBT_802_3=m
235CONFIG_BRIDGE_EBT_AMONG=m
236CONFIG_BRIDGE_EBT_ARP=m
237CONFIG_BRIDGE_EBT_IP=m
238CONFIG_BRIDGE_EBT_IP6=m
239CONFIG_BRIDGE_EBT_LIMIT=m
240CONFIG_BRIDGE_EBT_MARK=m
241CONFIG_BRIDGE_EBT_PKTTYPE=m
242CONFIG_BRIDGE_EBT_STP=m
243CONFIG_BRIDGE_EBT_VLAN=m
244CONFIG_BRIDGE_EBT_ARPREPLY=m
245CONFIG_BRIDGE_EBT_DNAT=m
246CONFIG_BRIDGE_EBT_MARK_T=m
247CONFIG_BRIDGE_EBT_REDIRECT=m
248CONFIG_BRIDGE_EBT_SNAT=m
249CONFIG_BRIDGE_EBT_LOG=m
250CONFIG_BRIDGE_EBT_NFLOG=m
216CONFIG_IP_DCCP=m 251CONFIG_IP_DCCP=m
217# CONFIG_IP_DCCP_CCID3 is not set 252# CONFIG_IP_DCCP_CCID3 is not set
218CONFIG_SCTP_COOKIE_HMAC_SHA1=y 253CONFIG_SCTP_COOKIE_HMAC_SHA1=y
219CONFIG_RDS=m 254CONFIG_RDS=m
220CONFIG_RDS_TCP=m 255CONFIG_RDS_TCP=m
221CONFIG_L2TP=m 256CONFIG_L2TP=m
257CONFIG_BRIDGE=m
222CONFIG_ATALK=m 258CONFIG_ATALK=m
259CONFIG_6LOWPAN=m
223CONFIG_DNS_RESOLVER=y 260CONFIG_DNS_RESOLVER=y
224CONFIG_BATMAN_ADV=m 261CONFIG_BATMAN_ADV=m
225CONFIG_BATMAN_ADV_DAT=y 262CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
228CONFIG_NETLINK_DIAG=m 265CONFIG_NETLINK_DIAG=m
229CONFIG_NET_MPLS_GSO=m 266CONFIG_NET_MPLS_GSO=m
230# CONFIG_WIRELESS is not set 267# CONFIG_WIRELESS is not set
268# CONFIG_UEVENT_HELPER is not set
231CONFIG_DEVTMPFS=y 269CONFIG_DEVTMPFS=y
270CONFIG_DEVTMPFS_MOUNT=y
232# CONFIG_FIRMWARE_IN_KERNEL is not set 271# CONFIG_FIRMWARE_IN_KERNEL is not set
233# CONFIG_FW_LOADER_USER_HELPER is not set
234CONFIG_CONNECTOR=m 272CONFIG_CONNECTOR=m
235CONFIG_BLK_DEV_LOOP=y 273CONFIG_BLK_DEV_LOOP=y
236CONFIG_BLK_DEV_CRYPTOLOOP=m 274CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
280CONFIG_NET_TEAM_MODE_RANDOM=m 318CONFIG_NET_TEAM_MODE_RANDOM=m
281CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 319CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
282CONFIG_NET_TEAM_MODE_LOADBALANCE=m 320CONFIG_NET_TEAM_MODE_LOADBALANCE=m
321CONFIG_MACVLAN=m
322CONFIG_MACVTAP=m
323CONFIG_IPVLAN=m
283CONFIG_VXLAN=m 324CONFIG_VXLAN=m
284CONFIG_NETCONSOLE=m 325CONFIG_NETCONSOLE=m
285CONFIG_NETCONSOLE_DYNAMIC=y 326CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME16x_NET=y
290# CONFIG_NET_VENDOR_MARVELL is not set 331# CONFIG_NET_VENDOR_MARVELL is not set
291# CONFIG_NET_VENDOR_MICREL is not set 332# CONFIG_NET_VENDOR_MICREL is not set
292# CONFIG_NET_VENDOR_NATSEMI is not set 333# CONFIG_NET_VENDOR_NATSEMI is not set
334# CONFIG_NET_VENDOR_QUALCOMM is not set
335# CONFIG_NET_VENDOR_ROCKER is not set
293# CONFIG_NET_VENDOR_SAMSUNG is not set 336# CONFIG_NET_VENDOR_SAMSUNG is not set
294# CONFIG_NET_VENDOR_SEEQ is not set 337# CONFIG_NET_VENDOR_SEEQ is not set
295# CONFIG_NET_VENDOR_STMICRO is not set 338# CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
326CONFIG_HIDRAW=y 369CONFIG_HIDRAW=y
327CONFIG_UHID=m 370CONFIG_UHID=m
328# CONFIG_HID_GENERIC is not set 371# CONFIG_HID_GENERIC is not set
372# CONFIG_HID_PLANTRONICS is not set
329# CONFIG_USB_SUPPORT is not set 373# CONFIG_USB_SUPPORT is not set
330CONFIG_RTC_CLASS=y 374CONFIG_RTC_CLASS=y
331CONFIG_RTC_DRV_GENERIC=m 375CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
343CONFIG_AUTOFS4_FS=m 387CONFIG_AUTOFS4_FS=m
344CONFIG_FUSE_FS=m 388CONFIG_FUSE_FS=m
345CONFIG_CUSE=m 389CONFIG_CUSE=m
390CONFIG_OVERLAY_FS=m
346CONFIG_ISO9660_FS=y 391CONFIG_ISO9660_FS=y
347CONFIG_JOLIET=y 392CONFIG_JOLIET=y
348CONFIG_ZISOFS=y 393CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
358CONFIG_HFSPLUS_FS=m 403CONFIG_HFSPLUS_FS=m
359CONFIG_CRAMFS=m 404CONFIG_CRAMFS=m
360CONFIG_SQUASHFS=m 405CONFIG_SQUASHFS=m
406CONFIG_SQUASHFS_LZ4=y
361CONFIG_SQUASHFS_LZO=y 407CONFIG_SQUASHFS_LZO=y
362CONFIG_MINIX_FS=m 408CONFIG_MINIX_FS=m
363CONFIG_OMFS_FS=m 409CONFIG_OMFS_FS=m
@@ -427,11 +473,18 @@ CONFIG_DLM=m
427CONFIG_MAGIC_SYSRQ=y 473CONFIG_MAGIC_SYSRQ=y
428CONFIG_ASYNC_RAID6_TEST=m 474CONFIG_ASYNC_RAID6_TEST=m
429CONFIG_TEST_STRING_HELPERS=m 475CONFIG_TEST_STRING_HELPERS=m
476CONFIG_TEST_KSTRTOX=m
477CONFIG_TEST_LKM=m
478CONFIG_TEST_USER_COPY=m
479CONFIG_TEST_BPF=m
480CONFIG_TEST_FIRMWARE=m
481CONFIG_TEST_UDELAY=m
430CONFIG_EARLY_PRINTK=y 482CONFIG_EARLY_PRINTK=y
431CONFIG_ENCRYPTED_KEYS=m 483CONFIG_ENCRYPTED_KEYS=m
432CONFIG_CRYPTO_MANAGER=y 484CONFIG_CRYPTO_MANAGER=y
433CONFIG_CRYPTO_USER=m 485CONFIG_CRYPTO_USER=m
434CONFIG_CRYPTO_CRYPTD=m 486CONFIG_CRYPTO_CRYPTD=m
487CONFIG_CRYPTO_MCRYPTD=m
435CONFIG_CRYPTO_TEST=m 488CONFIG_CRYPTO_TEST=m
436CONFIG_CRYPTO_CCM=m 489CONFIG_CRYPTO_CCM=m
437CONFIG_CRYPTO_GCM=m 490CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
466CONFIG_CRYPTO_LZ4=m 519CONFIG_CRYPTO_LZ4=m
467CONFIG_CRYPTO_LZ4HC=m 520CONFIG_CRYPTO_LZ4HC=m
468# CONFIG_CRYPTO_ANSI_CPRNG is not set 521# CONFIG_CRYPTO_ANSI_CPRNG is not set
522CONFIG_CRYPTO_DRBG_MENU=m
523CONFIG_CRYPTO_DRBG_HASH=y
524CONFIG_CRYPTO_DRBG_CTR=y
469CONFIG_CRYPTO_USER_API_HASH=m 525CONFIG_CRYPTO_USER_API_HASH=m
470CONFIG_CRYPTO_USER_API_SKCIPHER=m 526CONFIG_CRYPTO_USER_API_SKCIPHER=m
471# CONFIG_CRYPTO_HW is not set 527# CONFIG_CRYPTO_HW is not set
472CONFIG_XZ_DEC_X86=y
473CONFIG_XZ_DEC_POWERPC=y
474CONFIG_XZ_DEC_IA64=y
475CONFIG_XZ_DEC_ARM=y
476CONFIG_XZ_DEC_ARMTHUMB=y
477CONFIG_XZ_DEC_SPARC=y
478CONFIG_XZ_DEC_TEST=m 528CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index d7d1101e31b5..5d581c503fa3 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -32,6 +32,7 @@ CONFIG_M68060=y
32CONFIG_Q40=y 32CONFIG_Q40=y
33# CONFIG_COMPACTION is not set 33# CONFIG_COMPACTION is not set
34CONFIG_CLEANCACHE=y 34CONFIG_CLEANCACHE=y
35CONFIG_ZPOOL=m
35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 36# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
36CONFIG_BINFMT_AOUT=m 37CONFIG_BINFMT_AOUT=m
37CONFIG_BINFMT_MISC=m 38CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
51CONFIG_NET_IPGRE_DEMUX=m 52CONFIG_NET_IPGRE_DEMUX=m
52CONFIG_NET_IPGRE=m 53CONFIG_NET_IPGRE=m
53CONFIG_NET_IPVTI=m 54CONFIG_NET_IPVTI=m
55CONFIG_NET_FOU_IP_TUNNELS=y
56CONFIG_GENEVE=m
54CONFIG_INET_AH=m 57CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 58CONFIG_INET_ESP=m
56CONFIG_INET_IPCOMP=m 59CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
92CONFIG_NFT_COUNTER=m 95CONFIG_NFT_COUNTER=m
93CONFIG_NFT_LOG=m 96CONFIG_NFT_LOG=m
94CONFIG_NFT_LIMIT=m 97CONFIG_NFT_LIMIT=m
98CONFIG_NFT_MASQ=m
99CONFIG_NFT_REDIR=m
95CONFIG_NFT_NAT=m 100CONFIG_NFT_NAT=m
96CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
97CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
138CONFIG_NETFILTER_XT_MATCH_OSF=m 143CONFIG_NETFILTER_XT_MATCH_OSF=m
139CONFIG_NETFILTER_XT_MATCH_OWNER=m 144CONFIG_NETFILTER_XT_MATCH_OWNER=m
140CONFIG_NETFILTER_XT_MATCH_POLICY=m 145CONFIG_NETFILTER_XT_MATCH_POLICY=m
146CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
141CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 147CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
142CONFIG_NETFILTER_XT_MATCH_QUOTA=m 148CONFIG_NETFILTER_XT_MATCH_QUOTA=m
143CONFIG_NETFILTER_XT_MATCH_RATEEST=m 149CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
159CONFIG_IP_SET_HASH_IPPORT=m 165CONFIG_IP_SET_HASH_IPPORT=m
160CONFIG_IP_SET_HASH_IPPORTIP=m 166CONFIG_IP_SET_HASH_IPPORTIP=m
161CONFIG_IP_SET_HASH_IPPORTNET=m 167CONFIG_IP_SET_HASH_IPPORTNET=m
168CONFIG_IP_SET_HASH_MAC=m
162CONFIG_IP_SET_HASH_NETPORTNET=m 169CONFIG_IP_SET_HASH_NETPORTNET=m
163CONFIG_IP_SET_HASH_NET=m 170CONFIG_IP_SET_HASH_NET=m
164CONFIG_IP_SET_HASH_NETNET=m 171CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
166CONFIG_IP_SET_HASH_NETIFACE=m 173CONFIG_IP_SET_HASH_NETIFACE=m
167CONFIG_IP_SET_LIST_SET=m 174CONFIG_IP_SET_LIST_SET=m
168CONFIG_NF_CONNTRACK_IPV4=m 175CONFIG_NF_CONNTRACK_IPV4=m
176CONFIG_NF_LOG_ARP=m
169CONFIG_NFT_CHAIN_ROUTE_IPV4=m 177CONFIG_NFT_CHAIN_ROUTE_IPV4=m
170CONFIG_NFT_CHAIN_NAT_IPV4=m
171CONFIG_NF_TABLES_ARP=m 178CONFIG_NF_TABLES_ARP=m
179CONFIG_NFT_CHAIN_NAT_IPV4=m
180CONFIG_NFT_MASQ_IPV4=m
181CONFIG_NFT_REDIR_IPV4=m
172CONFIG_IP_NF_IPTABLES=m 182CONFIG_IP_NF_IPTABLES=m
173CONFIG_IP_NF_MATCH_AH=m 183CONFIG_IP_NF_MATCH_AH=m
174CONFIG_IP_NF_MATCH_ECN=m 184CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
177CONFIG_IP_NF_FILTER=m 187CONFIG_IP_NF_FILTER=m
178CONFIG_IP_NF_TARGET_REJECT=m 188CONFIG_IP_NF_TARGET_REJECT=m
179CONFIG_IP_NF_TARGET_SYNPROXY=m 189CONFIG_IP_NF_TARGET_SYNPROXY=m
180CONFIG_IP_NF_TARGET_ULOG=m 190CONFIG_IP_NF_NAT=m
181CONFIG_NF_NAT_IPV4=m
182CONFIG_IP_NF_TARGET_MASQUERADE=m 191CONFIG_IP_NF_TARGET_MASQUERADE=m
183CONFIG_IP_NF_TARGET_NETMAP=m 192CONFIG_IP_NF_TARGET_NETMAP=m
184CONFIG_IP_NF_TARGET_REDIRECT=m 193CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
193CONFIG_NF_CONNTRACK_IPV6=m 202CONFIG_NF_CONNTRACK_IPV6=m
194CONFIG_NFT_CHAIN_ROUTE_IPV6=m 203CONFIG_NFT_CHAIN_ROUTE_IPV6=m
195CONFIG_NFT_CHAIN_NAT_IPV6=m 204CONFIG_NFT_CHAIN_NAT_IPV6=m
205CONFIG_NFT_MASQ_IPV6=m
206CONFIG_NFT_REDIR_IPV6=m
196CONFIG_IP6_NF_IPTABLES=m 207CONFIG_IP6_NF_IPTABLES=m
197CONFIG_IP6_NF_MATCH_AH=m 208CONFIG_IP6_NF_MATCH_AH=m
198CONFIG_IP6_NF_MATCH_EUI64=m 209CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
209CONFIG_IP6_NF_TARGET_SYNPROXY=m 220CONFIG_IP6_NF_TARGET_SYNPROXY=m
210CONFIG_IP6_NF_MANGLE=m 221CONFIG_IP6_NF_MANGLE=m
211CONFIG_IP6_NF_RAW=m 222CONFIG_IP6_NF_RAW=m
212CONFIG_NF_NAT_IPV6=m 223CONFIG_IP6_NF_NAT=m
213CONFIG_IP6_NF_TARGET_MASQUERADE=m 224CONFIG_IP6_NF_TARGET_MASQUERADE=m
214CONFIG_IP6_NF_TARGET_NPT=m 225CONFIG_IP6_NF_TARGET_NPT=m
215CONFIG_NF_TABLES_BRIDGE=m 226CONFIG_NF_TABLES_BRIDGE=m
227CONFIG_NFT_BRIDGE_META=m
228CONFIG_NFT_BRIDGE_REJECT=m
229CONFIG_NF_LOG_BRIDGE=m
230CONFIG_BRIDGE_NF_EBTABLES=m
231CONFIG_BRIDGE_EBT_BROUTE=m
232CONFIG_BRIDGE_EBT_T_FILTER=m
233CONFIG_BRIDGE_EBT_T_NAT=m
234CONFIG_BRIDGE_EBT_802_3=m
235CONFIG_BRIDGE_EBT_AMONG=m
236CONFIG_BRIDGE_EBT_ARP=m
237CONFIG_BRIDGE_EBT_IP=m
238CONFIG_BRIDGE_EBT_IP6=m
239CONFIG_BRIDGE_EBT_LIMIT=m
240CONFIG_BRIDGE_EBT_MARK=m
241CONFIG_BRIDGE_EBT_PKTTYPE=m
242CONFIG_BRIDGE_EBT_STP=m
243CONFIG_BRIDGE_EBT_VLAN=m
244CONFIG_BRIDGE_EBT_ARPREPLY=m
245CONFIG_BRIDGE_EBT_DNAT=m
246CONFIG_BRIDGE_EBT_MARK_T=m
247CONFIG_BRIDGE_EBT_REDIRECT=m
248CONFIG_BRIDGE_EBT_SNAT=m
249CONFIG_BRIDGE_EBT_LOG=m
250CONFIG_BRIDGE_EBT_NFLOG=m
216CONFIG_IP_DCCP=m 251CONFIG_IP_DCCP=m
217# CONFIG_IP_DCCP_CCID3 is not set 252# CONFIG_IP_DCCP_CCID3 is not set
218CONFIG_SCTP_COOKIE_HMAC_SHA1=y 253CONFIG_SCTP_COOKIE_HMAC_SHA1=y
219CONFIG_RDS=m 254CONFIG_RDS=m
220CONFIG_RDS_TCP=m 255CONFIG_RDS_TCP=m
221CONFIG_L2TP=m 256CONFIG_L2TP=m
257CONFIG_BRIDGE=m
222CONFIG_ATALK=m 258CONFIG_ATALK=m
259CONFIG_6LOWPAN=m
223CONFIG_DNS_RESOLVER=y 260CONFIG_DNS_RESOLVER=y
224CONFIG_BATMAN_ADV=m 261CONFIG_BATMAN_ADV=m
225CONFIG_BATMAN_ADV_DAT=y 262CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
228CONFIG_NETLINK_DIAG=m 265CONFIG_NETLINK_DIAG=m
229CONFIG_NET_MPLS_GSO=m 266CONFIG_NET_MPLS_GSO=m
230# CONFIG_WIRELESS is not set 267# CONFIG_WIRELESS is not set
268# CONFIG_UEVENT_HELPER is not set
231CONFIG_DEVTMPFS=y 269CONFIG_DEVTMPFS=y
270CONFIG_DEVTMPFS_MOUNT=y
232# CONFIG_FIRMWARE_IN_KERNEL is not set 271# CONFIG_FIRMWARE_IN_KERNEL is not set
233# CONFIG_FW_LOADER_USER_HELPER is not set
234CONFIG_CONNECTOR=m 272CONFIG_CONNECTOR=m
235CONFIG_PARPORT=m 273CONFIG_PARPORT=m
236CONFIG_PARPORT_PC=m 274CONFIG_PARPORT_PC=m
@@ -286,6 +324,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
286CONFIG_NET_TEAM_MODE_RANDOM=m 324CONFIG_NET_TEAM_MODE_RANDOM=m
287CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 325CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
288CONFIG_NET_TEAM_MODE_LOADBALANCE=m 326CONFIG_NET_TEAM_MODE_LOADBALANCE=m
327CONFIG_MACVLAN=m
328CONFIG_MACVTAP=m
329CONFIG_IPVLAN=m
289CONFIG_VXLAN=m 330CONFIG_VXLAN=m
290CONFIG_NETCONSOLE=m 331CONFIG_NETCONSOLE=m
291CONFIG_NETCONSOLE_DYNAMIC=y 332CONFIG_NETCONSOLE_DYNAMIC=y
@@ -300,6 +341,8 @@ CONFIG_VETH=m
300# CONFIG_NET_VENDOR_MARVELL is not set 341# CONFIG_NET_VENDOR_MARVELL is not set
301# CONFIG_NET_VENDOR_MICREL is not set 342# CONFIG_NET_VENDOR_MICREL is not set
302CONFIG_NE2000=m 343CONFIG_NE2000=m
344# CONFIG_NET_VENDOR_QUALCOMM is not set
345# CONFIG_NET_VENDOR_ROCKER is not set
303# CONFIG_NET_VENDOR_SAMSUNG is not set 346# CONFIG_NET_VENDOR_SAMSUNG is not set
304# CONFIG_NET_VENDOR_SEEQ is not set 347# CONFIG_NET_VENDOR_SEEQ is not set
305# CONFIG_NET_VENDOR_SMSC is not set 348# CONFIG_NET_VENDOR_SMSC is not set
@@ -347,6 +390,7 @@ CONFIG_HID=m
347CONFIG_HIDRAW=y 390CONFIG_HIDRAW=y
348CONFIG_UHID=m 391CONFIG_UHID=m
349# CONFIG_HID_GENERIC is not set 392# CONFIG_HID_GENERIC is not set
393# CONFIG_HID_PLANTRONICS is not set
350# CONFIG_USB_SUPPORT is not set 394# CONFIG_USB_SUPPORT is not set
351CONFIG_RTC_CLASS=y 395CONFIG_RTC_CLASS=y
352CONFIG_RTC_DRV_GENERIC=m 396CONFIG_RTC_DRV_GENERIC=m
@@ -365,6 +409,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
365CONFIG_AUTOFS4_FS=m 409CONFIG_AUTOFS4_FS=m
366CONFIG_FUSE_FS=m 410CONFIG_FUSE_FS=m
367CONFIG_CUSE=m 411CONFIG_CUSE=m
412CONFIG_OVERLAY_FS=m
368CONFIG_ISO9660_FS=y 413CONFIG_ISO9660_FS=y
369CONFIG_JOLIET=y 414CONFIG_JOLIET=y
370CONFIG_ZISOFS=y 415CONFIG_ZISOFS=y
@@ -380,6 +425,7 @@ CONFIG_HFS_FS=m
380CONFIG_HFSPLUS_FS=m 425CONFIG_HFSPLUS_FS=m
381CONFIG_CRAMFS=m 426CONFIG_CRAMFS=m
382CONFIG_SQUASHFS=m 427CONFIG_SQUASHFS=m
428CONFIG_SQUASHFS_LZ4=y
383CONFIG_SQUASHFS_LZO=y 429CONFIG_SQUASHFS_LZO=y
384CONFIG_MINIX_FS=m 430CONFIG_MINIX_FS=m
385CONFIG_OMFS_FS=m 431CONFIG_OMFS_FS=m
@@ -449,10 +495,18 @@ CONFIG_DLM=m
449CONFIG_MAGIC_SYSRQ=y 495CONFIG_MAGIC_SYSRQ=y
450CONFIG_ASYNC_RAID6_TEST=m 496CONFIG_ASYNC_RAID6_TEST=m
451CONFIG_TEST_STRING_HELPERS=m 497CONFIG_TEST_STRING_HELPERS=m
498CONFIG_TEST_KSTRTOX=m
499CONFIG_TEST_LKM=m
500CONFIG_TEST_USER_COPY=m
501CONFIG_TEST_BPF=m
502CONFIG_TEST_FIRMWARE=m
503CONFIG_TEST_UDELAY=m
504CONFIG_EARLY_PRINTK=y
452CONFIG_ENCRYPTED_KEYS=m 505CONFIG_ENCRYPTED_KEYS=m
453CONFIG_CRYPTO_MANAGER=y 506CONFIG_CRYPTO_MANAGER=y
454CONFIG_CRYPTO_USER=m 507CONFIG_CRYPTO_USER=m
455CONFIG_CRYPTO_CRYPTD=m 508CONFIG_CRYPTO_CRYPTD=m
509CONFIG_CRYPTO_MCRYPTD=m
456CONFIG_CRYPTO_TEST=m 510CONFIG_CRYPTO_TEST=m
457CONFIG_CRYPTO_CCM=m 511CONFIG_CRYPTO_CCM=m
458CONFIG_CRYPTO_GCM=m 512CONFIG_CRYPTO_GCM=m
@@ -487,13 +541,10 @@ CONFIG_CRYPTO_LZO=m
487CONFIG_CRYPTO_LZ4=m 541CONFIG_CRYPTO_LZ4=m
488CONFIG_CRYPTO_LZ4HC=m 542CONFIG_CRYPTO_LZ4HC=m
489# CONFIG_CRYPTO_ANSI_CPRNG is not set 543# CONFIG_CRYPTO_ANSI_CPRNG is not set
544CONFIG_CRYPTO_DRBG_MENU=m
545CONFIG_CRYPTO_DRBG_HASH=y
546CONFIG_CRYPTO_DRBG_CTR=y
490CONFIG_CRYPTO_USER_API_HASH=m 547CONFIG_CRYPTO_USER_API_HASH=m
491CONFIG_CRYPTO_USER_API_SKCIPHER=m 548CONFIG_CRYPTO_USER_API_SKCIPHER=m
492# CONFIG_CRYPTO_HW is not set 549# CONFIG_CRYPTO_HW is not set
493CONFIG_XZ_DEC_X86=y
494CONFIG_XZ_DEC_POWERPC=y
495CONFIG_XZ_DEC_IA64=y
496CONFIG_XZ_DEC_ARM=y
497CONFIG_XZ_DEC_ARMTHUMB=y
498CONFIG_XZ_DEC_SPARC=y
499CONFIG_XZ_DEC_TEST=m 550CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 98522e8fb852..c6b49a4a887c 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
29CONFIG_SUN3=y 29CONFIG_SUN3=y
30# CONFIG_COMPACTION is not set 30# CONFIG_COMPACTION is not set
31CONFIG_CLEANCACHE=y 31CONFIG_CLEANCACHE=y
32CONFIG_ZPOOL=m
32# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 33# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
33CONFIG_BINFMT_AOUT=m 34CONFIG_BINFMT_AOUT=m
34CONFIG_BINFMT_MISC=m 35CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
48CONFIG_NET_IPGRE_DEMUX=m 49CONFIG_NET_IPGRE_DEMUX=m
49CONFIG_NET_IPGRE=m 50CONFIG_NET_IPGRE=m
50CONFIG_NET_IPVTI=m 51CONFIG_NET_IPVTI=m
52CONFIG_NET_FOU_IP_TUNNELS=y
53CONFIG_GENEVE=m
51CONFIG_INET_AH=m 54CONFIG_INET_AH=m
52CONFIG_INET_ESP=m 55CONFIG_INET_ESP=m
53CONFIG_INET_IPCOMP=m 56CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
89CONFIG_NFT_COUNTER=m 92CONFIG_NFT_COUNTER=m
90CONFIG_NFT_LOG=m 93CONFIG_NFT_LOG=m
91CONFIG_NFT_LIMIT=m 94CONFIG_NFT_LIMIT=m
95CONFIG_NFT_MASQ=m
96CONFIG_NFT_REDIR=m
92CONFIG_NFT_NAT=m 97CONFIG_NFT_NAT=m
93CONFIG_NFT_QUEUE=m 98CONFIG_NFT_QUEUE=m
94CONFIG_NFT_REJECT=m 99CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
135CONFIG_NETFILTER_XT_MATCH_OSF=m 140CONFIG_NETFILTER_XT_MATCH_OSF=m
136CONFIG_NETFILTER_XT_MATCH_OWNER=m 141CONFIG_NETFILTER_XT_MATCH_OWNER=m
137CONFIG_NETFILTER_XT_MATCH_POLICY=m 142CONFIG_NETFILTER_XT_MATCH_POLICY=m
143CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
138CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 144CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
139CONFIG_NETFILTER_XT_MATCH_QUOTA=m 145CONFIG_NETFILTER_XT_MATCH_QUOTA=m
140CONFIG_NETFILTER_XT_MATCH_RATEEST=m 146CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
156CONFIG_IP_SET_HASH_IPPORT=m 162CONFIG_IP_SET_HASH_IPPORT=m
157CONFIG_IP_SET_HASH_IPPORTIP=m 163CONFIG_IP_SET_HASH_IPPORTIP=m
158CONFIG_IP_SET_HASH_IPPORTNET=m 164CONFIG_IP_SET_HASH_IPPORTNET=m
165CONFIG_IP_SET_HASH_MAC=m
159CONFIG_IP_SET_HASH_NETPORTNET=m 166CONFIG_IP_SET_HASH_NETPORTNET=m
160CONFIG_IP_SET_HASH_NET=m 167CONFIG_IP_SET_HASH_NET=m
161CONFIG_IP_SET_HASH_NETNET=m 168CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
163CONFIG_IP_SET_HASH_NETIFACE=m 170CONFIG_IP_SET_HASH_NETIFACE=m
164CONFIG_IP_SET_LIST_SET=m 171CONFIG_IP_SET_LIST_SET=m
165CONFIG_NF_CONNTRACK_IPV4=m 172CONFIG_NF_CONNTRACK_IPV4=m
173CONFIG_NF_LOG_ARP=m
166CONFIG_NFT_CHAIN_ROUTE_IPV4=m 174CONFIG_NFT_CHAIN_ROUTE_IPV4=m
167CONFIG_NFT_CHAIN_NAT_IPV4=m
168CONFIG_NF_TABLES_ARP=m 175CONFIG_NF_TABLES_ARP=m
176CONFIG_NFT_CHAIN_NAT_IPV4=m
177CONFIG_NFT_MASQ_IPV4=m
178CONFIG_NFT_REDIR_IPV4=m
169CONFIG_IP_NF_IPTABLES=m 179CONFIG_IP_NF_IPTABLES=m
170CONFIG_IP_NF_MATCH_AH=m 180CONFIG_IP_NF_MATCH_AH=m
171CONFIG_IP_NF_MATCH_ECN=m 181CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
174CONFIG_IP_NF_FILTER=m 184CONFIG_IP_NF_FILTER=m
175CONFIG_IP_NF_TARGET_REJECT=m 185CONFIG_IP_NF_TARGET_REJECT=m
176CONFIG_IP_NF_TARGET_SYNPROXY=m 186CONFIG_IP_NF_TARGET_SYNPROXY=m
177CONFIG_IP_NF_TARGET_ULOG=m 187CONFIG_IP_NF_NAT=m
178CONFIG_NF_NAT_IPV4=m
179CONFIG_IP_NF_TARGET_MASQUERADE=m 188CONFIG_IP_NF_TARGET_MASQUERADE=m
180CONFIG_IP_NF_TARGET_NETMAP=m 189CONFIG_IP_NF_TARGET_NETMAP=m
181CONFIG_IP_NF_TARGET_REDIRECT=m 190CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
190CONFIG_NF_CONNTRACK_IPV6=m 199CONFIG_NF_CONNTRACK_IPV6=m
191CONFIG_NFT_CHAIN_ROUTE_IPV6=m 200CONFIG_NFT_CHAIN_ROUTE_IPV6=m
192CONFIG_NFT_CHAIN_NAT_IPV6=m 201CONFIG_NFT_CHAIN_NAT_IPV6=m
202CONFIG_NFT_MASQ_IPV6=m
203CONFIG_NFT_REDIR_IPV6=m
193CONFIG_IP6_NF_IPTABLES=m 204CONFIG_IP6_NF_IPTABLES=m
194CONFIG_IP6_NF_MATCH_AH=m 205CONFIG_IP6_NF_MATCH_AH=m
195CONFIG_IP6_NF_MATCH_EUI64=m 206CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
206CONFIG_IP6_NF_TARGET_SYNPROXY=m 217CONFIG_IP6_NF_TARGET_SYNPROXY=m
207CONFIG_IP6_NF_MANGLE=m 218CONFIG_IP6_NF_MANGLE=m
208CONFIG_IP6_NF_RAW=m 219CONFIG_IP6_NF_RAW=m
209CONFIG_NF_NAT_IPV6=m 220CONFIG_IP6_NF_NAT=m
210CONFIG_IP6_NF_TARGET_MASQUERADE=m 221CONFIG_IP6_NF_TARGET_MASQUERADE=m
211CONFIG_IP6_NF_TARGET_NPT=m 222CONFIG_IP6_NF_TARGET_NPT=m
212CONFIG_NF_TABLES_BRIDGE=m 223CONFIG_NF_TABLES_BRIDGE=m
224CONFIG_NFT_BRIDGE_META=m
225CONFIG_NFT_BRIDGE_REJECT=m
226CONFIG_NF_LOG_BRIDGE=m
227CONFIG_BRIDGE_NF_EBTABLES=m
228CONFIG_BRIDGE_EBT_BROUTE=m
229CONFIG_BRIDGE_EBT_T_FILTER=m
230CONFIG_BRIDGE_EBT_T_NAT=m
231CONFIG_BRIDGE_EBT_802_3=m
232CONFIG_BRIDGE_EBT_AMONG=m
233CONFIG_BRIDGE_EBT_ARP=m
234CONFIG_BRIDGE_EBT_IP=m
235CONFIG_BRIDGE_EBT_IP6=m
236CONFIG_BRIDGE_EBT_LIMIT=m
237CONFIG_BRIDGE_EBT_MARK=m
238CONFIG_BRIDGE_EBT_PKTTYPE=m
239CONFIG_BRIDGE_EBT_STP=m
240CONFIG_BRIDGE_EBT_VLAN=m
241CONFIG_BRIDGE_EBT_ARPREPLY=m
242CONFIG_BRIDGE_EBT_DNAT=m
243CONFIG_BRIDGE_EBT_MARK_T=m
244CONFIG_BRIDGE_EBT_REDIRECT=m
245CONFIG_BRIDGE_EBT_SNAT=m
246CONFIG_BRIDGE_EBT_LOG=m
247CONFIG_BRIDGE_EBT_NFLOG=m
213CONFIG_IP_DCCP=m 248CONFIG_IP_DCCP=m
214# CONFIG_IP_DCCP_CCID3 is not set 249# CONFIG_IP_DCCP_CCID3 is not set
215CONFIG_SCTP_COOKIE_HMAC_SHA1=y 250CONFIG_SCTP_COOKIE_HMAC_SHA1=y
216CONFIG_RDS=m 251CONFIG_RDS=m
217CONFIG_RDS_TCP=m 252CONFIG_RDS_TCP=m
218CONFIG_L2TP=m 253CONFIG_L2TP=m
254CONFIG_BRIDGE=m
219CONFIG_ATALK=m 255CONFIG_ATALK=m
256CONFIG_6LOWPAN=m
220CONFIG_DNS_RESOLVER=y 257CONFIG_DNS_RESOLVER=y
221CONFIG_BATMAN_ADV=m 258CONFIG_BATMAN_ADV=m
222CONFIG_BATMAN_ADV_DAT=y 259CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
225CONFIG_NETLINK_DIAG=m 262CONFIG_NETLINK_DIAG=m
226CONFIG_NET_MPLS_GSO=m 263CONFIG_NET_MPLS_GSO=m
227# CONFIG_WIRELESS is not set 264# CONFIG_WIRELESS is not set
265# CONFIG_UEVENT_HELPER is not set
228CONFIG_DEVTMPFS=y 266CONFIG_DEVTMPFS=y
267CONFIG_DEVTMPFS_MOUNT=y
229# CONFIG_FIRMWARE_IN_KERNEL is not set 268# CONFIG_FIRMWARE_IN_KERNEL is not set
230# CONFIG_FW_LOADER_USER_HELPER is not set
231CONFIG_CONNECTOR=m 269CONFIG_CONNECTOR=m
232CONFIG_BLK_DEV_LOOP=y 270CONFIG_BLK_DEV_LOOP=y
233CONFIG_BLK_DEV_CRYPTOLOOP=m 271CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
277CONFIG_NET_TEAM_MODE_RANDOM=m 315CONFIG_NET_TEAM_MODE_RANDOM=m
278CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 316CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
279CONFIG_NET_TEAM_MODE_LOADBALANCE=m 317CONFIG_NET_TEAM_MODE_LOADBALANCE=m
318CONFIG_MACVLAN=m
319CONFIG_MACVTAP=m
320CONFIG_IPVLAN=m
280CONFIG_VXLAN=m 321CONFIG_VXLAN=m
281CONFIG_NETCONSOLE=m 322CONFIG_NETCONSOLE=m
282CONFIG_NETCONSOLE_DYNAMIC=y 323CONFIG_NETCONSOLE_DYNAMIC=y
@@ -287,6 +328,8 @@ CONFIG_SUN3_82586=y
287# CONFIG_NET_VENDOR_MARVELL is not set 328# CONFIG_NET_VENDOR_MARVELL is not set
288# CONFIG_NET_VENDOR_MICREL is not set 329# CONFIG_NET_VENDOR_MICREL is not set
289# CONFIG_NET_VENDOR_NATSEMI is not set 330# CONFIG_NET_VENDOR_NATSEMI is not set
331# CONFIG_NET_VENDOR_QUALCOMM is not set
332# CONFIG_NET_VENDOR_ROCKER is not set
290# CONFIG_NET_VENDOR_SAMSUNG is not set 333# CONFIG_NET_VENDOR_SAMSUNG is not set
291# CONFIG_NET_VENDOR_SEEQ is not set 334# CONFIG_NET_VENDOR_SEEQ is not set
292# CONFIG_NET_VENDOR_STMICRO is not set 335# CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
327CONFIG_HIDRAW=y 370CONFIG_HIDRAW=y
328CONFIG_UHID=m 371CONFIG_UHID=m
329# CONFIG_HID_GENERIC is not set 372# CONFIG_HID_GENERIC is not set
373# CONFIG_HID_PLANTRONICS is not set
330# CONFIG_USB_SUPPORT is not set 374# CONFIG_USB_SUPPORT is not set
331CONFIG_RTC_CLASS=y 375CONFIG_RTC_CLASS=y
332CONFIG_RTC_DRV_GENERIC=m 376CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
344CONFIG_AUTOFS4_FS=m 388CONFIG_AUTOFS4_FS=m
345CONFIG_FUSE_FS=m 389CONFIG_FUSE_FS=m
346CONFIG_CUSE=m 390CONFIG_CUSE=m
391CONFIG_OVERLAY_FS=m
347CONFIG_ISO9660_FS=y 392CONFIG_ISO9660_FS=y
348CONFIG_JOLIET=y 393CONFIG_JOLIET=y
349CONFIG_ZISOFS=y 394CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
359CONFIG_HFSPLUS_FS=m 404CONFIG_HFSPLUS_FS=m
360CONFIG_CRAMFS=m 405CONFIG_CRAMFS=m
361CONFIG_SQUASHFS=m 406CONFIG_SQUASHFS=m
407CONFIG_SQUASHFS_LZ4=y
362CONFIG_SQUASHFS_LZO=y 408CONFIG_SQUASHFS_LZO=y
363CONFIG_MINIX_FS=m 409CONFIG_MINIX_FS=m
364CONFIG_OMFS_FS=m 410CONFIG_OMFS_FS=m
@@ -428,10 +474,17 @@ CONFIG_DLM=m
428CONFIG_MAGIC_SYSRQ=y 474CONFIG_MAGIC_SYSRQ=y
429CONFIG_ASYNC_RAID6_TEST=m 475CONFIG_ASYNC_RAID6_TEST=m
430CONFIG_TEST_STRING_HELPERS=m 476CONFIG_TEST_STRING_HELPERS=m
477CONFIG_TEST_KSTRTOX=m
478CONFIG_TEST_LKM=m
479CONFIG_TEST_USER_COPY=m
480CONFIG_TEST_BPF=m
481CONFIG_TEST_FIRMWARE=m
482CONFIG_TEST_UDELAY=m
431CONFIG_ENCRYPTED_KEYS=m 483CONFIG_ENCRYPTED_KEYS=m
432CONFIG_CRYPTO_MANAGER=y 484CONFIG_CRYPTO_MANAGER=y
433CONFIG_CRYPTO_USER=m 485CONFIG_CRYPTO_USER=m
434CONFIG_CRYPTO_CRYPTD=m 486CONFIG_CRYPTO_CRYPTD=m
487CONFIG_CRYPTO_MCRYPTD=m
435CONFIG_CRYPTO_TEST=m 488CONFIG_CRYPTO_TEST=m
436CONFIG_CRYPTO_CCM=m 489CONFIG_CRYPTO_CCM=m
437CONFIG_CRYPTO_GCM=m 490CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
466CONFIG_CRYPTO_LZ4=m 519CONFIG_CRYPTO_LZ4=m
467CONFIG_CRYPTO_LZ4HC=m 520CONFIG_CRYPTO_LZ4HC=m
468# CONFIG_CRYPTO_ANSI_CPRNG is not set 521# CONFIG_CRYPTO_ANSI_CPRNG is not set
522CONFIG_CRYPTO_DRBG_MENU=m
523CONFIG_CRYPTO_DRBG_HASH=y
524CONFIG_CRYPTO_DRBG_CTR=y
469CONFIG_CRYPTO_USER_API_HASH=m 525CONFIG_CRYPTO_USER_API_HASH=m
470CONFIG_CRYPTO_USER_API_SKCIPHER=m 526CONFIG_CRYPTO_USER_API_SKCIPHER=m
471# CONFIG_CRYPTO_HW is not set 527# CONFIG_CRYPTO_HW is not set
472CONFIG_XZ_DEC_X86=y
473CONFIG_XZ_DEC_POWERPC=y
474CONFIG_XZ_DEC_IA64=y
475CONFIG_XZ_DEC_ARM=y
476CONFIG_XZ_DEC_ARMTHUMB=y
477CONFIG_XZ_DEC_SPARC=y
478CONFIG_XZ_DEC_TEST=m 528CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 5128a8c3f4e3..b65785eaff8d 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
29CONFIG_SUN3X=y 29CONFIG_SUN3X=y
30# CONFIG_COMPACTION is not set 30# CONFIG_COMPACTION is not set
31CONFIG_CLEANCACHE=y 31CONFIG_CLEANCACHE=y
32CONFIG_ZPOOL=m
32# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 33# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
33CONFIG_BINFMT_AOUT=m 34CONFIG_BINFMT_AOUT=m
34CONFIG_BINFMT_MISC=m 35CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
48CONFIG_NET_IPGRE_DEMUX=m 49CONFIG_NET_IPGRE_DEMUX=m
49CONFIG_NET_IPGRE=m 50CONFIG_NET_IPGRE=m
50CONFIG_NET_IPVTI=m 51CONFIG_NET_IPVTI=m
52CONFIG_NET_FOU_IP_TUNNELS=y
53CONFIG_GENEVE=m
51CONFIG_INET_AH=m 54CONFIG_INET_AH=m
52CONFIG_INET_ESP=m 55CONFIG_INET_ESP=m
53CONFIG_INET_IPCOMP=m 56CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
89CONFIG_NFT_COUNTER=m 92CONFIG_NFT_COUNTER=m
90CONFIG_NFT_LOG=m 93CONFIG_NFT_LOG=m
91CONFIG_NFT_LIMIT=m 94CONFIG_NFT_LIMIT=m
95CONFIG_NFT_MASQ=m
96CONFIG_NFT_REDIR=m
92CONFIG_NFT_NAT=m 97CONFIG_NFT_NAT=m
93CONFIG_NFT_QUEUE=m 98CONFIG_NFT_QUEUE=m
94CONFIG_NFT_REJECT=m 99CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
135CONFIG_NETFILTER_XT_MATCH_OSF=m 140CONFIG_NETFILTER_XT_MATCH_OSF=m
136CONFIG_NETFILTER_XT_MATCH_OWNER=m 141CONFIG_NETFILTER_XT_MATCH_OWNER=m
137CONFIG_NETFILTER_XT_MATCH_POLICY=m 142CONFIG_NETFILTER_XT_MATCH_POLICY=m
143CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
138CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 144CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
139CONFIG_NETFILTER_XT_MATCH_QUOTA=m 145CONFIG_NETFILTER_XT_MATCH_QUOTA=m
140CONFIG_NETFILTER_XT_MATCH_RATEEST=m 146CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
156CONFIG_IP_SET_HASH_IPPORT=m 162CONFIG_IP_SET_HASH_IPPORT=m
157CONFIG_IP_SET_HASH_IPPORTIP=m 163CONFIG_IP_SET_HASH_IPPORTIP=m
158CONFIG_IP_SET_HASH_IPPORTNET=m 164CONFIG_IP_SET_HASH_IPPORTNET=m
165CONFIG_IP_SET_HASH_MAC=m
159CONFIG_IP_SET_HASH_NETPORTNET=m 166CONFIG_IP_SET_HASH_NETPORTNET=m
160CONFIG_IP_SET_HASH_NET=m 167CONFIG_IP_SET_HASH_NET=m
161CONFIG_IP_SET_HASH_NETNET=m 168CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
163CONFIG_IP_SET_HASH_NETIFACE=m 170CONFIG_IP_SET_HASH_NETIFACE=m
164CONFIG_IP_SET_LIST_SET=m 171CONFIG_IP_SET_LIST_SET=m
165CONFIG_NF_CONNTRACK_IPV4=m 172CONFIG_NF_CONNTRACK_IPV4=m
173CONFIG_NF_LOG_ARP=m
166CONFIG_NFT_CHAIN_ROUTE_IPV4=m 174CONFIG_NFT_CHAIN_ROUTE_IPV4=m
167CONFIG_NFT_CHAIN_NAT_IPV4=m
168CONFIG_NF_TABLES_ARP=m 175CONFIG_NF_TABLES_ARP=m
176CONFIG_NFT_CHAIN_NAT_IPV4=m
177CONFIG_NFT_MASQ_IPV4=m
178CONFIG_NFT_REDIR_IPV4=m
169CONFIG_IP_NF_IPTABLES=m 179CONFIG_IP_NF_IPTABLES=m
170CONFIG_IP_NF_MATCH_AH=m 180CONFIG_IP_NF_MATCH_AH=m
171CONFIG_IP_NF_MATCH_ECN=m 181CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
174CONFIG_IP_NF_FILTER=m 184CONFIG_IP_NF_FILTER=m
175CONFIG_IP_NF_TARGET_REJECT=m 185CONFIG_IP_NF_TARGET_REJECT=m
176CONFIG_IP_NF_TARGET_SYNPROXY=m 186CONFIG_IP_NF_TARGET_SYNPROXY=m
177CONFIG_IP_NF_TARGET_ULOG=m 187CONFIG_IP_NF_NAT=m
178CONFIG_NF_NAT_IPV4=m
179CONFIG_IP_NF_TARGET_MASQUERADE=m 188CONFIG_IP_NF_TARGET_MASQUERADE=m
180CONFIG_IP_NF_TARGET_NETMAP=m 189CONFIG_IP_NF_TARGET_NETMAP=m
181CONFIG_IP_NF_TARGET_REDIRECT=m 190CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
190CONFIG_NF_CONNTRACK_IPV6=m 199CONFIG_NF_CONNTRACK_IPV6=m
191CONFIG_NFT_CHAIN_ROUTE_IPV6=m 200CONFIG_NFT_CHAIN_ROUTE_IPV6=m
192CONFIG_NFT_CHAIN_NAT_IPV6=m 201CONFIG_NFT_CHAIN_NAT_IPV6=m
202CONFIG_NFT_MASQ_IPV6=m
203CONFIG_NFT_REDIR_IPV6=m
193CONFIG_IP6_NF_IPTABLES=m 204CONFIG_IP6_NF_IPTABLES=m
194CONFIG_IP6_NF_MATCH_AH=m 205CONFIG_IP6_NF_MATCH_AH=m
195CONFIG_IP6_NF_MATCH_EUI64=m 206CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
206CONFIG_IP6_NF_TARGET_SYNPROXY=m 217CONFIG_IP6_NF_TARGET_SYNPROXY=m
207CONFIG_IP6_NF_MANGLE=m 218CONFIG_IP6_NF_MANGLE=m
208CONFIG_IP6_NF_RAW=m 219CONFIG_IP6_NF_RAW=m
209CONFIG_NF_NAT_IPV6=m 220CONFIG_IP6_NF_NAT=m
210CONFIG_IP6_NF_TARGET_MASQUERADE=m 221CONFIG_IP6_NF_TARGET_MASQUERADE=m
211CONFIG_IP6_NF_TARGET_NPT=m 222CONFIG_IP6_NF_TARGET_NPT=m
212CONFIG_NF_TABLES_BRIDGE=m 223CONFIG_NF_TABLES_BRIDGE=m
224CONFIG_NFT_BRIDGE_META=m
225CONFIG_NFT_BRIDGE_REJECT=m
226CONFIG_NF_LOG_BRIDGE=m
227CONFIG_BRIDGE_NF_EBTABLES=m
228CONFIG_BRIDGE_EBT_BROUTE=m
229CONFIG_BRIDGE_EBT_T_FILTER=m
230CONFIG_BRIDGE_EBT_T_NAT=m
231CONFIG_BRIDGE_EBT_802_3=m
232CONFIG_BRIDGE_EBT_AMONG=m
233CONFIG_BRIDGE_EBT_ARP=m
234CONFIG_BRIDGE_EBT_IP=m
235CONFIG_BRIDGE_EBT_IP6=m
236CONFIG_BRIDGE_EBT_LIMIT=m
237CONFIG_BRIDGE_EBT_MARK=m
238CONFIG_BRIDGE_EBT_PKTTYPE=m
239CONFIG_BRIDGE_EBT_STP=m
240CONFIG_BRIDGE_EBT_VLAN=m
241CONFIG_BRIDGE_EBT_ARPREPLY=m
242CONFIG_BRIDGE_EBT_DNAT=m
243CONFIG_BRIDGE_EBT_MARK_T=m
244CONFIG_BRIDGE_EBT_REDIRECT=m
245CONFIG_BRIDGE_EBT_SNAT=m
246CONFIG_BRIDGE_EBT_LOG=m
247CONFIG_BRIDGE_EBT_NFLOG=m
213CONFIG_IP_DCCP=m 248CONFIG_IP_DCCP=m
214# CONFIG_IP_DCCP_CCID3 is not set 249# CONFIG_IP_DCCP_CCID3 is not set
215CONFIG_SCTP_COOKIE_HMAC_SHA1=y 250CONFIG_SCTP_COOKIE_HMAC_SHA1=y
216CONFIG_RDS=m 251CONFIG_RDS=m
217CONFIG_RDS_TCP=m 252CONFIG_RDS_TCP=m
218CONFIG_L2TP=m 253CONFIG_L2TP=m
254CONFIG_BRIDGE=m
219CONFIG_ATALK=m 255CONFIG_ATALK=m
256CONFIG_6LOWPAN=m
220CONFIG_DNS_RESOLVER=y 257CONFIG_DNS_RESOLVER=y
221CONFIG_BATMAN_ADV=m 258CONFIG_BATMAN_ADV=m
222CONFIG_BATMAN_ADV_DAT=y 259CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
225CONFIG_NETLINK_DIAG=m 262CONFIG_NETLINK_DIAG=m
226CONFIG_NET_MPLS_GSO=m 263CONFIG_NET_MPLS_GSO=m
227# CONFIG_WIRELESS is not set 264# CONFIG_WIRELESS is not set
265# CONFIG_UEVENT_HELPER is not set
228CONFIG_DEVTMPFS=y 266CONFIG_DEVTMPFS=y
267CONFIG_DEVTMPFS_MOUNT=y
229# CONFIG_FIRMWARE_IN_KERNEL is not set 268# CONFIG_FIRMWARE_IN_KERNEL is not set
230# CONFIG_FW_LOADER_USER_HELPER is not set
231CONFIG_CONNECTOR=m 269CONFIG_CONNECTOR=m
232CONFIG_BLK_DEV_LOOP=y 270CONFIG_BLK_DEV_LOOP=y
233CONFIG_BLK_DEV_CRYPTOLOOP=m 271CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
277CONFIG_NET_TEAM_MODE_RANDOM=m 315CONFIG_NET_TEAM_MODE_RANDOM=m
278CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m 316CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
279CONFIG_NET_TEAM_MODE_LOADBALANCE=m 317CONFIG_NET_TEAM_MODE_LOADBALANCE=m
318CONFIG_MACVLAN=m
319CONFIG_MACVTAP=m
320CONFIG_IPVLAN=m
280CONFIG_VXLAN=m 321CONFIG_VXLAN=m
281CONFIG_NETCONSOLE=m 322CONFIG_NETCONSOLE=m
282CONFIG_NETCONSOLE_DYNAMIC=y 323CONFIG_NETCONSOLE_DYNAMIC=y
@@ -288,6 +329,8 @@ CONFIG_SUN3LANCE=y
288# CONFIG_NET_VENDOR_MARVELL is not set 329# CONFIG_NET_VENDOR_MARVELL is not set
289# CONFIG_NET_VENDOR_MICREL is not set 330# CONFIG_NET_VENDOR_MICREL is not set
290# CONFIG_NET_VENDOR_NATSEMI is not set 331# CONFIG_NET_VENDOR_NATSEMI is not set
332# CONFIG_NET_VENDOR_QUALCOMM is not set
333# CONFIG_NET_VENDOR_ROCKER is not set
291# CONFIG_NET_VENDOR_SAMSUNG is not set 334# CONFIG_NET_VENDOR_SAMSUNG is not set
292# CONFIG_NET_VENDOR_SEEQ is not set 335# CONFIG_NET_VENDOR_SEEQ is not set
293# CONFIG_NET_VENDOR_STMICRO is not set 336# CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
327CONFIG_HIDRAW=y 370CONFIG_HIDRAW=y
328CONFIG_UHID=m 371CONFIG_UHID=m
329# CONFIG_HID_GENERIC is not set 372# CONFIG_HID_GENERIC is not set
373# CONFIG_HID_PLANTRONICS is not set
330# CONFIG_USB_SUPPORT is not set 374# CONFIG_USB_SUPPORT is not set
331CONFIG_RTC_CLASS=y 375CONFIG_RTC_CLASS=y
332CONFIG_RTC_DRV_GENERIC=m 376CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
344CONFIG_AUTOFS4_FS=m 388CONFIG_AUTOFS4_FS=m
345CONFIG_FUSE_FS=m 389CONFIG_FUSE_FS=m
346CONFIG_CUSE=m 390CONFIG_CUSE=m
391CONFIG_OVERLAY_FS=m
347CONFIG_ISO9660_FS=y 392CONFIG_ISO9660_FS=y
348CONFIG_JOLIET=y 393CONFIG_JOLIET=y
349CONFIG_ZISOFS=y 394CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
359CONFIG_HFSPLUS_FS=m 404CONFIG_HFSPLUS_FS=m
360CONFIG_CRAMFS=m 405CONFIG_CRAMFS=m
361CONFIG_SQUASHFS=m 406CONFIG_SQUASHFS=m
407CONFIG_SQUASHFS_LZ4=y
362CONFIG_SQUASHFS_LZO=y 408CONFIG_SQUASHFS_LZO=y
363CONFIG_MINIX_FS=m 409CONFIG_MINIX_FS=m
364CONFIG_OMFS_FS=m 410CONFIG_OMFS_FS=m
@@ -428,10 +474,18 @@ CONFIG_DLM=m
428CONFIG_MAGIC_SYSRQ=y 474CONFIG_MAGIC_SYSRQ=y
429CONFIG_ASYNC_RAID6_TEST=m 475CONFIG_ASYNC_RAID6_TEST=m
430CONFIG_TEST_STRING_HELPERS=m 476CONFIG_TEST_STRING_HELPERS=m
477CONFIG_TEST_KSTRTOX=m
478CONFIG_TEST_LKM=m
479CONFIG_TEST_USER_COPY=m
480CONFIG_TEST_BPF=m
481CONFIG_TEST_FIRMWARE=m
482CONFIG_TEST_UDELAY=m
483CONFIG_EARLY_PRINTK=y
431CONFIG_ENCRYPTED_KEYS=m 484CONFIG_ENCRYPTED_KEYS=m
432CONFIG_CRYPTO_MANAGER=y 485CONFIG_CRYPTO_MANAGER=y
433CONFIG_CRYPTO_USER=m 486CONFIG_CRYPTO_USER=m
434CONFIG_CRYPTO_CRYPTD=m 487CONFIG_CRYPTO_CRYPTD=m
488CONFIG_CRYPTO_MCRYPTD=m
435CONFIG_CRYPTO_TEST=m 489CONFIG_CRYPTO_TEST=m
436CONFIG_CRYPTO_CCM=m 490CONFIG_CRYPTO_CCM=m
437CONFIG_CRYPTO_GCM=m 491CONFIG_CRYPTO_GCM=m
@@ -466,13 +520,10 @@ CONFIG_CRYPTO_LZO=m
466CONFIG_CRYPTO_LZ4=m 520CONFIG_CRYPTO_LZ4=m
467CONFIG_CRYPTO_LZ4HC=m 521CONFIG_CRYPTO_LZ4HC=m
468# CONFIG_CRYPTO_ANSI_CPRNG is not set 522# CONFIG_CRYPTO_ANSI_CPRNG is not set
523CONFIG_CRYPTO_DRBG_MENU=m
524CONFIG_CRYPTO_DRBG_HASH=y
525CONFIG_CRYPTO_DRBG_CTR=y
469CONFIG_CRYPTO_USER_API_HASH=m 526CONFIG_CRYPTO_USER_API_HASH=m
470CONFIG_CRYPTO_USER_API_SKCIPHER=m 527CONFIG_CRYPTO_USER_API_SKCIPHER=m
471# CONFIG_CRYPTO_HW is not set 528# CONFIG_CRYPTO_HW is not set
472CONFIG_XZ_DEC_X86=y
473CONFIG_XZ_DEC_POWERPC=y
474CONFIG_XZ_DEC_IA64=y
475CONFIG_XZ_DEC_ARM=y
476CONFIG_XZ_DEC_ARMTHUMB=y
477CONFIG_XZ_DEC_SPARC=y
478CONFIG_XZ_DEC_TEST=m 529CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 9b6c691874bd..1517ed1c6471 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += device.h
6generic-y += emergency-restart.h 6generic-y += emergency-restart.h
7generic-y += errno.h 7generic-y += errno.h
8generic-y += exec.h 8generic-y += exec.h
9generic-y += futex.h
9generic-y += hw_irq.h 10generic-y += hw_irq.h
10generic-y += ioctl.h 11generic-y += ioctl.h
11generic-y += ipcbuf.h 12generic-y += ipcbuf.h
diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h
index 953e0ac6855e..6321c4495620 100644
--- a/arch/m68k/include/asm/atariints.h
+++ b/arch/m68k/include/asm/atariints.h
@@ -40,11 +40,6 @@
40/* convert irq_handler index to vector number */ 40/* convert irq_handler index to vector number */
41#define IRQ_SOURCE_TO_VECTOR(i) ((i) + ((i) < 8 ? 0x18 : (0x40-8))) 41#define IRQ_SOURCE_TO_VECTOR(i) ((i) + ((i) < 8 ? 0x18 : (0x40-8)))
42 42
43/* interrupt service types */
44#define IRQ_TYPE_SLOW 0
45#define IRQ_TYPE_FAST 1
46#define IRQ_TYPE_PRIO 2
47
48/* ST-MFP interrupts */ 43/* ST-MFP interrupts */
49#define IRQ_MFP_BUSY (8) 44#define IRQ_MFP_BUSY (8)
50#define IRQ_MFP_DCD (9) 45#define IRQ_MFP_DCD (9)
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index bc868af10c96..000000000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,94 +0,0 @@
1#ifndef _ASM_M68K_FUTEX_H
2#define _ASM_M68K_FUTEX_H
3
4#ifdef __KERNEL__
5#if !defined(CONFIG_MMU)
6#include <asm-generic/futex.h>
7#else /* CONFIG_MMU */
8
9#include <linux/futex.h>
10#include <linux/uaccess.h>
11#include <asm/errno.h>
12
13static inline int
14futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
15 u32 oldval, u32 newval)
16{
17 u32 val;
18
19 if (unlikely(get_user(val, uaddr) != 0))
20 return -EFAULT;
21
22 if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
23 return -EFAULT;
24
25 *uval = val;
26
27 return 0;
28}
29
30static inline int
31futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
32{
33 int op = (encoded_op >> 28) & 7;
34 int cmp = (encoded_op >> 24) & 15;
35 int oparg = (encoded_op << 8) >> 20;
36 int cmparg = (encoded_op << 20) >> 20;
37 int oldval, ret;
38 u32 tmp;
39
40 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
41 oparg = 1 << oparg;
42
43 pagefault_disable(); /* implies preempt_disable() */
44
45 ret = -EFAULT;
46 if (unlikely(get_user(oldval, uaddr) != 0))
47 goto out_pagefault_enable;
48
49 ret = 0;
50 tmp = oldval;
51
52 switch (op) {
53 case FUTEX_OP_SET:
54 tmp = oparg;
55 break;
56 case FUTEX_OP_ADD:
57 tmp += oparg;
58 break;
59 case FUTEX_OP_OR:
60 tmp |= oparg;
61 break;
62 case FUTEX_OP_ANDN:
63 tmp &= ~oparg;
64 break;
65 case FUTEX_OP_XOR:
66 tmp ^= oparg;
67 break;
68 default:
69 ret = -ENOSYS;
70 }
71
72 if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
73 ret = -EFAULT;
74
75out_pagefault_enable:
76 pagefault_enable(); /* subsumes preempt_enable() */
77
78 if (ret == 0) {
79 switch (cmp) {
80 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
81 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
82 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
83 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
84 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
85 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
86 default: ret = -ENOSYS;
87 }
88 }
89 return ret;
90}
91
92#endif /* CONFIG_MMU */
93#endif /* __KERNEL__ */
94#endif /* _ASM_M68K_FUTEX_H */
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 29c7c6c3a5f2..42235e7fbeed 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -55,7 +55,7 @@ struct mac_model
55#define MAC_SCSI_QUADRA3 4 55#define MAC_SCSI_QUADRA3 4
56#define MAC_SCSI_IIFX 5 56#define MAC_SCSI_IIFX 5
57#define MAC_SCSI_DUO 6 57#define MAC_SCSI_DUO 6
58#define MAC_SCSI_CCL 7 58#define MAC_SCSI_LC 7
59#define MAC_SCSI_LATE 8 59#define MAC_SCSI_LATE 8
60 60
61#define MAC_IDE_NONE 0 61#define MAC_IDE_NONE 0
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e9c3756139fc..689b47d292ac 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -296,7 +296,7 @@ static struct mac_model mac_data_table[] = {
296 .name = "IIvi", 296 .name = "IIvi",
297 .adb_type = MAC_ADB_IISI, 297 .adb_type = MAC_ADB_IISI,
298 .via_type = MAC_VIA_IICI, 298 .via_type = MAC_VIA_IICI,
299 .scsi_type = MAC_SCSI_OLD, 299 .scsi_type = MAC_SCSI_LC,
300 .scc_type = MAC_SCC_II, 300 .scc_type = MAC_SCC_II,
301 .nubus_type = MAC_NUBUS, 301 .nubus_type = MAC_NUBUS,
302 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 302 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -305,7 +305,7 @@ static struct mac_model mac_data_table[] = {
305 .name = "IIvx", 305 .name = "IIvx",
306 .adb_type = MAC_ADB_IISI, 306 .adb_type = MAC_ADB_IISI,
307 .via_type = MAC_VIA_IICI, 307 .via_type = MAC_VIA_IICI,
308 .scsi_type = MAC_SCSI_OLD, 308 .scsi_type = MAC_SCSI_LC,
309 .scc_type = MAC_SCC_II, 309 .scc_type = MAC_SCC_II,
310 .nubus_type = MAC_NUBUS, 310 .nubus_type = MAC_NUBUS,
311 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 311 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -320,7 +320,7 @@ static struct mac_model mac_data_table[] = {
320 .name = "Classic II", 320 .name = "Classic II",
321 .adb_type = MAC_ADB_IISI, 321 .adb_type = MAC_ADB_IISI,
322 .via_type = MAC_VIA_IICI, 322 .via_type = MAC_VIA_IICI,
323 .scsi_type = MAC_SCSI_OLD, 323 .scsi_type = MAC_SCSI_LC,
324 .scc_type = MAC_SCC_II, 324 .scc_type = MAC_SCC_II,
325 .nubus_type = MAC_NUBUS, 325 .nubus_type = MAC_NUBUS,
326 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 326 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -329,7 +329,7 @@ static struct mac_model mac_data_table[] = {
329 .name = "Color Classic", 329 .name = "Color Classic",
330 .adb_type = MAC_ADB_CUDA, 330 .adb_type = MAC_ADB_CUDA,
331 .via_type = MAC_VIA_IICI, 331 .via_type = MAC_VIA_IICI,
332 .scsi_type = MAC_SCSI_CCL, 332 .scsi_type = MAC_SCSI_LC,
333 .scc_type = MAC_SCC_II, 333 .scc_type = MAC_SCC_II,
334 .nubus_type = MAC_NUBUS, 334 .nubus_type = MAC_NUBUS,
335 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 335 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -338,7 +338,7 @@ static struct mac_model mac_data_table[] = {
338 .name = "Color Classic II", 338 .name = "Color Classic II",
339 .adb_type = MAC_ADB_CUDA, 339 .adb_type = MAC_ADB_CUDA,
340 .via_type = MAC_VIA_IICI, 340 .via_type = MAC_VIA_IICI,
341 .scsi_type = MAC_SCSI_CCL, 341 .scsi_type = MAC_SCSI_LC,
342 .scc_type = MAC_SCC_II, 342 .scc_type = MAC_SCC_II,
343 .nubus_type = MAC_NUBUS, 343 .nubus_type = MAC_NUBUS,
344 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 344 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -353,7 +353,7 @@ static struct mac_model mac_data_table[] = {
353 .name = "LC", 353 .name = "LC",
354 .adb_type = MAC_ADB_IISI, 354 .adb_type = MAC_ADB_IISI,
355 .via_type = MAC_VIA_IICI, 355 .via_type = MAC_VIA_IICI,
356 .scsi_type = MAC_SCSI_OLD, 356 .scsi_type = MAC_SCSI_LC,
357 .scc_type = MAC_SCC_II, 357 .scc_type = MAC_SCC_II,
358 .nubus_type = MAC_NUBUS, 358 .nubus_type = MAC_NUBUS,
359 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 359 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -362,7 +362,7 @@ static struct mac_model mac_data_table[] = {
362 .name = "LC II", 362 .name = "LC II",
363 .adb_type = MAC_ADB_IISI, 363 .adb_type = MAC_ADB_IISI,
364 .via_type = MAC_VIA_IICI, 364 .via_type = MAC_VIA_IICI,
365 .scsi_type = MAC_SCSI_OLD, 365 .scsi_type = MAC_SCSI_LC,
366 .scc_type = MAC_SCC_II, 366 .scc_type = MAC_SCC_II,
367 .nubus_type = MAC_NUBUS, 367 .nubus_type = MAC_NUBUS,
368 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 368 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -371,7 +371,7 @@ static struct mac_model mac_data_table[] = {
371 .name = "LC III", 371 .name = "LC III",
372 .adb_type = MAC_ADB_IISI, 372 .adb_type = MAC_ADB_IISI,
373 .via_type = MAC_VIA_IICI, 373 .via_type = MAC_VIA_IICI,
374 .scsi_type = MAC_SCSI_OLD, 374 .scsi_type = MAC_SCSI_LC,
375 .scc_type = MAC_SCC_II, 375 .scc_type = MAC_SCC_II,
376 .nubus_type = MAC_NUBUS, 376 .nubus_type = MAC_NUBUS,
377 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 377 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -499,7 +499,7 @@ static struct mac_model mac_data_table[] = {
499 .name = "Performa 460", 499 .name = "Performa 460",
500 .adb_type = MAC_ADB_IISI, 500 .adb_type = MAC_ADB_IISI,
501 .via_type = MAC_VIA_IICI, 501 .via_type = MAC_VIA_IICI,
502 .scsi_type = MAC_SCSI_OLD, 502 .scsi_type = MAC_SCSI_LC,
503 .scc_type = MAC_SCC_II, 503 .scc_type = MAC_SCC_II,
504 .nubus_type = MAC_NUBUS, 504 .nubus_type = MAC_NUBUS,
505 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 505 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -526,7 +526,7 @@ static struct mac_model mac_data_table[] = {
526 .name = "Performa 520", 526 .name = "Performa 520",
527 .adb_type = MAC_ADB_CUDA, 527 .adb_type = MAC_ADB_CUDA,
528 .via_type = MAC_VIA_IICI, 528 .via_type = MAC_VIA_IICI,
529 .scsi_type = MAC_SCSI_CCL, 529 .scsi_type = MAC_SCSI_LC,
530 .scc_type = MAC_SCC_II, 530 .scc_type = MAC_SCC_II,
531 .nubus_type = MAC_NUBUS, 531 .nubus_type = MAC_NUBUS,
532 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 532 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -535,7 +535,7 @@ static struct mac_model mac_data_table[] = {
535 .name = "Performa 550", 535 .name = "Performa 550",
536 .adb_type = MAC_ADB_CUDA, 536 .adb_type = MAC_ADB_CUDA,
537 .via_type = MAC_VIA_IICI, 537 .via_type = MAC_VIA_IICI,
538 .scsi_type = MAC_SCSI_CCL, 538 .scsi_type = MAC_SCSI_LC,
539 .scc_type = MAC_SCC_II, 539 .scc_type = MAC_SCC_II,
540 .nubus_type = MAC_NUBUS, 540 .nubus_type = MAC_NUBUS,
541 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 541 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -567,7 +567,7 @@ static struct mac_model mac_data_table[] = {
567 .name = "TV", 567 .name = "TV",
568 .adb_type = MAC_ADB_CUDA, 568 .adb_type = MAC_ADB_CUDA,
569 .via_type = MAC_VIA_IICI, 569 .via_type = MAC_VIA_IICI,
570 .scsi_type = MAC_SCSI_CCL, 570 .scsi_type = MAC_SCSI_LC,
571 .scc_type = MAC_SCC_II, 571 .scc_type = MAC_SCC_II,
572 .nubus_type = MAC_NUBUS, 572 .nubus_type = MAC_NUBUS,
573 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 573 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -576,7 +576,7 @@ static struct mac_model mac_data_table[] = {
576 .name = "Performa 600", 576 .name = "Performa 600",
577 .adb_type = MAC_ADB_IISI, 577 .adb_type = MAC_ADB_IISI,
578 .via_type = MAC_VIA_IICI, 578 .via_type = MAC_VIA_IICI,
579 .scsi_type = MAC_SCSI_OLD, 579 .scsi_type = MAC_SCSI_LC,
580 .scc_type = MAC_SCC_II, 580 .scc_type = MAC_SCC_II,
581 .nubus_type = MAC_NUBUS, 581 .nubus_type = MAC_NUBUS,
582 .floppy_type = MAC_FLOPPY_SWIM_ADDR2, 582 .floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -1109,8 +1109,10 @@ int __init mac_platform_init(void)
1109 platform_device_register_simple("mac_scsi", 0, 1109 platform_device_register_simple("mac_scsi", 0,
1110 mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc)); 1110 mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
1111 break; 1111 break;
1112 case MAC_SCSI_CCL: 1112 case MAC_SCSI_LC:
1113 /* Addresses from the Color Classic Developer Note. 1113 /* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
1114 * Also from the Developer Notes for Classic II, LC III,
1115 * Color Classic and IIvx.
1114 * $50F0 6000 - $50F0 7FFF: SCSI handshake 1116 * $50F0 6000 - $50F0 7FFF: SCSI handshake
1115 * $50F1 0000 - $50F1 1FFF: SCSI 1117 * $50F1 0000 - $50F1 1FFF: SCSI
1116 * $50F1 2000 - $50F1 3FFF: SCSI DMA 1118 * $50F1 2000 - $50F1 3FFF: SCSI DMA
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 1bb3ce6634d3..e6a3b56c6481 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -168,49 +168,3 @@ int mvme147_set_clock_mmss (unsigned long nowtime)
168{ 168{
169 return 0; 169 return 0;
170} 170}
171
172/*------------------- Serial console stuff ------------------------*/
173
174static void scc_delay (void)
175{
176 int n;
177 volatile int trash;
178
179 for (n = 0; n < 20; n++)
180 trash = n;
181}
182
183static void scc_write (char ch)
184{
185 volatile char *p = (volatile char *)M147_SCC_A_ADDR;
186
187 do {
188 scc_delay();
189 }
190 while (!(*p & 4));
191 scc_delay();
192 *p = 8;
193 scc_delay();
194 *p = ch;
195}
196
197
198void m147_scc_write (struct console *co, const char *str, unsigned count)
199{
200 unsigned long flags;
201
202 local_irq_save(flags);
203
204 while (count--)
205 {
206 if (*str == '\n')
207 scc_write ('\r');
208 scc_write (*str++);
209 }
210 local_irq_restore(flags);
211}
212
213void mvme147_init_console_port (struct console *co, int cflag)
214{
215 co->write = m147_scc_write;
216}
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 6ef7a81a3b12..1755e2f7137d 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -161,4 +161,4 @@ static int __init rtc_MK48T08_init(void)
161 printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION); 161 printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
162 return misc_register(&rtc_dev); 162 return misc_register(&rtc_dev);
163} 163}
164module_init(rtc_MK48T08_init); 164device_initcall(rtc_MK48T08_init);
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 8e211cc28dac..91d2068da1b9 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -34,5 +34,4 @@ $(obj)/simpleImage.%: vmlinux FORCE
34 $(call if_changed,strip) 34 $(call if_changed,strip)
35 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 35 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
36 36
37 37clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
38clean-files += simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile
index c4982d16e555..a3d2e42c3c97 100644
--- a/arch/microblaze/boot/dts/Makefile
+++ b/arch/microblaze/boot/dts/Makefile
@@ -16,5 +16,3 @@ quiet_cmd_cp = CP $< $@$2
16 16
17# Rule to build device tree blobs 17# Rule to build device tree blobs
18DTC_FLAGS := -p 1024 18DTC_FLAGS := -p 1024
19
20clean-files += *.dtb
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 60cb39deb533..ea2a9cd9b159 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -15,7 +15,7 @@
15 15
16#include <linux/param.h> 16#include <linux/param.h>
17 17
18extern inline void __delay(unsigned long loops) 18static inline void __delay(unsigned long loops)
19{ 19{
20 asm volatile ("# __delay \n\t" \ 20 asm volatile ("# __delay \n\t" \
21 "1: addi %0, %0, -1\t\n" \ 21 "1: addi %0, %0, -1\t\n" \
@@ -43,7 +43,7 @@ extern inline void __delay(unsigned long loops)
43 43
44extern unsigned long loops_per_jiffy; 44extern unsigned long loops_per_jiffy;
45 45
46extern inline void __udelay(unsigned int x) 46static inline void __udelay(unsigned int x)
47{ 47{
48 48
49 unsigned long long tmp = 49 unsigned long long tmp =
diff --git a/arch/microblaze/include/asm/kgdb.h b/arch/microblaze/include/asm/kgdb.h
index 78b17d40b235..ad27acb2b15f 100644
--- a/arch/microblaze/include/asm/kgdb.h
+++ b/arch/microblaze/include/asm/kgdb.h
@@ -23,6 +23,9 @@ static inline void arch_kgdb_breakpoint(void)
23 __asm__ __volatile__("brki r16, 0x18;"); 23 __asm__ __volatile__("brki r16, 0x18;");
24} 24}
25 25
26struct pt_regs;
27asmlinkage void microblaze_kgdb_break(struct pt_regs *regs);
28
26#endif /* __ASSEMBLY__ */ 29#endif /* __ASSEMBLY__ */
27#endif /* __MICROBLAZE_KGDB_H__ */ 30#endif /* __MICROBLAZE_KGDB_H__ */
28#endif /* __KERNEL__ */ 31#endif /* __KERNEL__ */
diff --git a/arch/microblaze/include/asm/linkage.h b/arch/microblaze/include/asm/linkage.h
index 3a8e36d057eb..0540bbaad897 100644
--- a/arch/microblaze/include/asm/linkage.h
+++ b/arch/microblaze/include/asm/linkage.h
@@ -1,15 +1 @@
1/* #include <asm-generic/linkage.h>
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_LINKAGE_H
10#define _ASM_MICROBLAZE_LINKAGE_H
11
12#define __ALIGN .align 4
13#define __ALIGN_STR ".align 4"
14
15#endif /* _ASM_MICROBLAZE_LINKAGE_H */
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 7fdf7fabc7d7..61436d69775c 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -60,7 +60,7 @@ extern unsigned long get_zero_page_fast(void);
60 60
61extern void __bad_pte(pmd_t *pmd); 61extern void __bad_pte(pmd_t *pmd);
62 62
63extern inline pgd_t *get_pgd_slow(void) 63static inline pgd_t *get_pgd_slow(void)
64{ 64{
65 pgd_t *ret; 65 pgd_t *ret;
66 66
@@ -70,7 +70,7 @@ extern inline pgd_t *get_pgd_slow(void)
70 return ret; 70 return ret;
71} 71}
72 72
73extern inline pgd_t *get_pgd_fast(void) 73static inline pgd_t *get_pgd_fast(void)
74{ 74{
75 unsigned long *ret; 75 unsigned long *ret;
76 76
@@ -84,14 +84,14 @@ extern inline pgd_t *get_pgd_fast(void)
84 return (pgd_t *)ret; 84 return (pgd_t *)ret;
85} 85}
86 86
87extern inline void free_pgd_fast(pgd_t *pgd) 87static inline void free_pgd_fast(pgd_t *pgd)
88{ 88{
89 *(unsigned long **)pgd = pgd_quicklist; 89 *(unsigned long **)pgd = pgd_quicklist;
90 pgd_quicklist = (unsigned long *) pgd; 90 pgd_quicklist = (unsigned long *) pgd;
91 pgtable_cache_size++; 91 pgtable_cache_size++;
92} 92}
93 93
94extern inline void free_pgd_slow(pgd_t *pgd) 94static inline void free_pgd_slow(pgd_t *pgd)
95{ 95{
96 free_page((unsigned long)pgd); 96 free_page((unsigned long)pgd);
97} 97}
@@ -146,19 +146,19 @@ static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
146 return (pte_t *)ret; 146 return (pte_t *)ret;
147} 147}
148 148
149extern inline void pte_free_fast(pte_t *pte) 149static inline void pte_free_fast(pte_t *pte)
150{ 150{
151 *(unsigned long **)pte = pte_quicklist; 151 *(unsigned long **)pte = pte_quicklist;
152 pte_quicklist = (unsigned long *) pte; 152 pte_quicklist = (unsigned long *) pte;
153 pgtable_cache_size++; 153 pgtable_cache_size++;
154} 154}
155 155
156extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 156static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
157{ 157{
158 free_page((unsigned long)pte); 158 free_page((unsigned long)pte);
159} 159}
160 160
161extern inline void pte_free_slow(struct page *ptepage) 161static inline void pte_free_slow(struct page *ptepage)
162{ 162{
163 __free_page(ptepage); 163 __free_page(ptepage);
164} 164}
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
index 53cfaf34c343..04a5bece8168 100644
--- a/arch/microblaze/include/asm/syscall.h
+++ b/arch/microblaze/include/asm/syscall.h
@@ -97,7 +97,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
97 microblaze_set_syscall_arg(regs, i++, *args++); 97 microblaze_set_syscall_arg(regs, i++, *args++);
98} 98}
99 99
100asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 100asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs);
101asmlinkage void do_syscall_trace_leave(struct pt_regs *regs); 101asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
102 102
103static inline int syscall_get_arch(void) 103static inline int syscall_get_arch(void)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 59a89a64a865..62942fd12672 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -220,7 +220,7 @@ extern long __user_bad(void);
220 } else { \ 220 } else { \
221 __gu_err = -EFAULT; \ 221 __gu_err = -EFAULT; \
222 } \ 222 } \
223 x = (typeof(*(ptr)))__gu_val; \ 223 x = (__force typeof(*(ptr)))__gu_val; \
224 __gu_err; \ 224 __gu_err; \
225}) 225})
226 226
@@ -242,7 +242,7 @@ extern long __user_bad(void);
242 default: \ 242 default: \
243 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ 243 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
244 } \ 244 } \
245 x = (__typeof__(*(ptr))) __gu_val; \ 245 x = (__force __typeof__(*(ptr))) __gu_val; \
246 __gu_err; \ 246 __gu_err; \
247}) 247})
248 248
@@ -306,7 +306,7 @@ extern long __user_bad(void);
306 306
307#define __put_user_check(x, ptr, size) \ 307#define __put_user_check(x, ptr, size) \
308({ \ 308({ \
309 typeof(*(ptr)) volatile __pu_val = x; \ 309 typeof(*(ptr)) volatile __pu_val = x; \
310 typeof(*(ptr)) __user *__pu_addr = (ptr); \ 310 typeof(*(ptr)) __user *__pu_addr = (ptr); \
311 int __pu_err = 0; \ 311 int __pu_err = 0; \
312 \ 312 \
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 0a53362d5548..76ed17b56fea 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
38 38
39#endif /* __ASSEMBLY__ */ 39#endif /* __ASSEMBLY__ */
40 40
41#define __NR_syscalls 388 41#define __NR_syscalls 389
42 42
43#endif /* _ASM_MICROBLAZE_UNISTD_H */ 43#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index c712677f8a2a..32850c73be09 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -403,5 +403,6 @@
403#define __NR_getrandom 385 403#define __NR_getrandom 385
404#define __NR_memfd_create 386 404#define __NR_memfd_create 386
405#define __NR_bpf 387 405#define __NR_bpf 387
406#define __NR_execveat 388
406 407
407#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ 408#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 08d50cc55e7d..f08bacaf8a95 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -16,7 +16,7 @@ extra-y := head.o vmlinux.lds
16 16
17obj-y += dma.o exceptions.o \ 17obj-y += dma.o exceptions.o \
18 hw_exception_handler.o intc.o irq.o \ 18 hw_exception_handler.o intc.o irq.o \
19 platform.o process.o prom.o prom_parse.o ptrace.o \ 19 platform.o process.o prom.o ptrace.o \
20 reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o 20 reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
21 21
22obj-y += cpu/ 22obj-y += cpu/
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index a6e44410672d..0bde47e4fa69 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -140,10 +140,10 @@ do { \
140/* It is used only first parameter for OP - for wic, wdc */ 140/* It is used only first parameter for OP - for wic, wdc */
141#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ 141#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
142do { \ 142do { \
143 int volatile temp = 0; \ 143 unsigned int volatile temp = 0; \
144 int align = ~(line_length - 1); \ 144 unsigned int align = ~(line_length - 1); \
145 end = ((end & align) == end) ? end - line_length : end & align; \ 145 end = ((end & align) == end) ? end - line_length : end & align; \
146 WARN_ON(end - start < 0); \ 146 WARN_ON(end < start); \
147 \ 147 \
148 __asm__ __volatile__ (" 1: " #op " %1, r0;" \ 148 __asm__ __volatile__ (" 1: " #op " %1, r0;" \
149 "cmpu %0, %1, %2;" \ 149 "cmpu %0, %1, %2;" \
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index 93c26cf50de5..a32daec96c12 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -33,7 +33,7 @@
33void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) 33void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
34{ 34{
35 struct pvr_s pvr; 35 struct pvr_s pvr;
36 int temp; /* for saving temp value */ 36 u32 temp; /* for saving temp value */
37 get_pvr(&pvr); 37 get_pvr(&pvr);
38 38
39 CI(ver_code, VERSION); 39 CI(ver_code, VERSION);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 4854285b26e7..85dbda4a08a8 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -22,7 +22,7 @@ static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
22 22
23void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) 23void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
24{ 24{
25 int i = 0; 25 u32 i = 0;
26 26
27 ci->use_instr = 27 ci->use_instr =
28 (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) | 28 (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 234acad79b9e..d1dd6e83d59b 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -41,8 +41,12 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
41 {"8.40.a", 0x18}, 41 {"8.40.a", 0x18},
42 {"8.40.b", 0x19}, 42 {"8.40.b", 0x19},
43 {"8.50.a", 0x1a}, 43 {"8.50.a", 0x1a},
44 {"8.50.b", 0x1c},
45 {"8.50.c", 0x1e},
44 {"9.0", 0x1b}, 46 {"9.0", 0x1b},
45 {"9.1", 0x1d}, 47 {"9.1", 0x1d},
48 {"9.2", 0x1f},
49 {"9.3", 0x20},
46 {NULL, 0}, 50 {NULL, 0},
47}; 51};
48 52
@@ -61,11 +65,14 @@ const struct family_string_key family_string_lookup[] = {
61 {"spartan3adsp", 0xc}, 65 {"spartan3adsp", 0xc},
62 {"spartan6", 0xd}, 66 {"spartan6", 0xd},
63 {"virtex6", 0xe}, 67 {"virtex6", 0xe},
68 {"virtex7", 0xf},
64 /* FIXME There is no key code defined for spartan2 */ 69 /* FIXME There is no key code defined for spartan2 */
65 {"spartan2", 0xf0}, 70 {"spartan2", 0xf0},
66 {"kintex7", 0x10}, 71 {"kintex7", 0x10},
67 {"artix7", 0x11}, 72 {"artix7", 0x11},
68 {"zynq7000", 0x12}, 73 {"zynq7000", 0x12},
74 {"UltraScale Virtex", 0x13},
75 {"UltraScale Kintex", 0x14},
69 {NULL, 0}, 76 {NULL, 0},
70}; 77};
71 78
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 15c7c12ea0e7..719feee1e043 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -148,17 +148,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
148 ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq); 148 ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
149 if (ret < 0) { 149 if (ret < 0) {
150 pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__); 150 pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
151 return -EINVAL; 151 return ret;
152 } 152 }
153 153
154 ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask); 154 ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
155 if (ret < 0) { 155 if (ret < 0) {
156 pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__); 156 pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
157 return -EINVAL; 157 return ret;
158 } 158 }
159 159
160 if (intr_mask > (u32)((1ULL << nr_irq) - 1)) 160 if (intr_mask >> nr_irq)
161 pr_info(" ERROR: Mismatch in kind-of-intr param\n"); 161 pr_warn("%s: mismatch in kind-of-intr param\n", __func__);
162 162
163 pr_info("%s: num_irq=%d, edge=0x%x\n", 163 pr_info("%s: num_irq=%d, edge=0x%x\n",
164 intc->full_name, nr_irq, intr_mask); 164 intc->full_name, nr_irq, intr_mask);
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
index 09a5e8286137..8736af5806ae 100644
--- a/arch/microblaze/kernel/kgdb.c
+++ b/arch/microblaze/kernel/kgdb.c
@@ -12,6 +12,7 @@
12#include <linux/io.h> 12#include <linux/io.h>
13#include <asm/cacheflush.h> 13#include <asm/cacheflush.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/kgdb.h>
15#include <asm/pvr.h> 16#include <asm/pvr.h>
16 17
17#define GDB_REG 0 18#define GDB_REG 0
@@ -35,9 +36,10 @@ struct pvr_s pvr;
35 36
36void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) 37void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
37{ 38{
38 int i; 39 unsigned int i;
39 unsigned long *pt_regb = (unsigned long *)regs; 40 unsigned long *pt_regb = (unsigned long *)regs;
40 int temp; 41 int temp;
42
41 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ 43 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
42 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++) 44 for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
43 gdb_regs[i] = pt_regb[i]; 45 gdb_regs[i] = pt_regb[i];
@@ -67,7 +69,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
67 69
68void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) 70void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
69{ 71{
70 int i; 72 unsigned int i;
71 unsigned long *pt_regb = (unsigned long *)regs; 73 unsigned long *pt_regb = (unsigned long *)regs;
72 74
73 /* pt_regs and gdb_regs have the same 37 values. 75 /* pt_regs and gdb_regs have the same 37 values.
@@ -77,7 +79,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
77 pt_regb[i] = gdb_regs[i]; 79 pt_regb[i] = gdb_regs[i];
78} 80}
79 81
80void microblaze_kgdb_break(struct pt_regs *regs) 82asmlinkage void microblaze_kgdb_break(struct pt_regs *regs)
81{ 83{
82 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) 84 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
83 return; 85 return;
@@ -91,7 +93,7 @@ void microblaze_kgdb_break(struct pt_regs *regs)
91/* untested */ 93/* untested */
92void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) 94void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
93{ 95{
94 int i; 96 unsigned int i;
95 unsigned long *pt_regb = (unsigned long *)(p->thread.regs); 97 unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
96 98
97 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */ 99 /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
deleted file mode 100644
index 068762f55fd6..000000000000
--- a/arch/microblaze/kernel/prom_parse.c
+++ /dev/null
@@ -1,35 +0,0 @@
1#undef DEBUG
2
3#include <linux/export.h>
4#include <linux/kernel.h>
5#include <linux/string.h>
6#include <linux/ioport.h>
7#include <linux/etherdevice.h>
8#include <linux/of_address.h>
9#include <asm/prom.h>
10
11void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
12 unsigned long *busno, unsigned long *phys, unsigned long *size)
13{
14 const u32 *dma_window;
15 u32 cells;
16 const unsigned char *prop;
17
18 dma_window = dma_window_prop;
19
20 /* busno is always one cell */
21 *busno = *(dma_window++);
22
23 prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
24 if (!prop)
25 prop = of_get_property(dn, "#address-cells", NULL);
26
27 cells = prop ? *(u32 *)prop : of_n_addr_cells(dn);
28 *phys = of_read_number(dma_window, cells);
29
30 dma_window += cells;
31
32 prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
33 cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
34 *size = of_read_number(dma_window, cells);
35}
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index bb10637ce688..8cfa98cadf3d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -132,9 +132,9 @@ long arch_ptrace(struct task_struct *child, long request,
132 return rval; 132 return rval;
133} 133}
134 134
135asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) 135asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs)
136{ 136{
137 long ret = 0; 137 unsigned long ret = 0;
138 138
139 secure_computing_strict(regs->r12); 139 secure_computing_strict(regs->r12);
140 140
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index fbe58c6554a8..bab4c8330ef4 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/of_platform.h> 11#include <linux/of_platform.h>
12#include <asm/prom.h>
13 12
14/* Trigger specific functions */ 13/* Trigger specific functions */
15#ifdef CONFIG_GPIOLIB 14#ifdef CONFIG_GPIOLIB
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 8955a3829cf0..235706055b7f 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -158,7 +158,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
158{ 158{
159 struct rt_sigframe __user *frame; 159 struct rt_sigframe __user *frame;
160 int err = 0, sig = ksig->sig; 160 int err = 0, sig = ksig->sig;
161 int signal; 161 unsigned long signal;
162 unsigned long address = 0; 162 unsigned long address = 0;
163#ifdef CONFIG_MMU 163#ifdef CONFIG_MMU
164 pmd_t *pmdp; 164 pmd_t *pmdp;
@@ -174,7 +174,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
174 && current_thread_info()->exec_domain->signal_invmap 174 && current_thread_info()->exec_domain->signal_invmap
175 && sig < 32 175 && sig < 32
176 ? current_thread_info()->exec_domain->signal_invmap[sig] 176 ? current_thread_info()->exec_domain->signal_invmap[sig]
177 : sig; 177 : (unsigned long)sig;
178 178
179 if (ksig->ka.sa.sa_flags & SA_SIGINFO) 179 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
180 err |= copy_siginfo_to_user(&frame->info, &ksig->info); 180 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 0166e890486c..29c8568ec55c 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -388,3 +388,4 @@ ENTRY(sys_call_table)
388 .long sys_getrandom /* 385 */ 388 .long sys_getrandom /* 385 */
389 .long sys_memfd_create 389 .long sys_memfd_create
390 .long sys_bpf 390 .long sys_bpf
391 .long sys_execveat
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 1f7b8d449668..61c04eed14d5 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -59,7 +59,7 @@ struct stack_trace;
59 * 59 *
60 * Return - Number of stack bytes the instruction reserves or reclaims 60 * Return - Number of stack bytes the instruction reserves or reclaims
61 */ 61 */
62inline long get_frame_size(unsigned long instr) 62static inline long get_frame_size(unsigned long instr)
63{ 63{
64 return abs((s16)(instr & 0xFFFF)); 64 return abs((s16)(instr & 0xFFFF));
65} 65}
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 30e334e823bd..2ae12825529f 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
20 select PREEMPT_NOTIFIERS 20 select PREEMPT_NOTIFIERS
21 select ANON_INODES 21 select ANON_INODES
22 select KVM_MMIO 22 select KVM_MMIO
23 select SRCU
23 ---help--- 24 ---help---
24 Support for hosting Guest kernels. 25 Support for hosting Guest kernels.
25 Currently supported on MIPS32 processors. 26 Currently supported on MIPS32 processors.
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 5ec2a7bae02c..f2355e3e65a1 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
173} 173}
174 174
175struct pci_ops bcm1480_pci_ops = { 175struct pci_ops bcm1480_pci_ops = {
176 bcm1480_pcibios_read, 176 .read = bcm1480_pcibios_read,
177 bcm1480_pcibios_write, 177 .write = bcm1480_pcibios_write,
178}; 178};
179 179
180static struct resource bcm1480_mem_resource = { 180static struct resource bcm1480_mem_resource = {
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index d07e04121cc6..bedb72bd3a27 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
327 327
328 328
329static struct pci_ops octeon_pci_ops = { 329static struct pci_ops octeon_pci_ops = {
330 octeon_read_config, 330 .read = octeon_read_config,
331 octeon_write_config, 331 .write = octeon_write_config,
332}; 332};
333 333
334static struct resource octeon_pci_mem_resource = { 334static struct resource octeon_pci_mem_resource = {
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 5e36c33e5543..eb4a17ba4a53 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
1792} 1792}
1793 1793
1794static struct pci_ops octeon_pcie0_ops = { 1794static struct pci_ops octeon_pcie0_ops = {
1795 octeon_pcie0_read_config, 1795 .read = octeon_pcie0_read_config,
1796 octeon_pcie0_write_config, 1796 .write = octeon_pcie0_write_config,
1797}; 1797};
1798 1798
1799static struct resource octeon_pcie0_mem_resource = { 1799static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
1813}; 1813};
1814 1814
1815static struct pci_ops octeon_pcie1_ops = { 1815static struct pci_ops octeon_pcie1_ops = {
1816 octeon_pcie1_read_config, 1816 .read = octeon_pcie1_read_config,
1817 octeon_pcie1_write_config, 1817 .write = octeon_pcie1_write_config,
1818}; 1818};
1819 1819
1820static struct resource octeon_pcie1_mem_resource = { 1820static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
1834}; 1834};
1835 1835
1836static struct pci_ops octeon_dummy_ops = { 1836static struct pci_ops octeon_dummy_ops = {
1837 octeon_dummy_read_config, 1837 .read = octeon_dummy_read_config,
1838 octeon_dummy_write_config, 1838 .write = octeon_dummy_write_config,
1839}; 1839};
1840 1840
1841static struct resource octeon_dummy_mem_resource = { 1841static struct resource octeon_dummy_mem_resource = {
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 471ff398090c..613ca1e55b4b 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -228,8 +228,8 @@ static int pci_ampci_write_config(struct pci_bus *bus, unsigned int devfn,
228} 228}
229 229
230static struct pci_ops pci_direct_ampci = { 230static struct pci_ops pci_direct_ampci = {
231 pci_ampci_read_config, 231 .read = pci_ampci_read_config,
232 pci_ampci_write_config, 232 .write = pci_ampci_write_config,
233}; 233};
234 234
235/* 235/*
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index f5769f19ae25..11850f310fb4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
21 select PREEMPT_NOTIFIERS 21 select PREEMPT_NOTIFIERS
22 select ANON_INODES 22 select ANON_INODES
23 select HAVE_KVM_EVENTFD 23 select HAVE_KVM_EVENTFD
24 select SRCU
24 25
25config KVM_BOOK3S_HANDLER 26config KVM_BOOK3S_HANDLER
26 bool 27 bool
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index f22387598040..94170e4f2ce7 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
399} 399}
400 400
401static struct pci_ops scc_pciex_pci_ops = { 401static struct pci_ops scc_pciex_pci_ops = {
402 scc_pciex_read_config, 402 .read = scc_pciex_read_config,
403 scc_pciex_write_config, 403 .write = scc_pciex_write_config,
404}; 404};
405 405
406static void pciex_clear_intr_all(unsigned int __iomem *base) 406static void pciex_clear_intr_all(unsigned int __iomem *base)
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 04702db35d45..f4071a67ad00 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -133,17 +133,23 @@ static void __init fixup_bus_range(struct device_node *bridge)
133 |(((unsigned int)(off)) & 0xFCUL) \ 133 |(((unsigned int)(off)) & 0xFCUL) \
134 |1UL) 134 |1UL)
135 135
136static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose, 136static void __iomem *macrisc_cfg_map_bus(struct pci_bus *bus,
137 u8 bus, u8 dev_fn, u8 offset) 137 unsigned int dev_fn,
138 int offset)
138{ 139{
139 unsigned int caddr; 140 unsigned int caddr;
141 struct pci_controller *hose;
140 142
141 if (bus == hose->first_busno) { 143 hose = pci_bus_to_host(bus);
144 if (hose == NULL)
145 return NULL;
146
147 if (bus->number == hose->first_busno) {
142 if (dev_fn < (11 << 3)) 148 if (dev_fn < (11 << 3))
143 return NULL; 149 return NULL;
144 caddr = MACRISC_CFA0(dev_fn, offset); 150 caddr = MACRISC_CFA0(dev_fn, offset);
145 } else 151 } else
146 caddr = MACRISC_CFA1(bus, dev_fn, offset); 152 caddr = MACRISC_CFA1(bus->number, dev_fn, offset);
147 153
148 /* Uninorth will return garbage if we don't read back the value ! */ 154 /* Uninorth will return garbage if we don't read back the value ! */
149 do { 155 do {
@@ -154,129 +160,46 @@ static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
154 return hose->cfg_data + offset; 160 return hose->cfg_data + offset;
155} 161}
156 162
157static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
158 int offset, int len, u32 *val)
159{
160 struct pci_controller *hose;
161 volatile void __iomem *addr;
162
163 hose = pci_bus_to_host(bus);
164 if (hose == NULL)
165 return PCIBIOS_DEVICE_NOT_FOUND;
166 if (offset >= 0x100)
167 return PCIBIOS_BAD_REGISTER_NUMBER;
168 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
169 if (!addr)
170 return PCIBIOS_DEVICE_NOT_FOUND;
171 /*
172 * Note: the caller has already checked that offset is
173 * suitably aligned and that len is 1, 2 or 4.
174 */
175 switch (len) {
176 case 1:
177 *val = in_8(addr);
178 break;
179 case 2:
180 *val = in_le16(addr);
181 break;
182 default:
183 *val = in_le32(addr);
184 break;
185 }
186 return PCIBIOS_SUCCESSFUL;
187}
188
189static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
190 int offset, int len, u32 val)
191{
192 struct pci_controller *hose;
193 volatile void __iomem *addr;
194
195 hose = pci_bus_to_host(bus);
196 if (hose == NULL)
197 return PCIBIOS_DEVICE_NOT_FOUND;
198 if (offset >= 0x100)
199 return PCIBIOS_BAD_REGISTER_NUMBER;
200 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
201 if (!addr)
202 return PCIBIOS_DEVICE_NOT_FOUND;
203 /*
204 * Note: the caller has already checked that offset is
205 * suitably aligned and that len is 1, 2 or 4.
206 */
207 switch (len) {
208 case 1:
209 out_8(addr, val);
210 break;
211 case 2:
212 out_le16(addr, val);
213 break;
214 default:
215 out_le32(addr, val);
216 break;
217 }
218 return PCIBIOS_SUCCESSFUL;
219}
220
221static struct pci_ops macrisc_pci_ops = 163static struct pci_ops macrisc_pci_ops =
222{ 164{
223 .read = macrisc_read_config, 165 .map_bus = macrisc_cfg_map_bus,
224 .write = macrisc_write_config, 166 .read = pci_generic_config_read,
167 .write = pci_generic_config_write,
225}; 168};
226 169
227#ifdef CONFIG_PPC32 170#ifdef CONFIG_PPC32
228/* 171/*
229 * Verify that a specific (bus, dev_fn) exists on chaos 172 * Verify that a specific (bus, dev_fn) exists on chaos
230 */ 173 */
231static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) 174static void __iomem *chaos_map_bus(struct pci_bus *bus, unsigned int devfn,
175 int offset)
232{ 176{
233 struct device_node *np; 177 struct device_node *np;
234 const u32 *vendor, *device; 178 const u32 *vendor, *device;
235 179
236 if (offset >= 0x100) 180 if (offset >= 0x100)
237 return PCIBIOS_BAD_REGISTER_NUMBER; 181 return NULL;
238 np = of_pci_find_child_device(bus->dev.of_node, devfn); 182 np = of_pci_find_child_device(bus->dev.of_node, devfn);
239 if (np == NULL) 183 if (np == NULL)
240 return PCIBIOS_DEVICE_NOT_FOUND; 184 return NULL;
241 185
242 vendor = of_get_property(np, "vendor-id", NULL); 186 vendor = of_get_property(np, "vendor-id", NULL);
243 device = of_get_property(np, "device-id", NULL); 187 device = of_get_property(np, "device-id", NULL);
244 if (vendor == NULL || device == NULL) 188 if (vendor == NULL || device == NULL)
245 return PCIBIOS_DEVICE_NOT_FOUND; 189 return NULL;
246 190
247 if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10) 191 if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
248 && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24)) 192 && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
249 return PCIBIOS_BAD_REGISTER_NUMBER; 193 return NULL;
250
251 return PCIBIOS_SUCCESSFUL;
252}
253 194
254static int 195 return macrisc_cfg_map_bus(bus, devfn, offset);
255chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
256 int len, u32 *val)
257{
258 int result = chaos_validate_dev(bus, devfn, offset);
259 if (result == PCIBIOS_BAD_REGISTER_NUMBER)
260 *val = ~0U;
261 if (result != PCIBIOS_SUCCESSFUL)
262 return result;
263 return macrisc_read_config(bus, devfn, offset, len, val);
264}
265
266static int
267chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
268 int len, u32 val)
269{
270 int result = chaos_validate_dev(bus, devfn, offset);
271 if (result != PCIBIOS_SUCCESSFUL)
272 return result;
273 return macrisc_write_config(bus, devfn, offset, len, val);
274} 196}
275 197
276static struct pci_ops chaos_pci_ops = 198static struct pci_ops chaos_pci_ops =
277{ 199{
278 .read = chaos_read_config, 200 .map_bus = chaos_map_bus,
279 .write = chaos_write_config, 201 .read = pci_generic_config_read,
202 .write = pci_generic_config_write,
280}; 203};
281 204
282static void __init setup_chaos(struct pci_controller *hose, 205static void __init setup_chaos(struct pci_controller *hose,
@@ -471,15 +394,24 @@ static struct pci_ops u3_ht_pci_ops =
471 |(((unsigned int)(off)) & 0xfcU) \ 394 |(((unsigned int)(off)) & 0xfcU) \
472 |1UL) 395 |1UL)
473 396
474static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose, 397static void __iomem *u4_pcie_cfg_map_bus(struct pci_bus *bus,
475 u8 bus, u8 dev_fn, int offset) 398 unsigned int dev_fn,
399 int offset)
476{ 400{
401 struct pci_controller *hose;
477 unsigned int caddr; 402 unsigned int caddr;
478 403
479 if (bus == hose->first_busno) { 404 if (offset >= 0x1000)
405 return NULL;
406
407 hose = pci_bus_to_host(bus);
408 if (!hose)
409 return NULL;
410
411 if (bus->number == hose->first_busno) {
480 caddr = U4_PCIE_CFA0(dev_fn, offset); 412 caddr = U4_PCIE_CFA0(dev_fn, offset);
481 } else 413 } else
482 caddr = U4_PCIE_CFA1(bus, dev_fn, offset); 414 caddr = U4_PCIE_CFA1(bus->number, dev_fn, offset);
483 415
484 /* Uninorth will return garbage if we don't read back the value ! */ 416 /* Uninorth will return garbage if we don't read back the value ! */
485 do { 417 do {
@@ -490,74 +422,11 @@ static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
490 return hose->cfg_data + offset; 422 return hose->cfg_data + offset;
491} 423}
492 424
493static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
494 int offset, int len, u32 *val)
495{
496 struct pci_controller *hose;
497 volatile void __iomem *addr;
498
499 hose = pci_bus_to_host(bus);
500 if (hose == NULL)
501 return PCIBIOS_DEVICE_NOT_FOUND;
502 if (offset >= 0x1000)
503 return PCIBIOS_BAD_REGISTER_NUMBER;
504 addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
505 if (!addr)
506 return PCIBIOS_DEVICE_NOT_FOUND;
507 /*
508 * Note: the caller has already checked that offset is
509 * suitably aligned and that len is 1, 2 or 4.
510 */
511 switch (len) {
512 case 1:
513 *val = in_8(addr);
514 break;
515 case 2:
516 *val = in_le16(addr);
517 break;
518 default:
519 *val = in_le32(addr);
520 break;
521 }
522 return PCIBIOS_SUCCESSFUL;
523}
524
525static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
526 int offset, int len, u32 val)
527{
528 struct pci_controller *hose;
529 volatile void __iomem *addr;
530
531 hose = pci_bus_to_host(bus);
532 if (hose == NULL)
533 return PCIBIOS_DEVICE_NOT_FOUND;
534 if (offset >= 0x1000)
535 return PCIBIOS_BAD_REGISTER_NUMBER;
536 addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
537 if (!addr)
538 return PCIBIOS_DEVICE_NOT_FOUND;
539 /*
540 * Note: the caller has already checked that offset is
541 * suitably aligned and that len is 1, 2 or 4.
542 */
543 switch (len) {
544 case 1:
545 out_8(addr, val);
546 break;
547 case 2:
548 out_le16(addr, val);
549 break;
550 default:
551 out_le32(addr, val);
552 break;
553 }
554 return PCIBIOS_SUCCESSFUL;
555}
556
557static struct pci_ops u4_pcie_pci_ops = 425static struct pci_ops u4_pcie_pci_ops =
558{ 426{
559 .read = u4_pcie_read_config, 427 .map_bus = u4_pcie_cfg_map_bus,
560 .write = u4_pcie_write_config, 428 .read = pci_generic_config_read,
429 .write = pci_generic_config_write,
561}; 430};
562 431
563static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev) 432static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 6455c1eada1a..271b67e7670c 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -645,61 +645,21 @@ mapped:
645 return pcie->cfg_type1 + offset; 645 return pcie->cfg_type1 + offset;
646} 646}
647 647
648static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
649 int offset, int len, u32 *val)
650{
651 void __iomem *cfg_addr;
652
653 cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
654 if (!cfg_addr)
655 return PCIBIOS_DEVICE_NOT_FOUND;
656
657 switch (len) {
658 case 1:
659 *val = in_8(cfg_addr);
660 break;
661 case 2:
662 *val = in_le16(cfg_addr);
663 break;
664 default:
665 *val = in_le32(cfg_addr);
666 break;
667 }
668
669 return PCIBIOS_SUCCESSFUL;
670}
671
672static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, 648static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
673 int offset, int len, u32 val) 649 int offset, int len, u32 val)
674{ 650{
675 struct pci_controller *hose = pci_bus_to_host(bus); 651 struct pci_controller *hose = pci_bus_to_host(bus);
676 void __iomem *cfg_addr;
677
678 cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
679 if (!cfg_addr)
680 return PCIBIOS_DEVICE_NOT_FOUND;
681 652
682 /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */ 653 /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
683 if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno) 654 if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
684 val &= 0xffffff00; 655 val &= 0xffffff00;
685 656
686 switch (len) { 657 return pci_generic_config_write(bus, devfn, offset, len, val);
687 case 1:
688 out_8(cfg_addr, val);
689 break;
690 case 2:
691 out_le16(cfg_addr, val);
692 break;
693 default:
694 out_le32(cfg_addr, val);
695 break;
696 }
697
698 return PCIBIOS_SUCCESSFUL;
699} 658}
700 659
701static struct pci_ops mpc83xx_pcie_ops = { 660static struct pci_ops mpc83xx_pcie_ops = {
702 .read = mpc83xx_pcie_read_config, 661 .map_bus = mpc83xx_pcie_remap_cfg,
662 .read = pci_generic_config_read,
703 .write = mpc83xx_pcie_write_config, 663 .write = mpc83xx_pcie_write_config,
704}; 664};
705 665
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 646db9c467d1..5fce52cf0e57 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
28 select HAVE_KVM_IRQCHIP 28 select HAVE_KVM_IRQCHIP
29 select HAVE_KVM_IRQFD 29 select HAVE_KVM_IRQFD
30 select HAVE_KVM_IRQ_ROUTING 30 select HAVE_KVM_IRQ_ROUTING
31 select SRCU
31 ---help--- 32 ---help---
32 Support hosting paravirtualized guest machines using the SIE 33 Support hosting paravirtualized guest machines using the SIE
33 virtualization capability on the mainframe. This should work 34 virtualization capability on the mainframe. This should work
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index f70c7892fa25..325df47f114d 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -245,7 +245,7 @@ static void fixup_read_and_payload_sizes(void)
245{ 245{
246 struct pci_dev *dev = NULL; 246 struct pci_dev *dev = NULL;
247 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ 247 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
248 int max_read_size = 0x2; /* Limit to 512 byte reads. */ 248 int max_read_size = PCI_EXP_DEVCTL_READRQ_512B;
249 u16 new_values; 249 u16 new_values;
250 250
251 /* Scan for the smallest maximum payload size. */ 251 /* Scan for the smallest maximum payload size. */
@@ -258,7 +258,7 @@ static void fixup_read_and_payload_sizes(void)
258 } 258 }
259 259
260 /* Now, set the max_payload_size for all devices to that value. */ 260 /* Now, set the max_payload_size for all devices to that value. */
261 new_values = (max_read_size << 12) | (smallest_max_payload << 5); 261 new_values = max_read_size | (smallest_max_payload << 5);
262 for_each_pci_dev(dev) 262 for_each_pci_dev(dev)
263 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 263 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
264 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ, 264 PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index 2298cb1daff7..1e968f7550dc 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
21 depends on HAVE_KVM && MODULES 21 depends on HAVE_KVM && MODULES
22 select PREEMPT_NOTIFIERS 22 select PREEMPT_NOTIFIERS
23 select ANON_INODES 23 select ANON_INODES
24 select SRCU
24 ---help--- 25 ---help---
25 Support hosting paravirtualized guest machines. 26 Support hosting paravirtualized guest machines.
26 27
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0dc9d0144a27..019f4e5c2b75 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -138,6 +138,7 @@ config X86
138 select HAVE_ACPI_APEI_NMI if ACPI 138 select HAVE_ACPI_APEI_NMI if ACPI
139 select ACPI_LEGACY_TABLES_LOOKUP if ACPI 139 select ACPI_LEGACY_TABLES_LOOKUP if ACPI
140 select X86_FEATURE_NAMES if PROC_FS 140 select X86_FEATURE_NAMES if PROC_FS
141 select SRCU
141 142
142config INSTRUCTION_DECODER 143config INSTRUCTION_DECODER
143 def_bool y 144 def_bool y
@@ -496,6 +497,17 @@ config X86_INTEL_LPSS
496 things like clock tree (common clock framework) and pincontrol 497 things like clock tree (common clock framework) and pincontrol
497 which are needed by the LPSS peripheral drivers. 498 which are needed by the LPSS peripheral drivers.
498 499
500config X86_AMD_PLATFORM_DEVICE
501 bool "AMD ACPI2Platform devices support"
502 depends on ACPI
503 select COMMON_CLK
504 select PINCTRL
505 ---help---
506 Select to interpret AMD specific ACPI device to platform device
507 such as I2C, UART, GPIO found on AMD Carrizo and later chipsets.
508 I2C and UART depend on COMMON_CLK to set clock. GPIO driver is
509 implemented under PINCTRL subsystem.
510
499config IOSF_MBI 511config IOSF_MBI
500 tristate "Intel SoC IOSF Sideband support for SoC platforms" 512 tristate "Intel SoC IOSF Sideband support for SoC platforms"
501 depends on PCI 513 depends on PCI
@@ -855,6 +867,10 @@ config SCHED_MC
855 867
856source "kernel/Kconfig.preempt" 868source "kernel/Kconfig.preempt"
857 869
870config UP_LATE_INIT
871 def_bool y
872 depends on !SMP && X86_LOCAL_APIC
873
858config X86_UP_APIC 874config X86_UP_APIC
859 bool "Local APIC support on uniprocessors" 875 bool "Local APIC support on uniprocessors"
860 depends on X86_32 && !SMP && !X86_32_NON_STANDARD 876 depends on X86_32 && !SMP && !X86_32_NON_STANDARD
diff --git a/arch/x86/boot/ctype.h b/arch/x86/boot/ctype.h
index 25e13403193c..020f137df7a2 100644
--- a/arch/x86/boot/ctype.h
+++ b/arch/x86/boot/ctype.h
@@ -1,6 +1,5 @@
1#ifndef BOOT_ISDIGIT_H 1#ifndef BOOT_CTYPE_H
2 2#define BOOT_CTYPE_H
3#define BOOT_ISDIGIT_H
4 3
5static inline int isdigit(int ch) 4static inline int isdigit(int ch)
6{ 5{
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 5df2869c874b..45a07684bbab 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -2,8 +2,6 @@
2 2
3#define DEFAULT_SERIAL_PORT 0x3f8 /* ttyS0 */ 3#define DEFAULT_SERIAL_PORT 0x3f8 /* ttyS0 */
4 4
5#define XMTRDY 0x20
6
7#define DLAB 0x80 5#define DLAB 0x80
8 6
9#define TXR 0 /* Transmit register (WRITE) */ 7#define TXR 0 /* Transmit register (WRITE) */
@@ -74,8 +72,8 @@ static void parse_earlyprintk(void)
74 static const int bases[] = { 0x3f8, 0x2f8 }; 72 static const int bases[] = { 0x3f8, 0x2f8 };
75 int idx = 0; 73 int idx = 0;
76 74
77 if (!strncmp(arg + pos, "ttyS", 4)) 75 /* += strlen("ttyS"); */
78 pos += 4; 76 pos += 4;
79 77
80 if (arg[pos++] == '1') 78 if (arg[pos++] == '1')
81 idx = 1; 79 idx = 1;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 82e8a1d44658..156ebcab4ada 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -179,8 +179,8 @@ sysenter_dispatch:
179sysexit_from_sys_call: 179sysexit_from_sys_call:
180 andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) 180 andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
181 /* clear IF, that popfq doesn't enable interrupts early */ 181 /* clear IF, that popfq doesn't enable interrupts early */
182 andl $~0x200,EFLAGS-R11(%rsp) 182 andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
183 movl RIP-R11(%rsp),%edx /* User %eip */ 183 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
184 CFI_REGISTER rip,rdx 184 CFI_REGISTER rip,rdx
185 RESTORE_ARGS 0,24,0,0,0,0 185 RESTORE_ARGS 0,24,0,0,0,0
186 xorq %r8,%r8 186 xorq %r8,%r8
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 465b309af254..92003f3c8a42 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -106,7 +106,14 @@ extern u32 native_safe_apic_wait_icr_idle(void);
106extern void native_apic_icr_write(u32 low, u32 id); 106extern void native_apic_icr_write(u32 low, u32 id);
107extern u64 native_apic_icr_read(void); 107extern u64 native_apic_icr_read(void);
108 108
109extern int x2apic_mode; 109static inline bool apic_is_x2apic_enabled(void)
110{
111 u64 msr;
112
113 if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
114 return false;
115 return msr & X2APIC_ENABLE;
116}
110 117
111#ifdef CONFIG_X86_X2APIC 118#ifdef CONFIG_X86_X2APIC
112/* 119/*
@@ -169,48 +176,23 @@ static inline u64 native_x2apic_icr_read(void)
169 return val; 176 return val;
170} 177}
171 178
179extern int x2apic_mode;
172extern int x2apic_phys; 180extern int x2apic_phys;
173extern int x2apic_preenabled; 181extern void __init check_x2apic(void);
174extern void check_x2apic(void); 182extern void x2apic_setup(void);
175extern void enable_x2apic(void);
176static inline int x2apic_enabled(void) 183static inline int x2apic_enabled(void)
177{ 184{
178 u64 msr; 185 return cpu_has_x2apic && apic_is_x2apic_enabled();
179
180 if (!cpu_has_x2apic)
181 return 0;
182
183 rdmsrl(MSR_IA32_APICBASE, msr);
184 if (msr & X2APIC_ENABLE)
185 return 1;
186 return 0;
187} 186}
188 187
189#define x2apic_supported() (cpu_has_x2apic) 188#define x2apic_supported() (cpu_has_x2apic)
190static inline void x2apic_force_phys(void)
191{
192 x2apic_phys = 1;
193}
194#else 189#else
195static inline void disable_x2apic(void) 190static inline void check_x2apic(void) { }
196{ 191static inline void x2apic_setup(void) { }
197} 192static inline int x2apic_enabled(void) { return 0; }
198static inline void check_x2apic(void)
199{
200}
201static inline void enable_x2apic(void)
202{
203}
204static inline int x2apic_enabled(void)
205{
206 return 0;
207}
208static inline void x2apic_force_phys(void)
209{
210}
211 193
212#define x2apic_preenabled 0 194#define x2apic_mode (0)
213#define x2apic_supported() 0 195#define x2apic_supported() (0)
214#endif 196#endif
215 197
216extern void enable_IR_x2apic(void); 198extern void enable_IR_x2apic(void);
@@ -219,7 +201,6 @@ extern int get_physical_broadcast(void);
219 201
220extern int lapic_get_maxlvt(void); 202extern int lapic_get_maxlvt(void);
221extern void clear_local_APIC(void); 203extern void clear_local_APIC(void);
222extern void connect_bsp_APIC(void);
223extern void disconnect_bsp_APIC(int virt_wire_setup); 204extern void disconnect_bsp_APIC(int virt_wire_setup);
224extern void disable_local_APIC(void); 205extern void disable_local_APIC(void);
225extern void lapic_shutdown(void); 206extern void lapic_shutdown(void);
@@ -227,8 +208,6 @@ extern int verify_local_APIC(void);
227extern void sync_Arb_IDs(void); 208extern void sync_Arb_IDs(void);
228extern void init_bsp_APIC(void); 209extern void init_bsp_APIC(void);
229extern void setup_local_APIC(void); 210extern void setup_local_APIC(void);
230extern void end_local_APIC_setup(void);
231extern void bsp_end_local_APIC_setup(void);
232extern void init_apic_mappings(void); 211extern void init_apic_mappings(void);
233void register_lapic_address(unsigned long address); 212void register_lapic_address(unsigned long address);
234extern void setup_boot_APIC_clock(void); 213extern void setup_boot_APIC_clock(void);
@@ -236,6 +215,9 @@ extern void setup_secondary_APIC_clock(void);
236extern int APIC_init_uniprocessor(void); 215extern int APIC_init_uniprocessor(void);
237extern int apic_force_enable(unsigned long addr); 216extern int apic_force_enable(unsigned long addr);
238 217
218extern int apic_bsp_setup(bool upmode);
219extern void apic_ap_setup(void);
220
239/* 221/*
240 * On 32bit this is mach-xxx local 222 * On 32bit this is mach-xxx local
241 */ 223 */
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 76659b67fd11..1f1297b46f83 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -83,7 +83,6 @@ For 32-bit we have the following conventions - kernel is built with
83#define SS 160 83#define SS 160
84 84
85#define ARGOFFSET R11 85#define ARGOFFSET R11
86#define SWFRAME ORIG_RAX
87 86
88 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0 87 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
89 subq $9*8+\addskip, %rsp 88 subq $9*8+\addskip, %rsp
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index aede2c347bde..90a54851aedc 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -174,6 +174,7 @@
174#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ 174#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
175#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ 175#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
176#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ 176#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
177#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
177#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ 178#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
178 179
179/* 180/*
@@ -388,6 +389,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
388#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) 389#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
389#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) 390#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
390#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) 391#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
392#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
391 393
392#if __GNUC__ >= 4 394#if __GNUC__ >= 4
393extern void warn_pre_alternatives(void); 395extern void warn_pre_alternatives(void);
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 61fd18b83b6c..12cb66f6d3a5 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -114,5 +114,10 @@ static inline void debug_stack_usage_inc(void) { }
114static inline void debug_stack_usage_dec(void) { } 114static inline void debug_stack_usage_dec(void) { }
115#endif /* X86_64 */ 115#endif /* X86_64 */
116 116
117#ifdef CONFIG_CPU_SUP_AMD
118extern void set_dr_addr_mask(unsigned long mask, int dr);
119#else
120static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
121#endif
117 122
118#endif /* _ASM_X86_DEBUGREG_H */ 123#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e97622f57722..0dbc08282291 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
207 if (config_enabled(CONFIG_X86_32)) 207 if (config_enabled(CONFIG_X86_32))
208 asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave)); 208 asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
209 else if (config_enabled(CONFIG_AS_FXSAVEQ)) 209 else if (config_enabled(CONFIG_AS_FXSAVEQ))
210 asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave)); 210 asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
211 else { 211 else {
212 /* Using "rex64; fxsave %0" is broken because, if the memory 212 /* Using "rex64; fxsave %0" is broken because, if the memory
213 * operand uses any extended registers for addressing, a second 213 * operand uses any extended registers for addressing, a second
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)
290 290
291static inline int restore_fpu_checking(struct task_struct *tsk) 291static inline int restore_fpu_checking(struct task_struct *tsk)
292{ 292{
293 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception 293 /*
294 is pending. Clear the x87 state here by setting it to fixed 294 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
295 values. "m" is a random variable that should be in L1 */ 295 * pending. Clear the x87 state here by setting it to fixed values.
296 * "m" is a random variable that should be in L1.
297 */
296 if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) { 298 if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
297 asm volatile( 299 asm volatile(
298 "fnclex\n\t" 300 "fnclex\n\t"
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index ef1c4d2d41ec..6c98be864a75 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -12,6 +12,7 @@
12 */ 12 */
13struct arch_hw_breakpoint { 13struct arch_hw_breakpoint {
14 unsigned long address; 14 unsigned long address;
15 unsigned long mask;
15 u8 len; 16 u8 len;
16 u8 type; 17 u8 type;
17}; 18};
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d69094..6eb6fcb83f63 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);
40 40
41static inline void kernel_fpu_begin(void) 41static inline void kernel_fpu_begin(void)
42{ 42{
43 WARN_ON_ONCE(!irq_fpu_usable());
44 preempt_disable(); 43 preempt_disable();
44 WARN_ON_ONCE(!irq_fpu_usable());
45 __kernel_fpu_begin(); 45 __kernel_fpu_begin();
46} 46}
47 47
@@ -51,6 +51,10 @@ static inline void kernel_fpu_end(void)
51 preempt_enable(); 51 preempt_enable();
52} 52}
53 53
54/* Must be called with preempt disabled */
55extern void kernel_fpu_disable(void);
56extern void kernel_fpu_enable(void);
57
54/* 58/*
55 * Some instructions like VIA's padlock instructions generate a spurious 59 * Some instructions like VIA's padlock instructions generate a spurious
56 * DNA fault but don't modify SSE registers. And these instructions 60 * DNA fault but don't modify SSE registers. And these instructions
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index bf006cce9418..2f91685fe1cd 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -279,6 +279,11 @@ static inline void disable_ioapic_support(void) { }
279#define native_ioapic_set_affinity NULL 279#define native_ioapic_set_affinity NULL
280#define native_setup_ioapic_entry NULL 280#define native_setup_ioapic_entry NULL
281#define native_eoi_ioapic_pin NULL 281#define native_eoi_ioapic_pin NULL
282
283static inline void setup_IO_APIC(void) { }
284static inline void enable_IO_APIC(void) { }
285static inline void setup_ioapic_dest(void) { }
286
282#endif 287#endif
283 288
284#endif /* _ASM_X86_IO_APIC_H */ 289#endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index b7747c4c2cf2..6224d316c405 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -33,8 +33,6 @@ struct irq_cfg;
33 33
34#ifdef CONFIG_IRQ_REMAP 34#ifdef CONFIG_IRQ_REMAP
35 35
36extern void setup_irq_remapping_ops(void);
37extern int irq_remapping_supported(void);
38extern void set_irq_remapping_broken(void); 36extern void set_irq_remapping_broken(void);
39extern int irq_remapping_prepare(void); 37extern int irq_remapping_prepare(void);
40extern int irq_remapping_enable(void); 38extern int irq_remapping_enable(void);
@@ -60,8 +58,6 @@ void irq_remap_modify_chip_defaults(struct irq_chip *chip);
60 58
61#else /* CONFIG_IRQ_REMAP */ 59#else /* CONFIG_IRQ_REMAP */
62 60
63static inline void setup_irq_remapping_ops(void) { }
64static inline int irq_remapping_supported(void) { return 0; }
65static inline void set_irq_remapping_broken(void) { } 61static inline void set_irq_remapping_broken(void) { }
66static inline int irq_remapping_prepare(void) { return -ENODEV; } 62static inline int irq_remapping_prepare(void) { return -ENODEV; }
67static inline int irq_remapping_enable(void) { return -ENODEV; } 63static inline int irq_remapping_enable(void) { return -ENODEV; }
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 51b26e895933..9b3de99dc004 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -190,7 +190,6 @@ enum mcp_flags {
190void machine_check_poll(enum mcp_flags flags, mce_banks_t *b); 190void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
191 191
192int mce_notify_irq(void); 192int mce_notify_irq(void);
193void mce_notify_process(void);
194 193
195DECLARE_PER_CPU(struct mce, injectm); 194DECLARE_PER_CPU(struct mce, injectm);
196 195
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 164e3f8d3c3d..fa1195dae425 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,8 +93,6 @@ extern raw_spinlock_t pci_config_lock;
93extern int (*pcibios_enable_irq)(struct pci_dev *dev); 93extern int (*pcibios_enable_irq)(struct pci_dev *dev);
94extern void (*pcibios_disable_irq)(struct pci_dev *dev); 94extern void (*pcibios_disable_irq)(struct pci_dev *dev);
95 95
96extern bool mp_should_keep_irq(struct device *dev);
97
98struct pci_raw_ops { 96struct pci_raw_ops {
99 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, 97 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
100 int reg, int len, u32 *val); 98 int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/pmc_atom.h b/arch/x86/include/asm/pmc_atom.h
index fc7a17c05d35..bc0fc0866553 100644
--- a/arch/x86/include/asm/pmc_atom.h
+++ b/arch/x86/include/asm/pmc_atom.h
@@ -53,6 +53,28 @@
53/* Sleep state counter is in units of of 32us */ 53/* Sleep state counter is in units of of 32us */
54#define PMC_TMR_SHIFT 5 54#define PMC_TMR_SHIFT 5
55 55
56/* Power status of power islands */
57#define PMC_PSS 0x98
58
59#define PMC_PSS_BIT_GBE BIT(0)
60#define PMC_PSS_BIT_SATA BIT(1)
61#define PMC_PSS_BIT_HDA BIT(2)
62#define PMC_PSS_BIT_SEC BIT(3)
63#define PMC_PSS_BIT_PCIE BIT(4)
64#define PMC_PSS_BIT_LPSS BIT(5)
65#define PMC_PSS_BIT_LPE BIT(6)
66#define PMC_PSS_BIT_DFX BIT(7)
67#define PMC_PSS_BIT_USH_CTRL BIT(8)
68#define PMC_PSS_BIT_USH_SUS BIT(9)
69#define PMC_PSS_BIT_USH_VCCS BIT(10)
70#define PMC_PSS_BIT_USH_VCCA BIT(11)
71#define PMC_PSS_BIT_OTG_CTRL BIT(12)
72#define PMC_PSS_BIT_OTG_VCCS BIT(13)
73#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14)
74#define PMC_PSS_BIT_OTG_VCCA BIT(15)
75#define PMC_PSS_BIT_USB BIT(16)
76#define PMC_PSS_BIT_USB_SUS BIT(17)
77
56/* These registers reflect D3 status of functions */ 78/* These registers reflect D3 status of functions */
57#define PMC_D3_STS_0 0xA0 79#define PMC_D3_STS_0 0xA0
58 80
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
deleted file mode 100644
index 0da7409f0bec..000000000000
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
2 * which needs to alter them. */
3
4static inline void smpboot_clear_io_apic_irqs(void)
5{
6#ifdef CONFIG_X86_IO_APIC
7 io_apic_irqs = 0;
8#endif
9}
10
11static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
12{
13 unsigned long flags;
14
15 spin_lock_irqsave(&rtc_lock, flags);
16 CMOS_WRITE(0xa, 0xf);
17 spin_unlock_irqrestore(&rtc_lock, flags);
18 local_flush_tlb();
19 pr_debug("1.\n");
20 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
21 start_eip >> 4;
22 pr_debug("2.\n");
23 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
24 start_eip & 0xf;
25 pr_debug("3.\n");
26}
27
28static inline void smpboot_restore_warm_reset_vector(void)
29{
30 unsigned long flags;
31
32 /*
33 * Install writable page 0 entry to set BIOS data area.
34 */
35 local_flush_tlb();
36
37 /*
38 * Paranoid: Set warm reset code and vector here back
39 * to default values.
40 */
41 spin_lock_irqsave(&rtc_lock, flags);
42 CMOS_WRITE(0, 0xf);
43 spin_unlock_irqrestore(&rtc_lock, flags);
44
45 *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
46}
47
48static inline void __init smpboot_setup_io_apic(void)
49{
50#ifdef CONFIG_X86_IO_APIC
51 /*
52 * Here we can be sure that there is an IO-APIC in the system. Let's
53 * go and set it up:
54 */
55 if (!skip_ioapic_setup && nr_ioapics)
56 setup_IO_APIC();
57 else {
58 nr_ioapics = 0;
59 }
60#endif
61}
62
63static inline void smpboot_clear_io_apic(void)
64{
65#ifdef CONFIG_X86_IO_APIC
66 nr_ioapics = 0;
67#endif
68}
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 547e344a6dc6..e82e95abc92b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -75,7 +75,6 @@ struct thread_info {
75#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 75#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
76#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 76#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
77#define TIF_SECCOMP 8 /* secure computing */ 77#define TIF_SECCOMP 8 /* secure computing */
78#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
79#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 78#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
80#define TIF_UPROBE 12 /* breakpointed or singlestepping */ 79#define TIF_UPROBE 12 /* breakpointed or singlestepping */
81#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 80#define TIF_NOTSC 16 /* TSC is not accessible in userland */
@@ -100,7 +99,6 @@ struct thread_info {
100#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 99#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
101#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 100#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
102#define _TIF_SECCOMP (1 << TIF_SECCOMP) 101#define _TIF_SECCOMP (1 << TIF_SECCOMP)
103#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
104#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) 102#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
105#define _TIF_UPROBE (1 << TIF_UPROBE) 103#define _TIF_UPROBE (1 << TIF_UPROBE)
106#define _TIF_NOTSC (1 << TIF_NOTSC) 104#define _TIF_NOTSC (1 << TIF_NOTSC)
@@ -140,7 +138,7 @@ struct thread_info {
140 138
141/* Only used for 64 bit */ 139/* Only used for 64 bit */
142#define _TIF_DO_NOTIFY_MASK \ 140#define _TIF_DO_NOTIFY_MASK \
143 (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ 141 (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
144 _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE) 142 _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
145 143
146/* flags to check in __switch_to() */ 144/* flags to check in __switch_to() */
@@ -170,6 +168,17 @@ static inline struct thread_info *current_thread_info(void)
170 return ti; 168 return ti;
171} 169}
172 170
171static inline unsigned long current_stack_pointer(void)
172{
173 unsigned long sp;
174#ifdef CONFIG_X86_64
175 asm("mov %%rsp,%0" : "=g" (sp));
176#else
177 asm("mov %%esp,%0" : "=g" (sp));
178#endif
179 return sp;
180}
181
173#else /* !__ASSEMBLY__ */ 182#else /* !__ASSEMBLY__ */
174 183
175/* how to get the thread information struct from ASM */ 184/* how to get the thread information struct from ASM */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 707adc6549d8..4e49d7dff78e 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_X86_TRAPS_H 1#ifndef _ASM_X86_TRAPS_H
2#define _ASM_X86_TRAPS_H 2#define _ASM_X86_TRAPS_H
3 3
4#include <linux/context_tracking_state.h>
4#include <linux/kprobes.h> 5#include <linux/kprobes.h>
5 6
6#include <asm/debugreg.h> 7#include <asm/debugreg.h>
@@ -110,6 +111,11 @@ asmlinkage void smp_thermal_interrupt(void);
110asmlinkage void mce_threshold_interrupt(void); 111asmlinkage void mce_threshold_interrupt(void);
111#endif 112#endif
112 113
114extern enum ctx_state ist_enter(struct pt_regs *regs);
115extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state);
116extern void ist_begin_non_atomic(struct pt_regs *regs);
117extern void ist_end_non_atomic(void);
118
113/* Interrupts/Exceptions */ 119/* Interrupts/Exceptions */
114enum { 120enum {
115 X86_TRAP_DE = 0, /* 0, Divide-by-zero */ 121 X86_TRAP_DE = 0, /* 0, Divide-by-zero */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 5eea09915a15..358dcd338915 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -55,9 +55,8 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
55 struct gnttab_map_grant_ref *kmap_ops, 55 struct gnttab_map_grant_ref *kmap_ops,
56 struct page **pages, unsigned int count); 56 struct page **pages, unsigned int count);
57extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 57extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
58 struct gnttab_map_grant_ref *kmap_ops, 58 struct gnttab_unmap_grant_ref *kunmap_ops,
59 struct page **pages, unsigned int count); 59 struct page **pages, unsigned int count);
60extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
61 60
62/* 61/*
63 * Helper functions to write or read unsigned long values to/from 62 * Helper functions to write or read unsigned long values to/from
@@ -154,21 +153,12 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
154 return mfn; 153 return mfn;
155 154
156 pfn = mfn_to_pfn_no_overrides(mfn); 155 pfn = mfn_to_pfn_no_overrides(mfn);
157 if (__pfn_to_mfn(pfn) != mfn) { 156 if (__pfn_to_mfn(pfn) != mfn)
158 /* 157 pfn = ~0;
159 * If this appears to be a foreign mfn (because the pfn
160 * doesn't map back to the mfn), then check the local override
161 * table to see if there's a better pfn to use.
162 *
163 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
164 */
165 pfn = m2p_find_override_pfn(mfn, ~0);
166 }
167 158
168 /* 159 /*
169 * pfn is ~0 if there are no entries in the m2p for mfn or if the 160 * pfn is ~0 if there are no entries in the m2p for mfn or the
170 * entry doesn't map back to the mfn and m2p_override doesn't have a 161 * entry doesn't map back to the mfn.
171 * valid entry for it.
172 */ 162 */
173 if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn)) 163 if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
174 pfn = mfn; 164 pfn = mfn;
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index f721330541cb..536240fa9a95 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -255,6 +255,10 @@
255/* Fam 16h MSRs */ 255/* Fam 16h MSRs */
256#define MSR_F16H_L2I_PERF_CTL 0xc0010230 256#define MSR_F16H_L2I_PERF_CTL 0xc0010230
257#define MSR_F16H_L2I_PERF_CTR 0xc0010231 257#define MSR_F16H_L2I_PERF_CTR 0xc0010231
258#define MSR_F16H_DR1_ADDR_MASK 0xc0011019
259#define MSR_F16H_DR2_ADDR_MASK 0xc001101a
260#define MSR_F16H_DR3_ADDR_MASK 0xc001101b
261#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
258 262
259/* Fam 15h MSRs */ 263/* Fam 15h MSRs */
260#define MSR_F15H_PERF_CTL 0xc0010200 264#define MSR_F15H_PERF_CTL 0xc0010200
@@ -362,6 +366,7 @@
362 366
363#define MSR_IA32_PERF_STATUS 0x00000198 367#define MSR_IA32_PERF_STATUS 0x00000198
364#define MSR_IA32_PERF_CTL 0x00000199 368#define MSR_IA32_PERF_CTL 0x00000199
369#define INTEL_PERF_CTL_MASK 0xffff
365#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 370#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
366#define MSR_AMD_PERF_STATUS 0xc0010063 371#define MSR_AMD_PERF_STATUS 0xc0010063
367#define MSR_AMD_PERF_CTL 0xc0010062 372#define MSR_AMD_PERF_CTL 0xc0010062
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b9e30daa0881..ae97ed0873c6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -653,6 +653,7 @@ static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
653 return gsi; 653 return gsi;
654} 654}
655 655
656#ifdef CONFIG_X86_LOCAL_APIC
656static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi, 657static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
657 int trigger, int polarity) 658 int trigger, int polarity)
658{ 659{
@@ -675,6 +676,7 @@ static void acpi_unregister_gsi_ioapic(u32 gsi)
675 mutex_unlock(&acpi_ioapic_lock); 676 mutex_unlock(&acpi_ioapic_lock);
676#endif 677#endif
677} 678}
679#endif
678 680
679int (*__acpi_register_gsi)(struct device *dev, u32 gsi, 681int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
680 int trigger, int polarity) = acpi_register_gsi_pic; 682 int trigger, int polarity) = acpi_register_gsi_pic;
@@ -843,13 +845,7 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
843 845
844static int __init acpi_parse_sbf(struct acpi_table_header *table) 846static int __init acpi_parse_sbf(struct acpi_table_header *table)
845{ 847{
846 struct acpi_table_boot *sb; 848 struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
847
848 sb = (struct acpi_table_boot *)table;
849 if (!sb) {
850 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
851 return -ENODEV;
852 }
853 849
854 sbf_port = sb->cmos_index; /* Save CMOS port */ 850 sbf_port = sb->cmos_index; /* Save CMOS port */
855 851
@@ -863,13 +859,7 @@ static struct resource *hpet_res __initdata;
863 859
864static int __init acpi_parse_hpet(struct acpi_table_header *table) 860static int __init acpi_parse_hpet(struct acpi_table_header *table)
865{ 861{
866 struct acpi_table_hpet *hpet_tbl; 862 struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
867
868 hpet_tbl = (struct acpi_table_hpet *)table;
869 if (!hpet_tbl) {
870 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
871 return -ENODEV;
872 }
873 863
874 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) { 864 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
875 printk(KERN_WARNING PREFIX "HPET timers must be located in " 865 printk(KERN_WARNING PREFIX "HPET timers must be located in "
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index b708738d016e..6a7c23ff21d3 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -135,14 +135,6 @@ static inline void apbt_clear_mapping(void)
135 apbt_virt_address = NULL; 135 apbt_virt_address = NULL;
136} 136}
137 137
138/*
139 * APBT timer interrupt enable / disable
140 */
141static inline int is_apbt_capable(void)
142{
143 return apbt_virt_address ? 1 : 0;
144}
145
146static int __init apbt_clockevent_register(void) 138static int __init apbt_clockevent_register(void)
147{ 139{
148 struct sfi_timer_table_entry *mtmr; 140 struct sfi_timer_table_entry *mtmr;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 29b5b18afa27..b665d241efad 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -134,9 +134,6 @@ static inline void imcr_apic_to_pic(void)
134 */ 134 */
135static int force_enable_local_apic __initdata; 135static int force_enable_local_apic __initdata;
136 136
137/* Control whether x2APIC mode is enabled or not */
138static bool nox2apic __initdata;
139
140/* 137/*
141 * APIC command line parameters 138 * APIC command line parameters
142 */ 139 */
@@ -161,33 +158,6 @@ static __init int setup_apicpmtimer(char *s)
161__setup("apicpmtimer", setup_apicpmtimer); 158__setup("apicpmtimer", setup_apicpmtimer);
162#endif 159#endif
163 160
164int x2apic_mode;
165#ifdef CONFIG_X86_X2APIC
166/* x2apic enabled before OS handover */
167int x2apic_preenabled;
168static int x2apic_disabled;
169static int __init setup_nox2apic(char *str)
170{
171 if (x2apic_enabled()) {
172 int apicid = native_apic_msr_read(APIC_ID);
173
174 if (apicid >= 255) {
175 pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
176 apicid);
177 return 0;
178 }
179
180 pr_warning("x2apic already enabled. will disable it\n");
181 } else
182 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
183
184 nox2apic = true;
185
186 return 0;
187}
188early_param("nox2apic", setup_nox2apic);
189#endif
190
191unsigned long mp_lapic_addr; 161unsigned long mp_lapic_addr;
192int disable_apic; 162int disable_apic;
193/* Disable local APIC timer from the kernel commandline or via dmi quirk */ 163/* Disable local APIC timer from the kernel commandline or via dmi quirk */
@@ -1475,7 +1445,7 @@ void setup_local_APIC(void)
1475#endif 1445#endif
1476} 1446}
1477 1447
1478void end_local_APIC_setup(void) 1448static void end_local_APIC_setup(void)
1479{ 1449{
1480 lapic_setup_esr(); 1450 lapic_setup_esr();
1481 1451
@@ -1492,116 +1462,184 @@ void end_local_APIC_setup(void)
1492 apic_pm_activate(); 1462 apic_pm_activate();
1493} 1463}
1494 1464
1495void __init bsp_end_local_APIC_setup(void) 1465/*
1466 * APIC setup function for application processors. Called from smpboot.c
1467 */
1468void apic_ap_setup(void)
1496{ 1469{
1470 setup_local_APIC();
1497 end_local_APIC_setup(); 1471 end_local_APIC_setup();
1498
1499 /*
1500 * Now that local APIC setup is completed for BP, configure the fault
1501 * handling for interrupt remapping.
1502 */
1503 irq_remap_enable_fault_handling();
1504
1505} 1472}
1506 1473
1507#ifdef CONFIG_X86_X2APIC 1474#ifdef CONFIG_X86_X2APIC
1508/* 1475int x2apic_mode;
1509 * Need to disable xapic and x2apic at the same time and then enable xapic mode
1510 */
1511static inline void __disable_x2apic(u64 msr)
1512{
1513 wrmsrl(MSR_IA32_APICBASE,
1514 msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1515 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1516}
1517 1476
1518static __init void disable_x2apic(void) 1477enum {
1478 X2APIC_OFF,
1479 X2APIC_ON,
1480 X2APIC_DISABLED,
1481};
1482static int x2apic_state;
1483
1484static inline void __x2apic_disable(void)
1519{ 1485{
1520 u64 msr; 1486 u64 msr;
1521 1487
1522 if (!cpu_has_x2apic) 1488 if (cpu_has_apic)
1523 return; 1489 return;
1524 1490
1525 rdmsrl(MSR_IA32_APICBASE, msr); 1491 rdmsrl(MSR_IA32_APICBASE, msr);
1526 if (msr & X2APIC_ENABLE) { 1492 if (!(msr & X2APIC_ENABLE))
1527 u32 x2apic_id = read_apic_id(); 1493 return;
1528 1494 /* Disable xapic and x2apic first and then reenable xapic mode */
1529 if (x2apic_id >= 255) 1495 wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1530 panic("Cannot disable x2apic, id: %08x\n", x2apic_id); 1496 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1497 printk_once(KERN_INFO "x2apic disabled\n");
1498}
1531 1499
1532 pr_info("Disabling x2apic\n"); 1500static inline void __x2apic_enable(void)
1533 __disable_x2apic(msr); 1501{
1502 u64 msr;
1534 1503
1535 if (nox2apic) { 1504 rdmsrl(MSR_IA32_APICBASE, msr);
1536 clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC); 1505 if (msr & X2APIC_ENABLE)
1537 setup_clear_cpu_cap(X86_FEATURE_X2APIC); 1506 return;
1538 } 1507 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
1508 printk_once(KERN_INFO "x2apic enabled\n");
1509}
1539 1510
1540 x2apic_disabled = 1; 1511static int __init setup_nox2apic(char *str)
1541 x2apic_mode = 0; 1512{
1513 if (x2apic_enabled()) {
1514 int apicid = native_apic_msr_read(APIC_ID);
1542 1515
1543 register_lapic_address(mp_lapic_addr); 1516 if (apicid >= 255) {
1517 pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
1518 apicid);
1519 return 0;
1520 }
1521 pr_warning("x2apic already enabled.\n");
1522 __x2apic_disable();
1544 } 1523 }
1524 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
1525 x2apic_state = X2APIC_DISABLED;
1526 x2apic_mode = 0;
1527 return 0;
1545} 1528}
1529early_param("nox2apic", setup_nox2apic);
1546 1530
1547void check_x2apic(void) 1531/* Called from cpu_init() to enable x2apic on (secondary) cpus */
1532void x2apic_setup(void)
1548{ 1533{
1549 if (x2apic_enabled()) { 1534 /*
1550 pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); 1535 * If x2apic is not in ON state, disable it if already enabled
1551 x2apic_preenabled = x2apic_mode = 1; 1536 * from BIOS.
1537 */
1538 if (x2apic_state != X2APIC_ON) {
1539 __x2apic_disable();
1540 return;
1552 } 1541 }
1542 __x2apic_enable();
1553} 1543}
1554 1544
1555void enable_x2apic(void) 1545static __init void x2apic_disable(void)
1556{ 1546{
1557 u64 msr; 1547 u32 x2apic_id;
1558 1548
1559 rdmsrl(MSR_IA32_APICBASE, msr); 1549 if (x2apic_state != X2APIC_ON)
1560 if (x2apic_disabled) { 1550 goto out;
1561 __disable_x2apic(msr); 1551
1552 x2apic_id = read_apic_id();
1553 if (x2apic_id >= 255)
1554 panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
1555
1556 __x2apic_disable();
1557 register_lapic_address(mp_lapic_addr);
1558out:
1559 x2apic_state = X2APIC_DISABLED;
1560 x2apic_mode = 0;
1561}
1562
1563static __init void x2apic_enable(void)
1564{
1565 if (x2apic_state != X2APIC_OFF)
1562 return; 1566 return;
1563 }
1564 1567
1565 if (!x2apic_mode) 1568 x2apic_mode = 1;
1569 x2apic_state = X2APIC_ON;
1570 __x2apic_enable();
1571}
1572
1573static __init void try_to_enable_x2apic(int remap_mode)
1574{
1575 if (x2apic_state == X2APIC_DISABLED)
1566 return; 1576 return;
1567 1577
1568 if (!(msr & X2APIC_ENABLE)) { 1578 if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
1569 printk_once(KERN_INFO "Enabling x2apic\n"); 1579 /* IR is required if there is APIC ID > 255 even when running
1570 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE); 1580 * under KVM
1581 */
1582 if (max_physical_apicid > 255 ||
1583 (IS_ENABLED(CONFIG_HYPERVISOR_GUEST) &&
1584 !hypervisor_x2apic_available())) {
1585 pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
1586 x2apic_disable();
1587 return;
1588 }
1589
1590 /*
1591 * without IR all CPUs can be addressed by IOAPIC/MSI
1592 * only in physical mode
1593 */
1594 x2apic_phys = 1;
1571 } 1595 }
1596 x2apic_enable();
1572} 1597}
1573#endif /* CONFIG_X86_X2APIC */
1574 1598
1575int __init enable_IR(void) 1599void __init check_x2apic(void)
1576{ 1600{
1577#ifdef CONFIG_IRQ_REMAP 1601 if (x2apic_enabled()) {
1578 if (!irq_remapping_supported()) { 1602 pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
1579 pr_debug("intr-remapping not supported\n"); 1603 x2apic_mode = 1;
1580 return -1; 1604 x2apic_state = X2APIC_ON;
1605 } else if (!cpu_has_x2apic) {
1606 x2apic_state = X2APIC_DISABLED;
1581 } 1607 }
1608}
1609#else /* CONFIG_X86_X2APIC */
1610static int __init validate_x2apic(void)
1611{
1612 if (!apic_is_x2apic_enabled())
1613 return 0;
1614 /*
1615 * Checkme: Can we simply turn off x2apic here instead of panic?
1616 */
1617 panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
1618}
1619early_initcall(validate_x2apic);
1582 1620
1583 if (!x2apic_preenabled && skip_ioapic_setup) { 1621static inline void try_to_enable_x2apic(int remap_mode) { }
1584 pr_info("Skipped enabling intr-remap because of skipping " 1622static inline void __x2apic_enable(void) { }
1585 "io-apic setup\n"); 1623#endif /* !CONFIG_X86_X2APIC */
1624
1625static int __init try_to_enable_IR(void)
1626{
1627#ifdef CONFIG_X86_IO_APIC
1628 if (!x2apic_enabled() && skip_ioapic_setup) {
1629 pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1586 return -1; 1630 return -1;
1587 } 1631 }
1588
1589 return irq_remapping_enable();
1590#endif 1632#endif
1591 return -1; 1633 return irq_remapping_enable();
1592} 1634}
1593 1635
1594void __init enable_IR_x2apic(void) 1636void __init enable_IR_x2apic(void)
1595{ 1637{
1596 unsigned long flags; 1638 unsigned long flags;
1597 int ret, x2apic_enabled = 0; 1639 int ret, ir_stat;
1598 int hardware_init_ret;
1599
1600 /* Make sure irq_remap_ops are initialized */
1601 setup_irq_remapping_ops();
1602 1640
1603 hardware_init_ret = irq_remapping_prepare(); 1641 ir_stat = irq_remapping_prepare();
1604 if (hardware_init_ret && !x2apic_supported()) 1642 if (ir_stat < 0 && !x2apic_supported())
1605 return; 1643 return;
1606 1644
1607 ret = save_ioapic_entries(); 1645 ret = save_ioapic_entries();
@@ -1614,49 +1652,13 @@ void __init enable_IR_x2apic(void)
1614 legacy_pic->mask_all(); 1652 legacy_pic->mask_all();
1615 mask_ioapic_entries(); 1653 mask_ioapic_entries();
1616 1654
1617 if (x2apic_preenabled && nox2apic) 1655 /* If irq_remapping_prepare() succeded, try to enable it */
1618 disable_x2apic(); 1656 if (ir_stat >= 0)
1619 1657 ir_stat = try_to_enable_IR();
1620 if (hardware_init_ret) 1658 /* ir_stat contains the remap mode or an error code */
1621 ret = -1; 1659 try_to_enable_x2apic(ir_stat);
1622 else
1623 ret = enable_IR();
1624
1625 if (!x2apic_supported())
1626 goto skip_x2apic;
1627 1660
1628 if (ret < 0) { 1661 if (ir_stat < 0)
1629 /* IR is required if there is APIC ID > 255 even when running
1630 * under KVM
1631 */
1632 if (max_physical_apicid > 255 ||
1633 !hypervisor_x2apic_available()) {
1634 if (x2apic_preenabled)
1635 disable_x2apic();
1636 goto skip_x2apic;
1637 }
1638 /*
1639 * without IR all CPUs can be addressed by IOAPIC/MSI
1640 * only in physical mode
1641 */
1642 x2apic_force_phys();
1643 }
1644
1645 if (ret == IRQ_REMAP_XAPIC_MODE) {
1646 pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
1647 goto skip_x2apic;
1648 }
1649
1650 x2apic_enabled = 1;
1651
1652 if (x2apic_supported() && !x2apic_mode) {
1653 x2apic_mode = 1;
1654 enable_x2apic();
1655 pr_info("Enabled x2apic\n");
1656 }
1657
1658skip_x2apic:
1659 if (ret < 0) /* IR enabling failed */
1660 restore_ioapic_entries(); 1662 restore_ioapic_entries();
1661 legacy_pic->restore_mask(); 1663 legacy_pic->restore_mask();
1662 local_irq_restore(flags); 1664 local_irq_restore(flags);
@@ -1847,82 +1849,8 @@ void __init register_lapic_address(unsigned long address)
1847 } 1849 }
1848} 1850}
1849 1851
1850/*
1851 * This initializes the IO-APIC and APIC hardware if this is
1852 * a UP kernel.
1853 */
1854int apic_version[MAX_LOCAL_APIC]; 1852int apic_version[MAX_LOCAL_APIC];
1855 1853
1856int __init APIC_init_uniprocessor(void)
1857{
1858 if (disable_apic) {
1859 pr_info("Apic disabled\n");
1860 return -1;
1861 }
1862#ifdef CONFIG_X86_64
1863 if (!cpu_has_apic) {
1864 disable_apic = 1;
1865 pr_info("Apic disabled by BIOS\n");
1866 return -1;
1867 }
1868#else
1869 if (!smp_found_config && !cpu_has_apic)
1870 return -1;
1871
1872 /*
1873 * Complain if the BIOS pretends there is one.
1874 */
1875 if (!cpu_has_apic &&
1876 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1877 pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
1878 boot_cpu_physical_apicid);
1879 return -1;
1880 }
1881#endif
1882
1883 default_setup_apic_routing();
1884
1885 verify_local_APIC();
1886 connect_bsp_APIC();
1887
1888#ifdef CONFIG_X86_64
1889 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
1890#else
1891 /*
1892 * Hack: In case of kdump, after a crash, kernel might be booting
1893 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
1894 * might be zero if read from MP tables. Get it from LAPIC.
1895 */
1896# ifdef CONFIG_CRASH_DUMP
1897 boot_cpu_physical_apicid = read_apic_id();
1898# endif
1899#endif
1900 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1901 setup_local_APIC();
1902
1903#ifdef CONFIG_X86_IO_APIC
1904 /*
1905 * Now enable IO-APICs, actually call clear_IO_APIC
1906 * We need clear_IO_APIC before enabling error vector
1907 */
1908 if (!skip_ioapic_setup && nr_ioapics)
1909 enable_IO_APIC();
1910#endif
1911
1912 bsp_end_local_APIC_setup();
1913
1914#ifdef CONFIG_X86_IO_APIC
1915 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1916 setup_IO_APIC();
1917 else {
1918 nr_ioapics = 0;
1919 }
1920#endif
1921
1922 x86_init.timers.setup_percpu_clockev();
1923 return 0;
1924}
1925
1926/* 1854/*
1927 * Local APIC interrupts 1855 * Local APIC interrupts
1928 */ 1856 */
@@ -2027,7 +1955,7 @@ __visible void smp_trace_error_interrupt(struct pt_regs *regs)
2027/** 1955/**
2028 * connect_bsp_APIC - attach the APIC to the interrupt system 1956 * connect_bsp_APIC - attach the APIC to the interrupt system
2029 */ 1957 */
2030void __init connect_bsp_APIC(void) 1958static void __init connect_bsp_APIC(void)
2031{ 1959{
2032#ifdef CONFIG_X86_32 1960#ifdef CONFIG_X86_32
2033 if (pic_mode) { 1961 if (pic_mode) {
@@ -2274,6 +2202,100 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
2274 } 2202 }
2275} 2203}
2276 2204
2205static void __init apic_bsp_up_setup(void)
2206{
2207#ifdef CONFIG_X86_64
2208 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
2209#else
2210 /*
2211 * Hack: In case of kdump, after a crash, kernel might be booting
2212 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
2213 * might be zero if read from MP tables. Get it from LAPIC.
2214 */
2215# ifdef CONFIG_CRASH_DUMP
2216 boot_cpu_physical_apicid = read_apic_id();
2217# endif
2218#endif
2219 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
2220}
2221
2222/**
2223 * apic_bsp_setup - Setup function for local apic and io-apic
2224 * @upmode: Force UP mode (for APIC_init_uniprocessor)
2225 *
2226 * Returns:
2227 * apic_id of BSP APIC
2228 */
2229int __init apic_bsp_setup(bool upmode)
2230{
2231 int id;
2232
2233 connect_bsp_APIC();
2234 if (upmode)
2235 apic_bsp_up_setup();
2236 setup_local_APIC();
2237
2238 if (x2apic_mode)
2239 id = apic_read(APIC_LDR);
2240 else
2241 id = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
2242
2243 enable_IO_APIC();
2244 end_local_APIC_setup();
2245 irq_remap_enable_fault_handling();
2246 setup_IO_APIC();
2247 /* Setup local timer */
2248 x86_init.timers.setup_percpu_clockev();
2249 return id;
2250}
2251
2252/*
2253 * This initializes the IO-APIC and APIC hardware if this is
2254 * a UP kernel.
2255 */
2256int __init APIC_init_uniprocessor(void)
2257{
2258 if (disable_apic) {
2259 pr_info("Apic disabled\n");
2260 return -1;
2261 }
2262#ifdef CONFIG_X86_64
2263 if (!cpu_has_apic) {
2264 disable_apic = 1;
2265 pr_info("Apic disabled by BIOS\n");
2266 return -1;
2267 }
2268#else
2269 if (!smp_found_config && !cpu_has_apic)
2270 return -1;
2271
2272 /*
2273 * Complain if the BIOS pretends there is one.
2274 */
2275 if (!cpu_has_apic &&
2276 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
2277 pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
2278 boot_cpu_physical_apicid);
2279 return -1;
2280 }
2281#endif
2282
2283 if (!smp_found_config)
2284 disable_ioapic_support();
2285
2286 default_setup_apic_routing();
2287 verify_local_APIC();
2288 apic_bsp_setup(true);
2289 return 0;
2290}
2291
2292#ifdef CONFIG_UP_LATE_INIT
2293void __init up_late_init(void)
2294{
2295 APIC_init_uniprocessor();
2296}
2297#endif
2298
2277/* 2299/*
2278 * Power management 2300 * Power management
2279 */ 2301 */
@@ -2359,9 +2381,9 @@ static void lapic_resume(void)
2359 mask_ioapic_entries(); 2381 mask_ioapic_entries();
2360 legacy_pic->mask_all(); 2382 legacy_pic->mask_all();
2361 2383
2362 if (x2apic_mode) 2384 if (x2apic_mode) {
2363 enable_x2apic(); 2385 __x2apic_enable();
2364 else { 2386 } else {
2365 /* 2387 /*
2366 * Make sure the APICBASE points to the right address 2388 * Make sure the APICBASE points to the right address
2367 * 2389 *
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3f5f60406ab1..f4dc2462a1ac 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1507,7 +1507,10 @@ void __init enable_IO_APIC(void)
1507 int i8259_apic, i8259_pin; 1507 int i8259_apic, i8259_pin;
1508 int apic, pin; 1508 int apic, pin;
1509 1509
1510 if (!nr_legacy_irqs()) 1510 if (skip_ioapic_setup)
1511 nr_ioapics = 0;
1512
1513 if (!nr_legacy_irqs() || !nr_ioapics)
1511 return; 1514 return;
1512 1515
1513 for_each_ioapic_pin(apic, pin) { 1516 for_each_ioapic_pin(apic, pin) {
@@ -2295,7 +2298,7 @@ static inline void __init check_timer(void)
2295 } 2298 }
2296 local_irq_disable(); 2299 local_irq_disable();
2297 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2300 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2298 if (x2apic_preenabled) 2301 if (apic_is_x2apic_enabled())
2299 apic_printk(APIC_QUIET, KERN_INFO 2302 apic_printk(APIC_QUIET, KERN_INFO
2300 "Perhaps problem with the pre-enabled x2apic mode\n" 2303 "Perhaps problem with the pre-enabled x2apic mode\n"
2301 "Try booting with x2apic and interrupt-remapping disabled in the bios.\n"); 2304 "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
@@ -2373,9 +2376,9 @@ void __init setup_IO_APIC(void)
2373{ 2376{
2374 int ioapic; 2377 int ioapic;
2375 2378
2376 /* 2379 if (skip_ioapic_setup || !nr_ioapics)
2377 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2380 return;
2378 */ 2381
2379 io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL; 2382 io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;
2380 2383
2381 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2384 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 15c5df92f74e..a220239cea65 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -869,3 +869,22 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
869 869
870 return false; 870 return false;
871} 871}
872
873void set_dr_addr_mask(unsigned long mask, int dr)
874{
875 if (!cpu_has_bpext)
876 return;
877
878 switch (dr) {
879 case 0:
880 wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
881 break;
882 case 1:
883 case 2:
884 case 3:
885 wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
886 break;
887 default:
888 break;
889 }
890}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c6049650c093..b15bffcaba6d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -491,17 +491,18 @@ u16 __read_mostly tlb_lld_2m[NR_INFO];
491u16 __read_mostly tlb_lld_4m[NR_INFO]; 491u16 __read_mostly tlb_lld_4m[NR_INFO];
492u16 __read_mostly tlb_lld_1g[NR_INFO]; 492u16 __read_mostly tlb_lld_1g[NR_INFO];
493 493
494void cpu_detect_tlb(struct cpuinfo_x86 *c) 494static void cpu_detect_tlb(struct cpuinfo_x86 *c)
495{ 495{
496 if (this_cpu->c_detect_tlb) 496 if (this_cpu->c_detect_tlb)
497 this_cpu->c_detect_tlb(c); 497 this_cpu->c_detect_tlb(c);
498 498
499 printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" 499 pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
500 "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
501 tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], 500 tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
502 tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], 501 tlb_lli_4m[ENTRIES]);
503 tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], 502
504 tlb_lld_1g[ENTRIES]); 503 pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
504 tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
505 tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
505} 506}
506 507
507void detect_ht(struct cpuinfo_x86 *c) 508void detect_ht(struct cpuinfo_x86 *c)
@@ -1332,7 +1333,7 @@ void cpu_init(void)
1332 barrier(); 1333 barrier();
1333 1334
1334 x86_configure_nx(); 1335 x86_configure_nx();
1335 enable_x2apic(); 1336 x2apic_setup();
1336 1337
1337 /* 1338 /*
1338 * set up and load the per-CPU TSS 1339 * set up and load the per-CPU TSS
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9cc6b6f25f42..94d7dcb12145 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -487,10 +487,8 @@ static void init_intel(struct cpuinfo_x86 *c)
487 487
488 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); 488 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
489 if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) { 489 if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
490 printk_once(KERN_WARNING "ENERGY_PERF_BIAS:" 490 pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
491 " Set to 'normal', was 'performance'\n" 491 pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
492 "ENERGY_PERF_BIAS: View and update with"
493 " x86_energy_perf_policy(8)\n");
494 epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; 492 epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
495 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); 493 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
496 } 494 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d2c611699cd9..cdfed7953963 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -43,6 +43,7 @@
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45#include <asm/processor.h> 45#include <asm/processor.h>
46#include <asm/traps.h>
46#include <asm/mce.h> 47#include <asm/mce.h>
47#include <asm/msr.h> 48#include <asm/msr.h>
48 49
@@ -115,7 +116,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
115 * CPU/chipset specific EDAC code can register a notifier call here to print 116 * CPU/chipset specific EDAC code can register a notifier call here to print
116 * MCE errors in a human-readable form. 117 * MCE errors in a human-readable form.
117 */ 118 */
118ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); 119static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
119 120
120/* Do initial initialization of a struct mce */ 121/* Do initial initialization of a struct mce */
121void mce_setup(struct mce *m) 122void mce_setup(struct mce *m)
@@ -311,7 +312,7 @@ static void wait_for_panic(void)
311 panic("Panicing machine check CPU died"); 312 panic("Panicing machine check CPU died");
312} 313}
313 314
314static void mce_panic(char *msg, struct mce *final, char *exp) 315static void mce_panic(const char *msg, struct mce *final, char *exp)
315{ 316{
316 int i, apei_err = 0; 317 int i, apei_err = 0;
317 318
@@ -529,7 +530,7 @@ static void mce_schedule_work(void)
529 schedule_work(this_cpu_ptr(&mce_work)); 530 schedule_work(this_cpu_ptr(&mce_work));
530} 531}
531 532
532DEFINE_PER_CPU(struct irq_work, mce_irq_work); 533static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
533 534
534static void mce_irq_work_cb(struct irq_work *entry) 535static void mce_irq_work_cb(struct irq_work *entry)
535{ 536{
@@ -735,7 +736,7 @@ static atomic_t mce_callin;
735/* 736/*
736 * Check if a timeout waiting for other CPUs happened. 737 * Check if a timeout waiting for other CPUs happened.
737 */ 738 */
738static int mce_timed_out(u64 *t) 739static int mce_timed_out(u64 *t, const char *msg)
739{ 740{
740 /* 741 /*
741 * The others already did panic for some reason. 742 * The others already did panic for some reason.
@@ -750,8 +751,7 @@ static int mce_timed_out(u64 *t)
750 goto out; 751 goto out;
751 if ((s64)*t < SPINUNIT) { 752 if ((s64)*t < SPINUNIT) {
752 if (mca_cfg.tolerant <= 1) 753 if (mca_cfg.tolerant <= 1)
753 mce_panic("Timeout synchronizing machine check over CPUs", 754 mce_panic(msg, NULL, NULL);
754 NULL, NULL);
755 cpu_missing = 1; 755 cpu_missing = 1;
756 return 1; 756 return 1;
757 } 757 }
@@ -867,7 +867,8 @@ static int mce_start(int *no_way_out)
867 * Wait for everyone. 867 * Wait for everyone.
868 */ 868 */
869 while (atomic_read(&mce_callin) != cpus) { 869 while (atomic_read(&mce_callin) != cpus) {
870 if (mce_timed_out(&timeout)) { 870 if (mce_timed_out(&timeout,
871 "Timeout: Not all CPUs entered broadcast exception handler")) {
871 atomic_set(&global_nwo, 0); 872 atomic_set(&global_nwo, 0);
872 return -1; 873 return -1;
873 } 874 }
@@ -892,7 +893,8 @@ static int mce_start(int *no_way_out)
892 * only seen by one CPU before cleared, avoiding duplicates. 893 * only seen by one CPU before cleared, avoiding duplicates.
893 */ 894 */
894 while (atomic_read(&mce_executing) < order) { 895 while (atomic_read(&mce_executing) < order) {
895 if (mce_timed_out(&timeout)) { 896 if (mce_timed_out(&timeout,
897 "Timeout: Subject CPUs unable to finish machine check processing")) {
896 atomic_set(&global_nwo, 0); 898 atomic_set(&global_nwo, 0);
897 return -1; 899 return -1;
898 } 900 }
@@ -936,7 +938,8 @@ static int mce_end(int order)
936 * loops. 938 * loops.
937 */ 939 */
938 while (atomic_read(&mce_executing) <= cpus) { 940 while (atomic_read(&mce_executing) <= cpus) {
939 if (mce_timed_out(&timeout)) 941 if (mce_timed_out(&timeout,
942 "Timeout: Monarch CPU unable to finish machine check processing"))
940 goto reset; 943 goto reset;
941 ndelay(SPINUNIT); 944 ndelay(SPINUNIT);
942 } 945 }
@@ -949,7 +952,8 @@ static int mce_end(int order)
949 * Subject: Wait for Monarch to finish. 952 * Subject: Wait for Monarch to finish.
950 */ 953 */
951 while (atomic_read(&mce_executing) != 0) { 954 while (atomic_read(&mce_executing) != 0) {
952 if (mce_timed_out(&timeout)) 955 if (mce_timed_out(&timeout,
956 "Timeout: Monarch CPU did not finish machine check processing"))
953 goto reset; 957 goto reset;
954 ndelay(SPINUNIT); 958 ndelay(SPINUNIT);
955 } 959 }
@@ -1003,51 +1007,6 @@ static void mce_clear_state(unsigned long *toclear)
1003} 1007}
1004 1008
1005/* 1009/*
1006 * Need to save faulting physical address associated with a process
1007 * in the machine check handler some place where we can grab it back
1008 * later in mce_notify_process()
1009 */
1010#define MCE_INFO_MAX 16
1011
1012struct mce_info {
1013 atomic_t inuse;
1014 struct task_struct *t;
1015 __u64 paddr;
1016 int restartable;
1017} mce_info[MCE_INFO_MAX];
1018
1019static void mce_save_info(__u64 addr, int c)
1020{
1021 struct mce_info *mi;
1022
1023 for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
1024 if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
1025 mi->t = current;
1026 mi->paddr = addr;
1027 mi->restartable = c;
1028 return;
1029 }
1030 }
1031
1032 mce_panic("Too many concurrent recoverable errors", NULL, NULL);
1033}
1034
1035static struct mce_info *mce_find_info(void)
1036{
1037 struct mce_info *mi;
1038
1039 for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
1040 if (atomic_read(&mi->inuse) && mi->t == current)
1041 return mi;
1042 return NULL;
1043}
1044
1045static void mce_clear_info(struct mce_info *mi)
1046{
1047 atomic_set(&mi->inuse, 0);
1048}
1049
1050/*
1051 * The actual machine check handler. This only handles real 1010 * The actual machine check handler. This only handles real
1052 * exceptions when something got corrupted coming in through int 18. 1011 * exceptions when something got corrupted coming in through int 18.
1053 * 1012 *
@@ -1063,6 +1022,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1063{ 1022{
1064 struct mca_config *cfg = &mca_cfg; 1023 struct mca_config *cfg = &mca_cfg;
1065 struct mce m, *final; 1024 struct mce m, *final;
1025 enum ctx_state prev_state;
1066 int i; 1026 int i;
1067 int worst = 0; 1027 int worst = 0;
1068 int severity; 1028 int severity;
@@ -1084,6 +1044,10 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1084 DECLARE_BITMAP(toclear, MAX_NR_BANKS); 1044 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1085 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); 1045 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1086 char *msg = "Unknown"; 1046 char *msg = "Unknown";
1047 u64 recover_paddr = ~0ull;
1048 int flags = MF_ACTION_REQUIRED;
1049
1050 prev_state = ist_enter(regs);
1087 1051
1088 this_cpu_inc(mce_exception_count); 1052 this_cpu_inc(mce_exception_count);
1089 1053
@@ -1203,9 +1167,9 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1203 if (no_way_out) 1167 if (no_way_out)
1204 mce_panic("Fatal machine check on current CPU", &m, msg); 1168 mce_panic("Fatal machine check on current CPU", &m, msg);
1205 if (worst == MCE_AR_SEVERITY) { 1169 if (worst == MCE_AR_SEVERITY) {
1206 /* schedule action before return to userland */ 1170 recover_paddr = m.addr;
1207 mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV); 1171 if (!(m.mcgstatus & MCG_STATUS_RIPV))
1208 set_thread_flag(TIF_MCE_NOTIFY); 1172 flags |= MF_MUST_KILL;
1209 } else if (kill_it) { 1173 } else if (kill_it) {
1210 force_sig(SIGBUS, current); 1174 force_sig(SIGBUS, current);
1211 } 1175 }
@@ -1216,6 +1180,27 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1216 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); 1180 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1217out: 1181out:
1218 sync_core(); 1182 sync_core();
1183
1184 if (recover_paddr == ~0ull)
1185 goto done;
1186
1187 pr_err("Uncorrected hardware memory error in user-access at %llx",
1188 recover_paddr);
1189 /*
1190 * We must call memory_failure() here even if the current process is
1191 * doomed. We still need to mark the page as poisoned and alert any
1192 * other users of the page.
1193 */
1194 ist_begin_non_atomic(regs);
1195 local_irq_enable();
1196 if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
1197 pr_err("Memory error not recovered");
1198 force_sig(SIGBUS, current);
1199 }
1200 local_irq_disable();
1201 ist_end_non_atomic();
1202done:
1203 ist_exit(regs, prev_state);
1219} 1204}
1220EXPORT_SYMBOL_GPL(do_machine_check); 1205EXPORT_SYMBOL_GPL(do_machine_check);
1221 1206
@@ -1233,42 +1218,6 @@ int memory_failure(unsigned long pfn, int vector, int flags)
1233#endif 1218#endif
1234 1219
1235/* 1220/*
1236 * Called in process context that interrupted by MCE and marked with
1237 * TIF_MCE_NOTIFY, just before returning to erroneous userland.
1238 * This code is allowed to sleep.
1239 * Attempt possible recovery such as calling the high level VM handler to
1240 * process any corrupted pages, and kill/signal current process if required.
1241 * Action required errors are handled here.
1242 */
1243void mce_notify_process(void)
1244{
1245 unsigned long pfn;
1246 struct mce_info *mi = mce_find_info();
1247 int flags = MF_ACTION_REQUIRED;
1248
1249 if (!mi)
1250 mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
1251 pfn = mi->paddr >> PAGE_SHIFT;
1252
1253 clear_thread_flag(TIF_MCE_NOTIFY);
1254
1255 pr_err("Uncorrected hardware memory error in user-access at %llx",
1256 mi->paddr);
1257 /*
1258 * We must call memory_failure() here even if the current process is
1259 * doomed. We still need to mark the page as poisoned and alert any
1260 * other users of the page.
1261 */
1262 if (!mi->restartable)
1263 flags |= MF_MUST_KILL;
1264 if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
1265 pr_err("Memory error not recovered");
1266 force_sig(SIGBUS, current);
1267 }
1268 mce_clear_info(mi);
1269}
1270
1271/*
1272 * Action optional processing happens here (picking up 1221 * Action optional processing happens here (picking up
1273 * from the list of faulting pages that do_machine_check() 1222 * from the list of faulting pages that do_machine_check()
1274 * placed into the "ring"). 1223 * placed into the "ring").
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index a3042989398c..ec2663a708e4 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -8,6 +8,7 @@
8#include <linux/smp.h> 8#include <linux/smp.h>
9 9
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/traps.h>
11#include <asm/mce.h> 12#include <asm/mce.h>
12#include <asm/msr.h> 13#include <asm/msr.h>
13 14
@@ -17,8 +18,11 @@ int mce_p5_enabled __read_mostly;
17/* Machine check handler for Pentium class Intel CPUs: */ 18/* Machine check handler for Pentium class Intel CPUs: */
18static void pentium_machine_check(struct pt_regs *regs, long error_code) 19static void pentium_machine_check(struct pt_regs *regs, long error_code)
19{ 20{
21 enum ctx_state prev_state;
20 u32 loaddr, hi, lotype; 22 u32 loaddr, hi, lotype;
21 23
24 prev_state = ist_enter(regs);
25
22 rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi); 26 rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
23 rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi); 27 rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
24 28
@@ -33,6 +37,8 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
33 } 37 }
34 38
35 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 39 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
40
41 ist_exit(regs, prev_state);
36} 42}
37 43
38/* Set up machine check reporting for processors with Intel style MCE: */ 44/* Set up machine check reporting for processors with Intel style MCE: */
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 7dc5564d0cdf..bd5d46a32210 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -7,14 +7,19 @@
7#include <linux/types.h> 7#include <linux/types.h>
8 8
9#include <asm/processor.h> 9#include <asm/processor.h>
10#include <asm/traps.h>
10#include <asm/mce.h> 11#include <asm/mce.h>
11#include <asm/msr.h> 12#include <asm/msr.h>
12 13
13/* Machine check handler for WinChip C6: */ 14/* Machine check handler for WinChip C6: */
14static void winchip_machine_check(struct pt_regs *regs, long error_code) 15static void winchip_machine_check(struct pt_regs *regs, long error_code)
15{ 16{
17 enum ctx_state prev_state = ist_enter(regs);
18
16 printk(KERN_EMERG "CPU0: Machine Check Exception.\n"); 19 printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
17 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 20 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
21
22 ist_exit(regs, prev_state);
18} 23}
19 24
20/* Set up machine check reporting on the Winchip C6 series */ 25/* Set up machine check reporting on the Winchip C6 series */
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index dd2f07ae9d0c..46201deee923 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -184,9 +184,9 @@ void __init e820_print_map(char *who)
184 * overwritten in the same location, starting at biosmap. 184 * overwritten in the same location, starting at biosmap.
185 * 185 *
186 * The integer pointed to by pnr_map must be valid on entry (the 186 * The integer pointed to by pnr_map must be valid on entry (the
187 * current number of valid entries located at biosmap) and will 187 * current number of valid entries located at biosmap). If the
188 * be updated on return, with the new number of valid entries 188 * sanitizing succeeds the *pnr_map will be updated with the new
189 * (something no more than max_nr_map.) 189 * number of valid entries (something no more than max_nr_map).
190 * 190 *
191 * The return value from sanitize_e820_map() is zero if it 191 * The return value from sanitize_e820_map() is zero if it
192 * successfully 'sanitized' the map entries passed in, and is -1 192 * successfully 'sanitized' the map entries passed in, and is -1
@@ -561,23 +561,15 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
561 561
562void __init update_e820(void) 562void __init update_e820(void)
563{ 563{
564 u32 nr_map; 564 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map))
565
566 nr_map = e820.nr_map;
567 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
568 return; 565 return;
569 e820.nr_map = nr_map;
570 printk(KERN_INFO "e820: modified physical RAM map:\n"); 566 printk(KERN_INFO "e820: modified physical RAM map:\n");
571 e820_print_map("modified"); 567 e820_print_map("modified");
572} 568}
573static void __init update_e820_saved(void) 569static void __init update_e820_saved(void)
574{ 570{
575 u32 nr_map; 571 sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map),
576 572 &e820_saved.nr_map);
577 nr_map = e820_saved.nr_map;
578 if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
579 return;
580 e820_saved.nr_map = nr_map;
581} 573}
582#define MAX_GAP_END 0x100000000ull 574#define MAX_GAP_END 0x100000000ull
583/* 575/*
@@ -898,11 +890,9 @@ early_param("memmap", parse_memmap_opt);
898void __init finish_e820_parsing(void) 890void __init finish_e820_parsing(void)
899{ 891{
900 if (userdef) { 892 if (userdef) {
901 u32 nr = e820.nr_map; 893 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map),
902 894 &e820.nr_map) < 0)
903 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
904 early_panic("Invalid user supplied memory map"); 895 early_panic("Invalid user supplied memory map");
905 e820.nr_map = nr;
906 896
907 printk(KERN_INFO "e820: user-defined physical RAM map:\n"); 897 printk(KERN_INFO "e820: user-defined physical RAM map:\n");
908 e820_print_map("user"); 898 e820_print_map("user");
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 9ebaf63ba182..db13655c3a2a 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -143,7 +143,8 @@ ENDPROC(native_usergs_sysret64)
143 movq \tmp,RSP+\offset(%rsp) 143 movq \tmp,RSP+\offset(%rsp)
144 movq $__USER_DS,SS+\offset(%rsp) 144 movq $__USER_DS,SS+\offset(%rsp)
145 movq $__USER_CS,CS+\offset(%rsp) 145 movq $__USER_CS,CS+\offset(%rsp)
146 movq $-1,RCX+\offset(%rsp) 146 movq RIP+\offset(%rsp),\tmp /* get rip */
147 movq \tmp,RCX+\offset(%rsp) /* copy it to rcx as sysret would do */
147 movq R11+\offset(%rsp),\tmp /* get eflags */ 148 movq R11+\offset(%rsp),\tmp /* get eflags */
148 movq \tmp,EFLAGS+\offset(%rsp) 149 movq \tmp,EFLAGS+\offset(%rsp)
149 .endm 150 .endm
@@ -155,27 +156,6 @@ ENDPROC(native_usergs_sysret64)
155 movq \tmp,R11+\offset(%rsp) 156 movq \tmp,R11+\offset(%rsp)
156 .endm 157 .endm
157 158
158 .macro FAKE_STACK_FRAME child_rip
159 /* push in order ss, rsp, eflags, cs, rip */
160 xorl %eax, %eax
161 pushq_cfi $__KERNEL_DS /* ss */
162 /*CFI_REL_OFFSET ss,0*/
163 pushq_cfi %rax /* rsp */
164 CFI_REL_OFFSET rsp,0
165 pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
166 /*CFI_REL_OFFSET rflags,0*/
167 pushq_cfi $__KERNEL_CS /* cs */
168 /*CFI_REL_OFFSET cs,0*/
169 pushq_cfi \child_rip /* rip */
170 CFI_REL_OFFSET rip,0
171 pushq_cfi %rax /* orig rax */
172 .endm
173
174 .macro UNFAKE_STACK_FRAME
175 addq $8*6, %rsp
176 CFI_ADJUST_CFA_OFFSET -(6*8)
177 .endm
178
179/* 159/*
180 * initial frame state for interrupts (and exceptions without error code) 160 * initial frame state for interrupts (and exceptions without error code)
181 */ 161 */
@@ -238,51 +218,6 @@ ENDPROC(native_usergs_sysret64)
238 CFI_REL_OFFSET r15, R15+\offset 218 CFI_REL_OFFSET r15, R15+\offset
239 .endm 219 .endm
240 220
241/* save partial stack frame */
242 .macro SAVE_ARGS_IRQ
243 cld
244 /* start from rbp in pt_regs and jump over */
245 movq_cfi rdi, (RDI-RBP)
246 movq_cfi rsi, (RSI-RBP)
247 movq_cfi rdx, (RDX-RBP)
248 movq_cfi rcx, (RCX-RBP)
249 movq_cfi rax, (RAX-RBP)
250 movq_cfi r8, (R8-RBP)
251 movq_cfi r9, (R9-RBP)
252 movq_cfi r10, (R10-RBP)
253 movq_cfi r11, (R11-RBP)
254
255 /* Save rbp so that we can unwind from get_irq_regs() */
256 movq_cfi rbp, 0
257
258 /* Save previous stack value */
259 movq %rsp, %rsi
260
261 leaq -RBP(%rsp),%rdi /* arg1 for handler */
262 testl $3, CS-RBP(%rsi)
263 je 1f
264 SWAPGS
265 /*
266 * irq_count is used to check if a CPU is already on an interrupt stack
267 * or not. While this is essentially redundant with preempt_count it is
268 * a little cheaper to use a separate counter in the PDA (short of
269 * moving irq_enter into assembly, which would be too much work)
270 */
2711: incl PER_CPU_VAR(irq_count)
272 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
273 CFI_DEF_CFA_REGISTER rsi
274
275 /* Store previous stack value */
276 pushq %rsi
277 CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
278 0x77 /* DW_OP_breg7 */, 0, \
279 0x06 /* DW_OP_deref */, \
280 0x08 /* DW_OP_const1u */, SS+8-RBP, \
281 0x22 /* DW_OP_plus */
282 /* We entered an interrupt context - irqs are off: */
283 TRACE_IRQS_OFF
284 .endm
285
286ENTRY(save_paranoid) 221ENTRY(save_paranoid)
287 XCPT_FRAME 1 RDI+8 222 XCPT_FRAME 1 RDI+8
288 cld 223 cld
@@ -426,15 +361,12 @@ system_call_fastpath:
426 * Has incomplete stack frame and undefined top of stack. 361 * Has incomplete stack frame and undefined top of stack.
427 */ 362 */
428ret_from_sys_call: 363ret_from_sys_call:
429 movl $_TIF_ALLWORK_MASK,%edi 364 testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
430 /* edi: flagmask */ 365 jnz int_ret_from_sys_call_fixup /* Go the the slow path */
431sysret_check: 366
432 LOCKDEP_SYS_EXIT 367 LOCKDEP_SYS_EXIT
433 DISABLE_INTERRUPTS(CLBR_NONE) 368 DISABLE_INTERRUPTS(CLBR_NONE)
434 TRACE_IRQS_OFF 369 TRACE_IRQS_OFF
435 movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
436 andl %edi,%edx
437 jnz sysret_careful
438 CFI_REMEMBER_STATE 370 CFI_REMEMBER_STATE
439 /* 371 /*
440 * sysretq will re-enable interrupts: 372 * sysretq will re-enable interrupts:
@@ -448,49 +380,10 @@ sysret_check:
448 USERGS_SYSRET64 380 USERGS_SYSRET64
449 381
450 CFI_RESTORE_STATE 382 CFI_RESTORE_STATE
451 /* Handle reschedules */
452 /* edx: work, edi: workmask */
453sysret_careful:
454 bt $TIF_NEED_RESCHED,%edx
455 jnc sysret_signal
456 TRACE_IRQS_ON
457 ENABLE_INTERRUPTS(CLBR_NONE)
458 pushq_cfi %rdi
459 SCHEDULE_USER
460 popq_cfi %rdi
461 jmp sysret_check
462 383
463 /* Handle a signal */ 384int_ret_from_sys_call_fixup:
464sysret_signal:
465 TRACE_IRQS_ON
466 ENABLE_INTERRUPTS(CLBR_NONE)
467#ifdef CONFIG_AUDITSYSCALL
468 bt $TIF_SYSCALL_AUDIT,%edx
469 jc sysret_audit
470#endif
471 /*
472 * We have a signal, or exit tracing or single-step.
473 * These all wind up with the iret return path anyway,
474 * so just join that path right now.
475 */
476 FIXUP_TOP_OF_STACK %r11, -ARGOFFSET 385 FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
477 jmp int_check_syscall_exit_work 386 jmp int_ret_from_sys_call
478
479#ifdef CONFIG_AUDITSYSCALL
480 /*
481 * Return fast path for syscall audit. Call __audit_syscall_exit()
482 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
483 * masked off.
484 */
485sysret_audit:
486 movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */
487 cmpq $-MAX_ERRNO,%rsi /* is it < -MAX_ERRNO? */
488 setbe %al /* 1 if so, 0 if not */
489 movzbl %al,%edi /* zero-extend that into %edi */
490 call __audit_syscall_exit
491 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
492 jmp sysret_check
493#endif /* CONFIG_AUDITSYSCALL */
494 387
495 /* Do syscall tracing */ 388 /* Do syscall tracing */
496tracesys: 389tracesys:
@@ -626,19 +519,6 @@ END(\label)
626 FORK_LIKE vfork 519 FORK_LIKE vfork
627 FIXED_FRAME stub_iopl, sys_iopl 520 FIXED_FRAME stub_iopl, sys_iopl
628 521
629ENTRY(ptregscall_common)
630 DEFAULT_FRAME 1 8 /* offset 8: return address */
631 RESTORE_TOP_OF_STACK %r11, 8
632 movq_cfi_restore R15+8, r15
633 movq_cfi_restore R14+8, r14
634 movq_cfi_restore R13+8, r13
635 movq_cfi_restore R12+8, r12
636 movq_cfi_restore RBP+8, rbp
637 movq_cfi_restore RBX+8, rbx
638 ret $REST_SKIP /* pop extended registers */
639 CFI_ENDPROC
640END(ptregscall_common)
641
642ENTRY(stub_execve) 522ENTRY(stub_execve)
643 CFI_STARTPROC 523 CFI_STARTPROC
644 addq $8, %rsp 524 addq $8, %rsp
@@ -779,7 +659,48 @@ END(interrupt)
779 /* reserve pt_regs for scratch regs and rbp */ 659 /* reserve pt_regs for scratch regs and rbp */
780 subq $ORIG_RAX-RBP, %rsp 660 subq $ORIG_RAX-RBP, %rsp
781 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP 661 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
782 SAVE_ARGS_IRQ 662 cld
663 /* start from rbp in pt_regs and jump over */
664 movq_cfi rdi, (RDI-RBP)
665 movq_cfi rsi, (RSI-RBP)
666 movq_cfi rdx, (RDX-RBP)
667 movq_cfi rcx, (RCX-RBP)
668 movq_cfi rax, (RAX-RBP)
669 movq_cfi r8, (R8-RBP)
670 movq_cfi r9, (R9-RBP)
671 movq_cfi r10, (R10-RBP)
672 movq_cfi r11, (R11-RBP)
673
674 /* Save rbp so that we can unwind from get_irq_regs() */
675 movq_cfi rbp, 0
676
677 /* Save previous stack value */
678 movq %rsp, %rsi
679
680 leaq -RBP(%rsp),%rdi /* arg1 for handler */
681 testl $3, CS-RBP(%rsi)
682 je 1f
683 SWAPGS
684 /*
685 * irq_count is used to check if a CPU is already on an interrupt stack
686 * or not. While this is essentially redundant with preempt_count it is
687 * a little cheaper to use a separate counter in the PDA (short of
688 * moving irq_enter into assembly, which would be too much work)
689 */
6901: incl PER_CPU_VAR(irq_count)
691 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
692 CFI_DEF_CFA_REGISTER rsi
693
694 /* Store previous stack value */
695 pushq %rsi
696 CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
697 0x77 /* DW_OP_breg7 */, 0, \
698 0x06 /* DW_OP_deref */, \
699 0x08 /* DW_OP_const1u */, SS+8-RBP, \
700 0x22 /* DW_OP_plus */
701 /* We entered an interrupt context - irqs are off: */
702 TRACE_IRQS_OFF
703
783 call \func 704 call \func
784 .endm 705 .endm
785 706
@@ -831,6 +752,60 @@ retint_swapgs: /* return to user-space */
831 */ 752 */
832 DISABLE_INTERRUPTS(CLBR_ANY) 753 DISABLE_INTERRUPTS(CLBR_ANY)
833 TRACE_IRQS_IRETQ 754 TRACE_IRQS_IRETQ
755
756 /*
757 * Try to use SYSRET instead of IRET if we're returning to
758 * a completely clean 64-bit userspace context.
759 */
760 movq (RCX-R11)(%rsp), %rcx
761 cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
762 jne opportunistic_sysret_failed
763
764 /*
765 * On Intel CPUs, sysret with non-canonical RCX/RIP will #GP
766 * in kernel space. This essentially lets the user take over
767 * the kernel, since userspace controls RSP. It's not worth
768 * testing for canonicalness exactly -- this check detects any
769 * of the 17 high bits set, which is true for non-canonical
770 * or kernel addresses. (This will pessimize vsyscall=native.
771 * Big deal.)
772 *
773 * If virtual addresses ever become wider, this will need
774 * to be updated to remain correct on both old and new CPUs.
775 */
776 .ifne __VIRTUAL_MASK_SHIFT - 47
777 .error "virtual address width changed -- sysret checks need update"
778 .endif
779 shr $__VIRTUAL_MASK_SHIFT, %rcx
780 jnz opportunistic_sysret_failed
781
782 cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
783 jne opportunistic_sysret_failed
784
785 movq (R11-ARGOFFSET)(%rsp), %r11
786 cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
787 jne opportunistic_sysret_failed
788
789 testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
790 jnz opportunistic_sysret_failed
791
792 /* nothing to check for RSP */
793
794 cmpq $__USER_DS,(SS-ARGOFFSET)(%rsp) /* SS must match SYSRET */
795 jne opportunistic_sysret_failed
796
797 /*
798 * We win! This label is here just for ease of understanding
799 * perf profiles. Nothing jumps here.
800 */
801irq_return_via_sysret:
802 CFI_REMEMBER_STATE
803 RESTORE_ARGS 1,8,1
804 movq (RSP-RIP)(%rsp),%rsp
805 USERGS_SYSRET64
806 CFI_RESTORE_STATE
807
808opportunistic_sysret_failed:
834 SWAPGS 809 SWAPGS
835 jmp restore_args 810 jmp restore_args
836 811
@@ -1048,6 +1023,11 @@ ENTRY(\sym)
1048 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 1023 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
1049 1024
1050 .if \paranoid 1025 .if \paranoid
1026 .if \paranoid == 1
1027 CFI_REMEMBER_STATE
1028 testl $3, CS(%rsp) /* If coming from userspace, switch */
1029 jnz 1f /* stacks. */
1030 .endif
1051 call save_paranoid 1031 call save_paranoid
1052 .else 1032 .else
1053 call error_entry 1033 call error_entry
@@ -1088,6 +1068,36 @@ ENTRY(\sym)
1088 jmp error_exit /* %ebx: no swapgs flag */ 1068 jmp error_exit /* %ebx: no swapgs flag */
1089 .endif 1069 .endif
1090 1070
1071 .if \paranoid == 1
1072 CFI_RESTORE_STATE
1073 /*
1074 * Paranoid entry from userspace. Switch stacks and treat it
1075 * as a normal entry. This means that paranoid handlers
1076 * run in real process context if user_mode(regs).
1077 */
10781:
1079 call error_entry
1080
1081 DEFAULT_FRAME 0
1082
1083 movq %rsp,%rdi /* pt_regs pointer */
1084 call sync_regs
1085 movq %rax,%rsp /* switch stack */
1086
1087 movq %rsp,%rdi /* pt_regs pointer */
1088
1089 .if \has_error_code
1090 movq ORIG_RAX(%rsp),%rsi /* get error code */
1091 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
1092 .else
1093 xorl %esi,%esi /* no error code */
1094 .endif
1095
1096 call \do_sym
1097
1098 jmp error_exit /* %ebx: no swapgs flag */
1099 .endif
1100
1091 CFI_ENDPROC 1101 CFI_ENDPROC
1092END(\sym) 1102END(\sym)
1093.endm 1103.endm
@@ -1108,7 +1118,7 @@ idtentry overflow do_overflow has_error_code=0
1108idtentry bounds do_bounds has_error_code=0 1118idtentry bounds do_bounds has_error_code=0
1109idtentry invalid_op do_invalid_op has_error_code=0 1119idtentry invalid_op do_invalid_op has_error_code=0
1110idtentry device_not_available do_device_not_available has_error_code=0 1120idtentry device_not_available do_device_not_available has_error_code=0
1111idtentry double_fault do_double_fault has_error_code=1 paranoid=1 1121idtentry double_fault do_double_fault has_error_code=1 paranoid=2
1112idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 1122idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
1113idtentry invalid_TSS do_invalid_TSS has_error_code=1 1123idtentry invalid_TSS do_invalid_TSS has_error_code=1
1114idtentry segment_not_present do_segment_not_present has_error_code=1 1124idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,16 +1299,14 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
1289#endif 1299#endif
1290 1300
1291 /* 1301 /*
1292 * "Paranoid" exit path from exception stack. 1302 * "Paranoid" exit path from exception stack. This is invoked
1293 * Paranoid because this is used by NMIs and cannot take 1303 * only on return from non-NMI IST interrupts that came
1294 * any kernel state for granted. 1304 * from kernel space.
1295 * We don't do kernel preemption checks here, because only
1296 * NMI should be common and it does not enable IRQs and
1297 * cannot get reschedule ticks.
1298 * 1305 *
1299 * "trace" is 0 for the NMI handler only, because irq-tracing 1306 * We may be returning to very strange contexts (e.g. very early
1300 * is fundamentally NMI-unsafe. (we cannot change the soft and 1307 * in syscall entry), so checking for preemption here would
1301 * hard flags at once, atomically) 1308 * be complicated. Fortunately, we there's no good reason
1309 * to try to handle preemption here.
1302 */ 1310 */
1303 1311
1304 /* ebx: no swapgs flag */ 1312 /* ebx: no swapgs flag */
@@ -1308,43 +1316,14 @@ ENTRY(paranoid_exit)
1308 TRACE_IRQS_OFF_DEBUG 1316 TRACE_IRQS_OFF_DEBUG
1309 testl %ebx,%ebx /* swapgs needed? */ 1317 testl %ebx,%ebx /* swapgs needed? */
1310 jnz paranoid_restore 1318 jnz paranoid_restore
1311 testl $3,CS(%rsp)
1312 jnz paranoid_userspace
1313paranoid_swapgs:
1314 TRACE_IRQS_IRETQ 0 1319 TRACE_IRQS_IRETQ 0
1315 SWAPGS_UNSAFE_STACK 1320 SWAPGS_UNSAFE_STACK
1316 RESTORE_ALL 8 1321 RESTORE_ALL 8
1317 jmp irq_return 1322 INTERRUPT_RETURN
1318paranoid_restore: 1323paranoid_restore:
1319 TRACE_IRQS_IRETQ_DEBUG 0 1324 TRACE_IRQS_IRETQ_DEBUG 0
1320 RESTORE_ALL 8 1325 RESTORE_ALL 8
1321 jmp irq_return 1326 INTERRUPT_RETURN
1322paranoid_userspace:
1323 GET_THREAD_INFO(%rcx)
1324 movl TI_flags(%rcx),%ebx
1325 andl $_TIF_WORK_MASK,%ebx
1326 jz paranoid_swapgs
1327 movq %rsp,%rdi /* &pt_regs */
1328 call sync_regs
1329 movq %rax,%rsp /* switch stack for scheduling */
1330 testl $_TIF_NEED_RESCHED,%ebx
1331 jnz paranoid_schedule
1332 movl %ebx,%edx /* arg3: thread flags */
1333 TRACE_IRQS_ON
1334 ENABLE_INTERRUPTS(CLBR_NONE)
1335 xorl %esi,%esi /* arg2: oldset */
1336 movq %rsp,%rdi /* arg1: &pt_regs */
1337 call do_notify_resume
1338 DISABLE_INTERRUPTS(CLBR_NONE)
1339 TRACE_IRQS_OFF
1340 jmp paranoid_userspace
1341paranoid_schedule:
1342 TRACE_IRQS_ON
1343 ENABLE_INTERRUPTS(CLBR_ANY)
1344 SCHEDULE_USER
1345 DISABLE_INTERRUPTS(CLBR_ANY)
1346 TRACE_IRQS_OFF
1347 jmp paranoid_userspace
1348 CFI_ENDPROC 1327 CFI_ENDPROC
1349END(paranoid_exit) 1328END(paranoid_exit)
1350 1329
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 3d5fb509bdeb..7114ba220fd4 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -126,6 +126,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
126 *dr7 |= encode_dr7(i, info->len, info->type); 126 *dr7 |= encode_dr7(i, info->len, info->type);
127 127
128 set_debugreg(*dr7, 7); 128 set_debugreg(*dr7, 7);
129 if (info->mask)
130 set_dr_addr_mask(info->mask, i);
129 131
130 return 0; 132 return 0;
131} 133}
@@ -161,29 +163,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
161 *dr7 &= ~__encode_dr7(i, info->len, info->type); 163 *dr7 &= ~__encode_dr7(i, info->len, info->type);
162 164
163 set_debugreg(*dr7, 7); 165 set_debugreg(*dr7, 7);
164} 166 if (info->mask)
165 167 set_dr_addr_mask(0, i);
166static int get_hbp_len(u8 hbp_len)
167{
168 unsigned int len_in_bytes = 0;
169
170 switch (hbp_len) {
171 case X86_BREAKPOINT_LEN_1:
172 len_in_bytes = 1;
173 break;
174 case X86_BREAKPOINT_LEN_2:
175 len_in_bytes = 2;
176 break;
177 case X86_BREAKPOINT_LEN_4:
178 len_in_bytes = 4;
179 break;
180#ifdef CONFIG_X86_64
181 case X86_BREAKPOINT_LEN_8:
182 len_in_bytes = 8;
183 break;
184#endif
185 }
186 return len_in_bytes;
187} 168}
188 169
189/* 170/*
@@ -196,7 +177,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
196 struct arch_hw_breakpoint *info = counter_arch_bp(bp); 177 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
197 178
198 va = info->address; 179 va = info->address;
199 len = get_hbp_len(info->len); 180 len = bp->attr.bp_len;
200 181
201 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); 182 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
202} 183}
@@ -277,6 +258,8 @@ static int arch_build_bp_info(struct perf_event *bp)
277 } 258 }
278 259
279 /* Len */ 260 /* Len */
261 info->mask = 0;
262
280 switch (bp->attr.bp_len) { 263 switch (bp->attr.bp_len) {
281 case HW_BREAKPOINT_LEN_1: 264 case HW_BREAKPOINT_LEN_1:
282 info->len = X86_BREAKPOINT_LEN_1; 265 info->len = X86_BREAKPOINT_LEN_1;
@@ -293,11 +276,17 @@ static int arch_build_bp_info(struct perf_event *bp)
293 break; 276 break;
294#endif 277#endif
295 default: 278 default:
296 return -EINVAL; 279 if (!is_power_of_2(bp->attr.bp_len))
280 return -EINVAL;
281 if (!cpu_has_bpext)
282 return -EOPNOTSUPP;
283 info->mask = bp->attr.bp_len - 1;
284 info->len = X86_BREAKPOINT_LEN_1;
297 } 285 }
298 286
299 return 0; 287 return 0;
300} 288}
289
301/* 290/*
302 * Validate the arch-specific HW Breakpoint register settings 291 * Validate the arch-specific HW Breakpoint register settings
303 */ 292 */
@@ -312,11 +301,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
312 if (ret) 301 if (ret)
313 return ret; 302 return ret;
314 303
315 ret = -EINVAL;
316
317 switch (info->len) { 304 switch (info->len) {
318 case X86_BREAKPOINT_LEN_1: 305 case X86_BREAKPOINT_LEN_1:
319 align = 0; 306 align = 0;
307 if (info->mask)
308 align = info->mask;
320 break; 309 break;
321 case X86_BREAKPOINT_LEN_2: 310 case X86_BREAKPOINT_LEN_2:
322 align = 1; 311 align = 1;
@@ -330,7 +319,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
330 break; 319 break;
331#endif 320#endif
332 default: 321 default:
333 return ret; 322 WARN_ON_ONCE(1);
334 } 323 }
335 324
336 /* 325 /*
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a9a4229f6161..81049ffab2d6 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -19,6 +19,19 @@
19#include <asm/fpu-internal.h> 19#include <asm/fpu-internal.h>
20#include <asm/user.h> 20#include <asm/user.h>
21 21
22static DEFINE_PER_CPU(bool, in_kernel_fpu);
23
24void kernel_fpu_disable(void)
25{
26 WARN_ON(this_cpu_read(in_kernel_fpu));
27 this_cpu_write(in_kernel_fpu, true);
28}
29
30void kernel_fpu_enable(void)
31{
32 this_cpu_write(in_kernel_fpu, false);
33}
34
22/* 35/*
23 * Were we in an interrupt that interrupted kernel mode? 36 * Were we in an interrupt that interrupted kernel mode?
24 * 37 *
@@ -33,6 +46,9 @@
33 */ 46 */
34static inline bool interrupted_kernel_fpu_idle(void) 47static inline bool interrupted_kernel_fpu_idle(void)
35{ 48{
49 if (this_cpu_read(in_kernel_fpu))
50 return false;
51
36 if (use_eager_fpu()) 52 if (use_eager_fpu())
37 return __thread_has_fpu(current); 53 return __thread_has_fpu(current);
38 54
@@ -73,10 +89,10 @@ void __kernel_fpu_begin(void)
73{ 89{
74 struct task_struct *me = current; 90 struct task_struct *me = current;
75 91
92 this_cpu_write(in_kernel_fpu, true);
93
76 if (__thread_has_fpu(me)) { 94 if (__thread_has_fpu(me)) {
77 __thread_clear_has_fpu(me);
78 __save_init_fpu(me); 95 __save_init_fpu(me);
79 /* We do 'stts()' in __kernel_fpu_end() */
80 } else if (!use_eager_fpu()) { 96 } else if (!use_eager_fpu()) {
81 this_cpu_write(fpu_owner_task, NULL); 97 this_cpu_write(fpu_owner_task, NULL);
82 clts(); 98 clts();
@@ -86,19 +102,16 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
86 102
87void __kernel_fpu_end(void) 103void __kernel_fpu_end(void)
88{ 104{
89 if (use_eager_fpu()) { 105 struct task_struct *me = current;
90 /* 106
91 * For eager fpu, most the time, tsk_used_math() is true. 107 if (__thread_has_fpu(me)) {
92 * Restore the user math as we are done with the kernel usage. 108 if (WARN_ON(restore_fpu_checking(me)))
93 * At few instances during thread exit, signal handling etc, 109 drop_init_fpu(me);
94 * tsk_used_math() is false. Those few places will take proper 110 } else if (!use_eager_fpu()) {
95 * actions, so we don't need to restore the math here.
96 */
97 if (likely(tsk_used_math(current)))
98 math_state_restore();
99 } else {
100 stts(); 111 stts();
101 } 112 }
113
114 this_cpu_write(in_kernel_fpu, false);
102} 115}
103EXPORT_SYMBOL(__kernel_fpu_end); 116EXPORT_SYMBOL(__kernel_fpu_end);
104 117
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 63ce838e5a54..28d28f5eb8f4 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -69,16 +69,9 @@ static void call_on_stack(void *func, void *stack)
69 : "memory", "cc", "edx", "ecx", "eax"); 69 : "memory", "cc", "edx", "ecx", "eax");
70} 70}
71 71
72/* how to get the current stack pointer from C */
73#define current_stack_pointer ({ \
74 unsigned long sp; \
75 asm("mov %%esp,%0" : "=g" (sp)); \
76 sp; \
77})
78
79static inline void *current_stack(void) 72static inline void *current_stack(void)
80{ 73{
81 return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1)); 74 return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
82} 75}
83 76
84static inline int 77static inline int
@@ -103,7 +96,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
103 96
104 /* Save the next esp at the bottom of the stack */ 97 /* Save the next esp at the bottom of the stack */
105 prev_esp = (u32 *)irqstk; 98 prev_esp = (u32 *)irqstk;
106 *prev_esp = current_stack_pointer; 99 *prev_esp = current_stack_pointer();
107 100
108 if (unlikely(overflow)) 101 if (unlikely(overflow))
109 call_on_stack(print_stack_overflow, isp); 102 call_on_stack(print_stack_overflow, isp);
@@ -156,7 +149,7 @@ void do_softirq_own_stack(void)
156 149
157 /* Push the previous esp onto the stack */ 150 /* Push the previous esp onto the stack */
158 prev_esp = (u32 *)irqstk; 151 prev_esp = (u32 *)irqstk;
159 *prev_esp = current_stack_pointer; 152 *prev_esp = current_stack_pointer();
160 153
161 call_on_stack(__do_softirq, isp); 154 call_on_stack(__do_softirq, isp);
162} 155}
diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/kernel/pmc_atom.c
index 0ee5025e0fa4..d66a4fe6caee 100644
--- a/arch/x86/kernel/pmc_atom.c
+++ b/arch/x86/kernel/pmc_atom.c
@@ -25,8 +25,6 @@
25 25
26#include <asm/pmc_atom.h> 26#include <asm/pmc_atom.h>
27 27
28#define DRIVER_NAME KBUILD_MODNAME
29
30struct pmc_dev { 28struct pmc_dev {
31 u32 base_addr; 29 u32 base_addr;
32 void __iomem *regmap; 30 void __iomem *regmap;
@@ -38,12 +36,12 @@ struct pmc_dev {
38static struct pmc_dev pmc_device; 36static struct pmc_dev pmc_device;
39static u32 acpi_base_addr; 37static u32 acpi_base_addr;
40 38
41struct pmc_dev_map { 39struct pmc_bit_map {
42 const char *name; 40 const char *name;
43 u32 bit_mask; 41 u32 bit_mask;
44}; 42};
45 43
46static const struct pmc_dev_map dev_map[] = { 44static const struct pmc_bit_map dev_map[] = {
47 {"0 - LPSS1_F0_DMA", BIT_LPSS1_F0_DMA}, 45 {"0 - LPSS1_F0_DMA", BIT_LPSS1_F0_DMA},
48 {"1 - LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1}, 46 {"1 - LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1},
49 {"2 - LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2}, 47 {"2 - LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2},
@@ -82,6 +80,27 @@ static const struct pmc_dev_map dev_map[] = {
82 {"35 - DFX", BIT_DFX}, 80 {"35 - DFX", BIT_DFX},
83}; 81};
84 82
83static const struct pmc_bit_map pss_map[] = {
84 {"0 - GBE", PMC_PSS_BIT_GBE},
85 {"1 - SATA", PMC_PSS_BIT_SATA},
86 {"2 - HDA", PMC_PSS_BIT_HDA},
87 {"3 - SEC", PMC_PSS_BIT_SEC},
88 {"4 - PCIE", PMC_PSS_BIT_PCIE},
89 {"5 - LPSS", PMC_PSS_BIT_LPSS},
90 {"6 - LPE", PMC_PSS_BIT_LPE},
91 {"7 - DFX", PMC_PSS_BIT_DFX},
92 {"8 - USH_CTRL", PMC_PSS_BIT_USH_CTRL},
93 {"9 - USH_SUS", PMC_PSS_BIT_USH_SUS},
94 {"10 - USH_VCCS", PMC_PSS_BIT_USH_VCCS},
95 {"11 - USH_VCCA", PMC_PSS_BIT_USH_VCCA},
96 {"12 - OTG_CTRL", PMC_PSS_BIT_OTG_CTRL},
97 {"13 - OTG_VCCS", PMC_PSS_BIT_OTG_VCCS},
98 {"14 - OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK},
99 {"15 - OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
100 {"16 - USB", PMC_PSS_BIT_USB},
101 {"17 - USB_SUS", PMC_PSS_BIT_USB_SUS},
102};
103
85static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset) 104static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
86{ 105{
87 return readl(pmc->regmap + reg_offset); 106 return readl(pmc->regmap + reg_offset);
@@ -169,6 +188,32 @@ static const struct file_operations pmc_dev_state_ops = {
169 .release = single_release, 188 .release = single_release,
170}; 189};
171 190
191static int pmc_pss_state_show(struct seq_file *s, void *unused)
192{
193 struct pmc_dev *pmc = s->private;
194 u32 pss = pmc_reg_read(pmc, PMC_PSS);
195 int pss_index;
196
197 for (pss_index = 0; pss_index < ARRAY_SIZE(pss_map); pss_index++) {
198 seq_printf(s, "Island: %-32s\tState: %s\n",
199 pss_map[pss_index].name,
200 pss_map[pss_index].bit_mask & pss ? "Off" : "On");
201 }
202 return 0;
203}
204
205static int pmc_pss_state_open(struct inode *inode, struct file *file)
206{
207 return single_open(file, pmc_pss_state_show, inode->i_private);
208}
209
210static const struct file_operations pmc_pss_state_ops = {
211 .open = pmc_pss_state_open,
212 .read = seq_read,
213 .llseek = seq_lseek,
214 .release = single_release,
215};
216
172static int pmc_sleep_tmr_show(struct seq_file *s, void *unused) 217static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
173{ 218{
174 struct pmc_dev *pmc = s->private; 219 struct pmc_dev *pmc = s->private;
@@ -202,11 +247,7 @@ static const struct file_operations pmc_sleep_tmr_ops = {
202 247
203static void pmc_dbgfs_unregister(struct pmc_dev *pmc) 248static void pmc_dbgfs_unregister(struct pmc_dev *pmc)
204{ 249{
205 if (!pmc->dbgfs_dir)
206 return;
207
208 debugfs_remove_recursive(pmc->dbgfs_dir); 250 debugfs_remove_recursive(pmc->dbgfs_dir);
209 pmc->dbgfs_dir = NULL;
210} 251}
211 252
212static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev) 253static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
@@ -217,19 +258,29 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
217 if (!dir) 258 if (!dir)
218 return -ENOMEM; 259 return -ENOMEM;
219 260
261 pmc->dbgfs_dir = dir;
262
220 f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO, 263 f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
221 dir, pmc, &pmc_dev_state_ops); 264 dir, pmc, &pmc_dev_state_ops);
222 if (!f) { 265 if (!f) {
223 dev_err(&pdev->dev, "dev_states register failed\n"); 266 dev_err(&pdev->dev, "dev_state register failed\n");
224 goto err; 267 goto err;
225 } 268 }
269
270 f = debugfs_create_file("pss_state", S_IFREG | S_IRUGO,
271 dir, pmc, &pmc_pss_state_ops);
272 if (!f) {
273 dev_err(&pdev->dev, "pss_state register failed\n");
274 goto err;
275 }
276
226 f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO, 277 f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO,
227 dir, pmc, &pmc_sleep_tmr_ops); 278 dir, pmc, &pmc_sleep_tmr_ops);
228 if (!f) { 279 if (!f) {
229 dev_err(&pdev->dev, "sleep_state register failed\n"); 280 dev_err(&pdev->dev, "sleep_state register failed\n");
230 goto err; 281 goto err;
231 } 282 }
232 pmc->dbgfs_dir = dir; 283
233 return 0; 284 return 0;
234err: 285err:
235 pmc_dbgfs_unregister(pmc); 286 pmc_dbgfs_unregister(pmc);
@@ -292,7 +343,6 @@ MODULE_DEVICE_TABLE(pci, pmc_pci_ids);
292 343
293static int __init pmc_atom_init(void) 344static int __init pmc_atom_init(void)
294{ 345{
295 int err = -ENODEV;
296 struct pci_dev *pdev = NULL; 346 struct pci_dev *pdev = NULL;
297 const struct pci_device_id *ent; 347 const struct pci_device_id *ent;
298 348
@@ -306,14 +356,11 @@ static int __init pmc_atom_init(void)
306 */ 356 */
307 for_each_pci_dev(pdev) { 357 for_each_pci_dev(pdev) {
308 ent = pci_match_id(pmc_pci_ids, pdev); 358 ent = pci_match_id(pmc_pci_ids, pdev);
309 if (ent) { 359 if (ent)
310 err = pmc_setup_dev(pdev); 360 return pmc_setup_dev(pdev);
311 goto out;
312 }
313 } 361 }
314 /* Device not found. */ 362 /* Device not found. */
315out: 363 return -ENODEV;
316 return err;
317} 364}
318 365
319module_init(pmc_atom_init); 366module_init(pmc_atom_init);
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index ca9622a25e95..fe3dbfe0c4a5 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -170,7 +170,7 @@ static struct platform_device rtc_device = {
170static __init int add_rtc_cmos(void) 170static __init int add_rtc_cmos(void)
171{ 171{
172#ifdef CONFIG_PNP 172#ifdef CONFIG_PNP
173 static const char * const const ids[] __initconst = 173 static const char * const ids[] __initconst =
174 { "PNP0b00", "PNP0b01", "PNP0b02", }; 174 { "PNP0b00", "PNP0b01", "PNP0b02", };
175 struct pnp_dev *dev; 175 struct pnp_dev *dev;
176 struct pnp_id *id; 176 struct pnp_id *id;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ab4734e5411d..c4648adadd7d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -431,15 +431,13 @@ static void __init parse_setup_data(void)
431 431
432 pa_data = boot_params.hdr.setup_data; 432 pa_data = boot_params.hdr.setup_data;
433 while (pa_data) { 433 while (pa_data) {
434 u32 data_len, map_len, data_type; 434 u32 data_len, data_type;
435 435
436 map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK), 436 data = early_memremap(pa_data, sizeof(*data));
437 (u64)sizeof(struct setup_data));
438 data = early_memremap(pa_data, map_len);
439 data_len = data->len + sizeof(struct setup_data); 437 data_len = data->len + sizeof(struct setup_data);
440 data_type = data->type; 438 data_type = data->type;
441 pa_next = data->next; 439 pa_next = data->next;
442 early_iounmap(data, map_len); 440 early_iounmap(data, sizeof(*data));
443 441
444 switch (data_type) { 442 switch (data_type) {
445 case SETUP_E820_EXT: 443 case SETUP_E820_EXT:
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index ed37a768d0fc..2a33c8f68319 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -740,12 +740,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
740{ 740{
741 user_exit(); 741 user_exit();
742 742
743#ifdef CONFIG_X86_MCE
744 /* notify userspace of pending MCEs */
745 if (thread_info_flags & _TIF_MCE_NOTIFY)
746 mce_notify_process();
747#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
748
749 if (thread_info_flags & _TIF_UPROBE) 743 if (thread_info_flags & _TIF_UPROBE)
750 uprobe_notify_resume(regs); 744 uprobe_notify_resume(regs);
751 745
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6d7022c683e3..febc6aabc72e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -73,7 +73,6 @@
73#include <asm/setup.h> 73#include <asm/setup.h>
74#include <asm/uv/uv.h> 74#include <asm/uv/uv.h>
75#include <linux/mc146818rtc.h> 75#include <linux/mc146818rtc.h>
76#include <asm/smpboot_hooks.h>
77#include <asm/i8259.h> 76#include <asm/i8259.h>
78#include <asm/realmode.h> 77#include <asm/realmode.h>
79#include <asm/misc.h> 78#include <asm/misc.h>
@@ -104,6 +103,43 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
104 103
105atomic_t init_deasserted; 104atomic_t init_deasserted;
106 105
106static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
107{
108 unsigned long flags;
109
110 spin_lock_irqsave(&rtc_lock, flags);
111 CMOS_WRITE(0xa, 0xf);
112 spin_unlock_irqrestore(&rtc_lock, flags);
113 local_flush_tlb();
114 pr_debug("1.\n");
115 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
116 start_eip >> 4;
117 pr_debug("2.\n");
118 *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
119 start_eip & 0xf;
120 pr_debug("3.\n");
121}
122
123static inline void smpboot_restore_warm_reset_vector(void)
124{
125 unsigned long flags;
126
127 /*
128 * Install writable page 0 entry to set BIOS data area.
129 */
130 local_flush_tlb();
131
132 /*
133 * Paranoid: Set warm reset code and vector here back
134 * to default values.
135 */
136 spin_lock_irqsave(&rtc_lock, flags);
137 CMOS_WRITE(0, 0xf);
138 spin_unlock_irqrestore(&rtc_lock, flags);
139
140 *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
141}
142
107/* 143/*
108 * Report back to the Boot Processor during boot time or to the caller processor 144 * Report back to the Boot Processor during boot time or to the caller processor
109 * during CPU online. 145 * during CPU online.
@@ -136,8 +172,7 @@ static void smp_callin(void)
136 * CPU, first the APIC. (this is probably redundant on most 172 * CPU, first the APIC. (this is probably redundant on most
137 * boards) 173 * boards)
138 */ 174 */
139 setup_local_APIC(); 175 apic_ap_setup();
140 end_local_APIC_setup();
141 176
142 /* 177 /*
143 * Need to setup vector mappings before we enable interrupts. 178 * Need to setup vector mappings before we enable interrupts.
@@ -955,9 +990,12 @@ void arch_disable_smp_support(void)
955 */ 990 */
956static __init void disable_smp(void) 991static __init void disable_smp(void)
957{ 992{
993 pr_info("SMP disabled\n");
994
995 disable_ioapic_support();
996
958 init_cpu_present(cpumask_of(0)); 997 init_cpu_present(cpumask_of(0));
959 init_cpu_possible(cpumask_of(0)); 998 init_cpu_possible(cpumask_of(0));
960 smpboot_clear_io_apic_irqs();
961 999
962 if (smp_found_config) 1000 if (smp_found_config)
963 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); 1001 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
@@ -967,6 +1005,13 @@ static __init void disable_smp(void)
967 cpumask_set_cpu(0, cpu_core_mask(0)); 1005 cpumask_set_cpu(0, cpu_core_mask(0));
968} 1006}
969 1007
1008enum {
1009 SMP_OK,
1010 SMP_NO_CONFIG,
1011 SMP_NO_APIC,
1012 SMP_FORCE_UP,
1013};
1014
970/* 1015/*
971 * Various sanity checks. 1016 * Various sanity checks.
972 */ 1017 */
@@ -1014,10 +1059,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1014 if (!smp_found_config && !acpi_lapic) { 1059 if (!smp_found_config && !acpi_lapic) {
1015 preempt_enable(); 1060 preempt_enable();
1016 pr_notice("SMP motherboard not detected\n"); 1061 pr_notice("SMP motherboard not detected\n");
1017 disable_smp(); 1062 return SMP_NO_CONFIG;
1018 if (APIC_init_uniprocessor())
1019 pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
1020 return -1;
1021 } 1063 }
1022 1064
1023 /* 1065 /*
@@ -1041,9 +1083,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
1041 boot_cpu_physical_apicid); 1083 boot_cpu_physical_apicid);
1042 pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n"); 1084 pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
1043 } 1085 }
1044 smpboot_clear_io_apic(); 1086 return SMP_NO_APIC;
1045 disable_ioapic_support();
1046 return -1;
1047 } 1087 }
1048 1088
1049 verify_local_APIC(); 1089 verify_local_APIC();
@@ -1053,15 +1093,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
1053 */ 1093 */
1054 if (!max_cpus) { 1094 if (!max_cpus) {
1055 pr_info("SMP mode deactivated\n"); 1095 pr_info("SMP mode deactivated\n");
1056 smpboot_clear_io_apic(); 1096 return SMP_FORCE_UP;
1057
1058 connect_bsp_APIC();
1059 setup_local_APIC();
1060 bsp_end_local_APIC_setup();
1061 return -1;
1062 } 1097 }
1063 1098
1064 return 0; 1099 return SMP_OK;
1065} 1100}
1066 1101
1067static void __init smp_cpu_index_default(void) 1102static void __init smp_cpu_index_default(void)
@@ -1101,10 +1136,21 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1101 } 1136 }
1102 set_cpu_sibling_map(0); 1137 set_cpu_sibling_map(0);
1103 1138
1104 if (smp_sanity_check(max_cpus) < 0) { 1139 switch (smp_sanity_check(max_cpus)) {
1105 pr_info("SMP disabled\n"); 1140 case SMP_NO_CONFIG:
1106 disable_smp(); 1141 disable_smp();
1142 if (APIC_init_uniprocessor())
1143 pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
1107 return; 1144 return;
1145 case SMP_NO_APIC:
1146 disable_smp();
1147 return;
1148 case SMP_FORCE_UP:
1149 disable_smp();
1150 apic_bsp_setup(false);
1151 return;
1152 case SMP_OK:
1153 break;
1108 } 1154 }
1109 1155
1110 default_setup_apic_routing(); 1156 default_setup_apic_routing();
@@ -1115,33 +1161,10 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1115 /* Or can we switch back to PIC here? */ 1161 /* Or can we switch back to PIC here? */
1116 } 1162 }
1117 1163
1118 connect_bsp_APIC(); 1164 cpu0_logical_apicid = apic_bsp_setup(false);
1119
1120 /*
1121 * Switch from PIC to APIC mode.
1122 */
1123 setup_local_APIC();
1124
1125 if (x2apic_mode)
1126 cpu0_logical_apicid = apic_read(APIC_LDR);
1127 else
1128 cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1129
1130 /*
1131 * Enable IO APIC before setting up error vector
1132 */
1133 if (!skip_ioapic_setup && nr_ioapics)
1134 enable_IO_APIC();
1135
1136 bsp_end_local_APIC_setup();
1137 smpboot_setup_io_apic();
1138 /*
1139 * Set up local APIC timer on boot CPU.
1140 */
1141 1165
1142 pr_info("CPU%d: ", 0); 1166 pr_info("CPU%d: ", 0);
1143 print_cpu_info(&cpu_data(0)); 1167 print_cpu_info(&cpu_data(0));
1144 x86_init.timers.setup_percpu_clockev();
1145 1168
1146 if (is_uv_system()) 1169 if (is_uv_system())
1147 uv_system_init(); 1170 uv_system_init();
@@ -1177,9 +1200,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1177 1200
1178 nmi_selftest(); 1201 nmi_selftest();
1179 impress_friends(); 1202 impress_friends();
1180#ifdef CONFIG_X86_IO_APIC
1181 setup_ioapic_dest(); 1203 setup_ioapic_dest();
1182#endif
1183 mtrr_aps_init(); 1204 mtrr_aps_init();
1184} 1205}
1185 1206
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 88900e288021..9d2073e2ecc9 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -108,6 +108,88 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
108 preempt_count_dec(); 108 preempt_count_dec();
109} 109}
110 110
111enum ctx_state ist_enter(struct pt_regs *regs)
112{
113 enum ctx_state prev_state;
114
115 if (user_mode_vm(regs)) {
116 /* Other than that, we're just an exception. */
117 prev_state = exception_enter();
118 } else {
119 /*
120 * We might have interrupted pretty much anything. In
121 * fact, if we're a machine check, we can even interrupt
122 * NMI processing. We don't want in_nmi() to return true,
123 * but we need to notify RCU.
124 */
125 rcu_nmi_enter();
126 prev_state = IN_KERNEL; /* the value is irrelevant. */
127 }
128
129 /*
130 * We are atomic because we're on the IST stack (or we're on x86_32,
131 * in which case we still shouldn't schedule).
132 *
133 * This must be after exception_enter(), because exception_enter()
134 * won't do anything if in_interrupt() returns true.
135 */
136 preempt_count_add(HARDIRQ_OFFSET);
137
138 /* This code is a bit fragile. Test it. */
139 rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
140
141 return prev_state;
142}
143
144void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
145{
146 /* Must be before exception_exit. */
147 preempt_count_sub(HARDIRQ_OFFSET);
148
149 if (user_mode_vm(regs))
150 return exception_exit(prev_state);
151 else
152 rcu_nmi_exit();
153}
154
155/**
156 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
157 * @regs: regs passed to the IST exception handler
158 *
159 * IST exception handlers normally cannot schedule. As a special
160 * exception, if the exception interrupted userspace code (i.e.
161 * user_mode_vm(regs) would return true) and the exception was not
162 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
163 * begins a non-atomic section within an ist_enter()/ist_exit() region.
164 * Callers are responsible for enabling interrupts themselves inside
165 * the non-atomic section, and callers must call is_end_non_atomic()
166 * before ist_exit().
167 */
168void ist_begin_non_atomic(struct pt_regs *regs)
169{
170 BUG_ON(!user_mode_vm(regs));
171
172 /*
173 * Sanity check: we need to be on the normal thread stack. This
174 * will catch asm bugs and any attempt to use ist_preempt_enable
175 * from double_fault.
176 */
177 BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
178 & ~(THREAD_SIZE - 1)) != 0);
179
180 preempt_count_sub(HARDIRQ_OFFSET);
181}
182
183/**
184 * ist_end_non_atomic() - begin a non-atomic section in an IST exception
185 *
186 * Ends a non-atomic section started with ist_begin_non_atomic().
187 */
188void ist_end_non_atomic(void)
189{
190 preempt_count_add(HARDIRQ_OFFSET);
191}
192
111static nokprobe_inline int 193static nokprobe_inline int
112do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, 194do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
113 struct pt_regs *regs, long error_code) 195 struct pt_regs *regs, long error_code)
@@ -251,6 +333,8 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
251 * end up promoting it to a doublefault. In that case, modify 333 * end up promoting it to a doublefault. In that case, modify
252 * the stack to make it look like we just entered the #GP 334 * the stack to make it look like we just entered the #GP
253 * handler from user space, similar to bad_iret. 335 * handler from user space, similar to bad_iret.
336 *
337 * No need for ist_enter here because we don't use RCU.
254 */ 338 */
255 if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && 339 if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
256 regs->cs == __KERNEL_CS && 340 regs->cs == __KERNEL_CS &&
@@ -263,12 +347,12 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
263 normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ 347 normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
264 regs->ip = (unsigned long)general_protection; 348 regs->ip = (unsigned long)general_protection;
265 regs->sp = (unsigned long)&normal_regs->orig_ax; 349 regs->sp = (unsigned long)&normal_regs->orig_ax;
350
266 return; 351 return;
267 } 352 }
268#endif 353#endif
269 354
270 exception_enter(); 355 ist_enter(regs); /* Discard prev_state because we won't return. */
271 /* Return not checked because double check cannot be ignored */
272 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 356 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
273 357
274 tsk->thread.error_code = error_code; 358 tsk->thread.error_code = error_code;
@@ -434,7 +518,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
434 if (poke_int3_handler(regs)) 518 if (poke_int3_handler(regs))
435 return; 519 return;
436 520
437 prev_state = exception_enter(); 521 prev_state = ist_enter(regs);
438#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 522#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
439 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 523 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
440 SIGTRAP) == NOTIFY_STOP) 524 SIGTRAP) == NOTIFY_STOP)
@@ -460,33 +544,20 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
460 preempt_conditional_cli(regs); 544 preempt_conditional_cli(regs);
461 debug_stack_usage_dec(); 545 debug_stack_usage_dec();
462exit: 546exit:
463 exception_exit(prev_state); 547 ist_exit(regs, prev_state);
464} 548}
465NOKPROBE_SYMBOL(do_int3); 549NOKPROBE_SYMBOL(do_int3);
466 550
467#ifdef CONFIG_X86_64 551#ifdef CONFIG_X86_64
468/* 552/*
469 * Help handler running on IST stack to switch back to user stack 553 * Help handler running on IST stack to switch off the IST stack if the
470 * for scheduling or signal handling. The actual stack switch is done in 554 * interrupted code was in user mode. The actual stack switch is done in
471 * entry.S 555 * entry_64.S
472 */ 556 */
473asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) 557asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
474{ 558{
475 struct pt_regs *regs = eregs; 559 struct pt_regs *regs = task_pt_regs(current);
476 /* Did already sync */ 560 *regs = *eregs;
477 if (eregs == (struct pt_regs *)eregs->sp)
478 ;
479 /* Exception from user space */
480 else if (user_mode(eregs))
481 regs = task_pt_regs(current);
482 /*
483 * Exception from kernel and interrupts are enabled. Move to
484 * kernel process stack.
485 */
486 else if (eregs->flags & X86_EFLAGS_IF)
487 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
488 if (eregs != regs)
489 *regs = *eregs;
490 return regs; 561 return regs;
491} 562}
492NOKPROBE_SYMBOL(sync_regs); 563NOKPROBE_SYMBOL(sync_regs);
@@ -554,7 +625,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
554 unsigned long dr6; 625 unsigned long dr6;
555 int si_code; 626 int si_code;
556 627
557 prev_state = exception_enter(); 628 prev_state = ist_enter(regs);
558 629
559 get_debugreg(dr6, 6); 630 get_debugreg(dr6, 6);
560 631
@@ -629,7 +700,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
629 debug_stack_usage_dec(); 700 debug_stack_usage_dec();
630 701
631exit: 702exit:
632 exception_exit(prev_state); 703 ist_exit(regs, prev_state);
633} 704}
634NOKPROBE_SYMBOL(do_debug); 705NOKPROBE_SYMBOL(do_debug);
635 706
@@ -788,18 +859,16 @@ void math_state_restore(void)
788 local_irq_disable(); 859 local_irq_disable();
789 } 860 }
790 861
862 /* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
863 kernel_fpu_disable();
791 __thread_fpu_begin(tsk); 864 __thread_fpu_begin(tsk);
792
793 /*
794 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
795 */
796 if (unlikely(restore_fpu_checking(tsk))) { 865 if (unlikely(restore_fpu_checking(tsk))) {
797 drop_init_fpu(tsk); 866 drop_init_fpu(tsk);
798 force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); 867 force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
799 return; 868 } else {
869 tsk->thread.fpu_counter++;
800 } 870 }
801 871 kernel_fpu_enable();
802 tsk->thread.fpu_counter++;
803} 872}
804EXPORT_SYMBOL_GPL(math_state_restore); 873EXPORT_SYMBOL_GPL(math_state_restore);
805 874
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index f9d16ff56c6b..7dc7ba577ecd 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -40,6 +40,7 @@ config KVM
40 select HAVE_KVM_MSI 40 select HAVE_KVM_MSI
41 select HAVE_KVM_CPU_RELAX_INTERCEPT 41 select HAVE_KVM_CPU_RELAX_INTERCEPT
42 select KVM_VFIO 42 select KVM_VFIO
43 select SRCU
43 ---help--- 44 ---help---
44 Support hosting fully virtualized guest machines using hardware 45 Support hosting fully virtualized guest machines using hardware
45 virtualization extensions. You will need a fairly recent 46 virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index cfd1b132b8e3..6ac273832f28 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -10,9 +10,6 @@
10struct pci_root_info { 10struct pci_root_info {
11 struct acpi_device *bridge; 11 struct acpi_device *bridge;
12 char name[16]; 12 char name[16];
13 unsigned int res_num;
14 struct resource *res;
15 resource_size_t *res_offset;
16 struct pci_sysdata sd; 13 struct pci_sysdata sd;
17#ifdef CONFIG_PCI_MMCONFIG 14#ifdef CONFIG_PCI_MMCONFIG
18 bool mcfg_added; 15 bool mcfg_added;
@@ -218,130 +215,41 @@ static void teardown_mcfg_map(struct pci_root_info *info)
218} 215}
219#endif 216#endif
220 217
221static acpi_status resource_to_addr(struct acpi_resource *resource, 218static void validate_resources(struct device *dev, struct list_head *crs_res,
222 struct acpi_resource_address64 *addr) 219 unsigned long type)
223{
224 acpi_status status;
225 struct acpi_resource_memory24 *memory24;
226 struct acpi_resource_memory32 *memory32;
227 struct acpi_resource_fixed_memory32 *fixed_memory32;
228
229 memset(addr, 0, sizeof(*addr));
230 switch (resource->type) {
231 case ACPI_RESOURCE_TYPE_MEMORY24:
232 memory24 = &resource->data.memory24;
233 addr->resource_type = ACPI_MEMORY_RANGE;
234 addr->minimum = memory24->minimum;
235 addr->address_length = memory24->address_length;
236 addr->maximum = addr->minimum + addr->address_length - 1;
237 return AE_OK;
238 case ACPI_RESOURCE_TYPE_MEMORY32:
239 memory32 = &resource->data.memory32;
240 addr->resource_type = ACPI_MEMORY_RANGE;
241 addr->minimum = memory32->minimum;
242 addr->address_length = memory32->address_length;
243 addr->maximum = addr->minimum + addr->address_length - 1;
244 return AE_OK;
245 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
246 fixed_memory32 = &resource->data.fixed_memory32;
247 addr->resource_type = ACPI_MEMORY_RANGE;
248 addr->minimum = fixed_memory32->address;
249 addr->address_length = fixed_memory32->address_length;
250 addr->maximum = addr->minimum + addr->address_length - 1;
251 return AE_OK;
252 case ACPI_RESOURCE_TYPE_ADDRESS16:
253 case ACPI_RESOURCE_TYPE_ADDRESS32:
254 case ACPI_RESOURCE_TYPE_ADDRESS64:
255 status = acpi_resource_to_address64(resource, addr);
256 if (ACPI_SUCCESS(status) &&
257 (addr->resource_type == ACPI_MEMORY_RANGE ||
258 addr->resource_type == ACPI_IO_RANGE) &&
259 addr->address_length > 0) {
260 return AE_OK;
261 }
262 break;
263 }
264 return AE_ERROR;
265}
266
267static acpi_status count_resource(struct acpi_resource *acpi_res, void *data)
268{ 220{
269 struct pci_root_info *info = data; 221 LIST_HEAD(list);
270 struct acpi_resource_address64 addr; 222 struct resource *res1, *res2, *root = NULL;
271 acpi_status status; 223 struct resource_entry *tmp, *entry, *entry2;
272
273 status = resource_to_addr(acpi_res, &addr);
274 if (ACPI_SUCCESS(status))
275 info->res_num++;
276 return AE_OK;
277}
278
279static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
280{
281 struct pci_root_info *info = data;
282 struct resource *res;
283 struct acpi_resource_address64 addr;
284 acpi_status status;
285 unsigned long flags;
286 u64 start, orig_end, end;
287
288 status = resource_to_addr(acpi_res, &addr);
289 if (!ACPI_SUCCESS(status))
290 return AE_OK;
291
292 if (addr.resource_type == ACPI_MEMORY_RANGE) {
293 flags = IORESOURCE_MEM;
294 if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
295 flags |= IORESOURCE_PREFETCH;
296 } else if (addr.resource_type == ACPI_IO_RANGE) {
297 flags = IORESOURCE_IO;
298 } else
299 return AE_OK;
300
301 start = addr.minimum + addr.translation_offset;
302 orig_end = end = addr.maximum + addr.translation_offset;
303
304 /* Exclude non-addressable range or non-addressable portion of range */
305 end = min(end, (u64)iomem_resource.end);
306 if (end <= start) {
307 dev_info(&info->bridge->dev,
308 "host bridge window [%#llx-%#llx] "
309 "(ignored, not CPU addressable)\n", start, orig_end);
310 return AE_OK;
311 } else if (orig_end != end) {
312 dev_info(&info->bridge->dev,
313 "host bridge window [%#llx-%#llx] "
314 "([%#llx-%#llx] ignored, not CPU addressable)\n",
315 start, orig_end, end + 1, orig_end);
316 }
317 224
318 res = &info->res[info->res_num]; 225 BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
319 res->name = info->name; 226 root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;
320 res->flags = flags;
321 res->start = start;
322 res->end = end;
323 info->res_offset[info->res_num] = addr.translation_offset;
324 info->res_num++;
325 227
326 if (!pci_use_crs) 228 list_splice_init(crs_res, &list);
327 dev_printk(KERN_DEBUG, &info->bridge->dev, 229 resource_list_for_each_entry_safe(entry, tmp, &list) {
328 "host bridge window %pR (ignored)\n", res); 230 bool free = false;
231 resource_size_t end;
329 232
330 return AE_OK; 233 res1 = entry->res;
331}
332
333static void coalesce_windows(struct pci_root_info *info, unsigned long type)
334{
335 int i, j;
336 struct resource *res1, *res2;
337
338 for (i = 0; i < info->res_num; i++) {
339 res1 = &info->res[i];
340 if (!(res1->flags & type)) 234 if (!(res1->flags & type))
341 continue; 235 goto next;
236
237 /* Exclude non-addressable range or non-addressable portion */
238 end = min(res1->end, root->end);
239 if (end <= res1->start) {
240 dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
241 res1);
242 free = true;
243 goto next;
244 } else if (res1->end != end) {
245 dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
246 res1, (unsigned long long)end + 1,
247 (unsigned long long)res1->end);
248 res1->end = end;
249 }
342 250
343 for (j = i + 1; j < info->res_num; j++) { 251 resource_list_for_each_entry(entry2, crs_res) {
344 res2 = &info->res[j]; 252 res2 = entry2->res;
345 if (!(res2->flags & type)) 253 if (!(res2->flags & type))
346 continue; 254 continue;
347 255
@@ -353,118 +261,92 @@ static void coalesce_windows(struct pci_root_info *info, unsigned long type)
353 if (resource_overlaps(res1, res2)) { 261 if (resource_overlaps(res1, res2)) {
354 res2->start = min(res1->start, res2->start); 262 res2->start = min(res1->start, res2->start);
355 res2->end = max(res1->end, res2->end); 263 res2->end = max(res1->end, res2->end);
356 dev_info(&info->bridge->dev, 264 dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
357 "host bridge window expanded to %pR; %pR ignored\n",
358 res2, res1); 265 res2, res1);
359 res1->flags = 0; 266 free = true;
267 goto next;
360 } 268 }
361 } 269 }
270
271next:
272 resource_list_del(entry);
273 if (free)
274 resource_list_free_entry(entry);
275 else
276 resource_list_add_tail(entry, crs_res);
362 } 277 }
363} 278}
364 279
365static void add_resources(struct pci_root_info *info, 280static void add_resources(struct pci_root_info *info,
366 struct list_head *resources) 281 struct list_head *resources,
282 struct list_head *crs_res)
367{ 283{
368 int i; 284 struct resource_entry *entry, *tmp;
369 struct resource *res, *root, *conflict; 285 struct resource *res, *conflict, *root = NULL;
370
371 coalesce_windows(info, IORESOURCE_MEM);
372 coalesce_windows(info, IORESOURCE_IO);
373 286
374 for (i = 0; i < info->res_num; i++) { 287 validate_resources(&info->bridge->dev, crs_res, IORESOURCE_MEM);
375 res = &info->res[i]; 288 validate_resources(&info->bridge->dev, crs_res, IORESOURCE_IO);
376 289
290 resource_list_for_each_entry_safe(entry, tmp, crs_res) {
291 res = entry->res;
377 if (res->flags & IORESOURCE_MEM) 292 if (res->flags & IORESOURCE_MEM)
378 root = &iomem_resource; 293 root = &iomem_resource;
379 else if (res->flags & IORESOURCE_IO) 294 else if (res->flags & IORESOURCE_IO)
380 root = &ioport_resource; 295 root = &ioport_resource;
381 else 296 else
382 continue; 297 BUG_ON(res);
383 298
384 conflict = insert_resource_conflict(root, res); 299 conflict = insert_resource_conflict(root, res);
385 if (conflict) 300 if (conflict) {
386 dev_info(&info->bridge->dev, 301 dev_info(&info->bridge->dev,
387 "ignoring host bridge window %pR (conflicts with %s %pR)\n", 302 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
388 res, conflict->name, conflict); 303 res, conflict->name, conflict);
389 else 304 resource_list_destroy_entry(entry);
390 pci_add_resource_offset(resources, res, 305 }
391 info->res_offset[i]);
392 } 306 }
393}
394 307
395static void free_pci_root_info_res(struct pci_root_info *info) 308 list_splice_tail(crs_res, resources);
396{
397 kfree(info->res);
398 info->res = NULL;
399 kfree(info->res_offset);
400 info->res_offset = NULL;
401 info->res_num = 0;
402} 309}
403 310
404static void __release_pci_root_info(struct pci_root_info *info) 311static void release_pci_root_info(struct pci_host_bridge *bridge)
405{ 312{
406 int i;
407 struct resource *res; 313 struct resource *res;
314 struct resource_entry *entry;
315 struct pci_root_info *info = bridge->release_data;
408 316
409 for (i = 0; i < info->res_num; i++) { 317 resource_list_for_each_entry(entry, &bridge->windows) {
410 res = &info->res[i]; 318 res = entry->res;
411 319 if (res->parent &&
412 if (!res->parent) 320 (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
413 continue; 321 release_resource(res);
414
415 if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
416 continue;
417
418 release_resource(res);
419 } 322 }
420 323
421 free_pci_root_info_res(info);
422
423 teardown_mcfg_map(info); 324 teardown_mcfg_map(info);
424
425 kfree(info); 325 kfree(info);
426} 326}
427 327
428static void release_pci_root_info(struct pci_host_bridge *bridge)
429{
430 struct pci_root_info *info = bridge->release_data;
431
432 __release_pci_root_info(info);
433}
434
435static void probe_pci_root_info(struct pci_root_info *info, 328static void probe_pci_root_info(struct pci_root_info *info,
436 struct acpi_device *device, 329 struct acpi_device *device,
437 int busnum, int domain) 330 int busnum, int domain,
331 struct list_head *list)
438{ 332{
439 size_t size; 333 int ret;
334 struct resource_entry *entry;
440 335
441 sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); 336 sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
442 info->bridge = device; 337 info->bridge = device;
443 338 ret = acpi_dev_get_resources(device, list,
444 info->res_num = 0; 339 acpi_dev_filter_resource_type_cb,
445 acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource, 340 (void *)(IORESOURCE_IO | IORESOURCE_MEM));
446 info); 341 if (ret < 0)
447 if (!info->res_num) 342 dev_warn(&device->dev,
448 return; 343 "failed to parse _CRS method, error code %d\n", ret);
449 344 else if (ret == 0)
450 size = sizeof(*info->res) * info->res_num; 345 dev_dbg(&device->dev,
451 info->res = kzalloc_node(size, GFP_KERNEL, info->sd.node); 346 "no IO and memory resources present in _CRS\n");
452 if (!info->res) { 347 else
453 info->res_num = 0; 348 resource_list_for_each_entry(entry, list)
454 return; 349 entry->res->name = info->name;
455 }
456
457 size = sizeof(*info->res_offset) * info->res_num;
458 info->res_num = 0;
459 info->res_offset = kzalloc_node(size, GFP_KERNEL, info->sd.node);
460 if (!info->res_offset) {
461 kfree(info->res);
462 info->res = NULL;
463 return;
464 }
465
466 acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
467 info);
468} 350}
469 351
470struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) 352struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
@@ -473,6 +355,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
473 struct pci_root_info *info; 355 struct pci_root_info *info;
474 int domain = root->segment; 356 int domain = root->segment;
475 int busnum = root->secondary.start; 357 int busnum = root->secondary.start;
358 struct resource_entry *res_entry;
359 LIST_HEAD(crs_res);
476 LIST_HEAD(resources); 360 LIST_HEAD(resources);
477 struct pci_bus *bus; 361 struct pci_bus *bus;
478 struct pci_sysdata *sd; 362 struct pci_sysdata *sd;
@@ -520,18 +404,22 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
520 memcpy(bus->sysdata, sd, sizeof(*sd)); 404 memcpy(bus->sysdata, sd, sizeof(*sd));
521 kfree(info); 405 kfree(info);
522 } else { 406 } else {
523 probe_pci_root_info(info, device, busnum, domain);
524
525 /* insert busn res at first */ 407 /* insert busn res at first */
526 pci_add_resource(&resources, &root->secondary); 408 pci_add_resource(&resources, &root->secondary);
409
527 /* 410 /*
528 * _CRS with no apertures is normal, so only fall back to 411 * _CRS with no apertures is normal, so only fall back to
529 * defaults or native bridge info if we're ignoring _CRS. 412 * defaults or native bridge info if we're ignoring _CRS.
530 */ 413 */
531 if (pci_use_crs) 414 probe_pci_root_info(info, device, busnum, domain, &crs_res);
532 add_resources(info, &resources); 415 if (pci_use_crs) {
533 else { 416 add_resources(info, &resources, &crs_res);
534 free_pci_root_info_res(info); 417 } else {
418 resource_list_for_each_entry(res_entry, &crs_res)
419 dev_printk(KERN_DEBUG, &device->dev,
420 "host bridge window %pR (ignored)\n",
421 res_entry->res);
422 resource_list_free(&crs_res);
535 x86_pci_root_bus_resources(busnum, &resources); 423 x86_pci_root_bus_resources(busnum, &resources);
536 } 424 }
537 425
@@ -546,8 +434,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
546 to_pci_host_bridge(bus->bridge), 434 to_pci_host_bridge(bus->bridge),
547 release_pci_root_info, info); 435 release_pci_root_info, info);
548 } else { 436 } else {
549 pci_free_resource_list(&resources); 437 resource_list_free(&resources);
550 __release_pci_root_info(info); 438 teardown_mcfg_map(info);
439 kfree(info);
551 } 440 }
552 } 441 }
553 442
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index f3a2cfc14125..7bcf06a7cd12 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
31{ 31{
32 struct pci_root_info *info = x86_find_pci_root_info(bus); 32 struct pci_root_info *info = x86_find_pci_root_info(bus);
33 struct pci_root_res *root_res; 33 struct pci_root_res *root_res;
34 struct pci_host_bridge_window *window; 34 struct resource_entry *window;
35 bool found = false; 35 bool found = false;
36 36
37 if (!info) 37 if (!info)
@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
41 bus); 41 bus);
42 42
43 /* already added by acpi ? */ 43 /* already added by acpi ? */
44 list_for_each_entry(window, resources, list) 44 resource_list_for_each_entry(window, resources)
45 if (window->res->flags & IORESOURCE_BUS) { 45 if (window->res->flags & IORESOURCE_BUS) {
46 found = true; 46 found = true;
47 break; 47 break;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2fb384724ebb..3d2612b68694 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -513,6 +513,31 @@ void __init pcibios_set_cache_line_size(void)
513 } 513 }
514} 514}
515 515
516/*
517 * Some device drivers assume dev->irq won't change after calling
518 * pci_disable_device(). So delay releasing of IRQ resource to driver
519 * unbinding time. Otherwise it will break PM subsystem and drivers
520 * like xen-pciback etc.
521 */
522static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
523 void *data)
524{
525 struct pci_dev *dev = to_pci_dev(data);
526
527 if (action != BUS_NOTIFY_UNBOUND_DRIVER)
528 return NOTIFY_DONE;
529
530 if (pcibios_disable_irq)
531 pcibios_disable_irq(dev);
532
533 return NOTIFY_OK;
534}
535
536static struct notifier_block pci_irq_nb = {
537 .notifier_call = pci_irq_notifier,
538 .priority = INT_MIN,
539};
540
516int __init pcibios_init(void) 541int __init pcibios_init(void)
517{ 542{
518 if (!raw_pci_ops) { 543 if (!raw_pci_ops) {
@@ -525,6 +550,9 @@ int __init pcibios_init(void)
525 550
526 if (pci_bf_sort >= pci_force_bf) 551 if (pci_bf_sort >= pci_force_bf)
527 pci_sort_breadthfirst(); 552 pci_sort_breadthfirst();
553
554 bus_register_notifier(&pci_bus_type, &pci_irq_nb);
555
528 return 0; 556 return 0;
529} 557}
530 558
@@ -683,12 +711,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
683 return 0; 711 return 0;
684} 712}
685 713
686void pcibios_disable_device (struct pci_dev *dev)
687{
688 if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
689 pcibios_disable_irq(dev);
690}
691
692int pci_ext_cfg_avail(void) 714int pci_ext_cfg_avail(void)
693{ 715{
694 if (raw_pci_ext_ops) 716 if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 44b9271580b5..efb849323c74 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
234 234
235static void intel_mid_pci_irq_disable(struct pci_dev *dev) 235static void intel_mid_pci_irq_disable(struct pci_dev *dev)
236{ 236{
237 if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && 237 if (dev->irq_managed && dev->irq > 0) {
238 dev->irq > 0) {
239 mp_unmap_irq(dev->irq); 238 mp_unmap_irq(dev->irq);
240 dev->irq_managed = 0; 239 dev->irq_managed = 0;
240 dev->irq = 0;
241 } 241 }
242} 242}
243 243
@@ -293,7 +293,6 @@ static void mrst_power_off_unused_dev(struct pci_dev *dev)
293DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev); 293DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
294DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev); 294DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
295DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev); 295DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
296DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
297DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev); 296DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
298 297
299/* 298/*
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 5dc6ca5e1741..e71b3dbd87b8 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1256,22 +1256,9 @@ static int pirq_enable_irq(struct pci_dev *dev)
1256 return 0; 1256 return 0;
1257} 1257}
1258 1258
1259bool mp_should_keep_irq(struct device *dev)
1260{
1261 if (dev->power.is_prepared)
1262 return true;
1263#ifdef CONFIG_PM
1264 if (dev->power.runtime_status == RPM_SUSPENDING)
1265 return true;
1266#endif
1267
1268 return false;
1269}
1270
1271static void pirq_disable_irq(struct pci_dev *dev) 1259static void pirq_disable_irq(struct pci_dev *dev)
1272{ 1260{
1273 if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && 1261 if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
1274 dev->irq_managed && dev->irq) {
1275 mp_unmap_irq(dev->irq); 1262 mp_unmap_irq(dev->irq);
1276 dev->irq = 0; 1263 dev->irq = 0;
1277 dev->irq_managed = 0; 1264 dev->irq_managed = 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 326198a4434e..dd30b7e08bc2 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -397,12 +397,12 @@ static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
397 397
398 status = acpi_resource_to_address64(res, &address); 398 status = acpi_resource_to_address64(res, &address);
399 if (ACPI_FAILURE(status) || 399 if (ACPI_FAILURE(status) ||
400 (address.address_length <= 0) || 400 (address.address.address_length <= 0) ||
401 (address.resource_type != ACPI_MEMORY_RANGE)) 401 (address.resource_type != ACPI_MEMORY_RANGE))
402 return AE_OK; 402 return AE_OK;
403 403
404 if ((mcfg_res->start >= address.minimum) && 404 if ((mcfg_res->start >= address.address.minimum) &&
405 (mcfg_res->end < (address.minimum + address.address_length))) { 405 (mcfg_res->end < (address.address.minimum + address.address.address_length))) {
406 mcfg_res->flags = 1; 406 mcfg_res->flags = 1;
407 return AE_CTRL_TERMINATE; 407 return AE_CTRL_TERMINATE;
408 } 408 }
@@ -610,6 +610,32 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
610 return 0; 610 return 0;
611} 611}
612 612
613#ifdef CONFIG_ACPI_APEI
614extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
615 void *data), void *data);
616
617static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
618 void *data), void *data)
619{
620 struct pci_mmcfg_region *cfg;
621 int rc;
622
623 if (list_empty(&pci_mmcfg_list))
624 return 0;
625
626 list_for_each_entry(cfg, &pci_mmcfg_list, list) {
627 rc = func(cfg->res.start, resource_size(&cfg->res), data);
628 if (rc)
629 return rc;
630 }
631
632 return 0;
633}
634#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
635#else
636#define set_apei_filter()
637#endif
638
613static void __init __pci_mmcfg_init(int early) 639static void __init __pci_mmcfg_init(int early)
614{ 640{
615 pci_mmcfg_reject_broken(early); 641 pci_mmcfg_reject_broken(early);
@@ -644,6 +670,8 @@ void __init pci_mmcfg_early_init(void)
644 else 670 else
645 acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg); 671 acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
646 __pci_mmcfg_init(1); 672 __pci_mmcfg_init(1);
673
674 set_apei_filter();
647 } 675 }
648} 676}
649 677
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 9098d880c476..d22f4b5bbc04 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -298,12 +298,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
298 map_irq.entry_nr = nvec; 298 map_irq.entry_nr = nvec;
299 } else if (type == PCI_CAP_ID_MSIX) { 299 } else if (type == PCI_CAP_ID_MSIX) {
300 int pos; 300 int pos;
301 unsigned long flags;
301 u32 table_offset, bir; 302 u32 table_offset, bir;
302 303
303 pos = dev->msix_cap; 304 pos = dev->msix_cap;
304 pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, 305 pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
305 &table_offset); 306 &table_offset);
306 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); 307 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
308 flags = pci_resource_flags(dev, bir);
309 if (!flags || (flags & IORESOURCE_UNSET))
310 return -EINVAL;
307 311
308 map_irq.table_base = pci_resource_start(dev, bir); 312 map_irq.table_base = pci_resource_start(dev, bir);
309 map_irq.entry_nr = msidesc->msi_attrib.entry_nr; 313 map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 5a4affe025e8..09297c8e1fcd 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -205,4 +205,4 @@ $(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
205PHONY += vdso_install $(vdso_img_insttargets) 205PHONY += vdso_install $(vdso_img_insttargets)
206vdso_install: $(vdso_img_insttargets) FORCE 206vdso_install: $(vdso_img_insttargets) FORCE
207 207
208clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* 208clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64*
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5c1f9ace7ae7..adca9e2b6553 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1489,7 +1489,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1489 native_set_pte(ptep, pte); 1489 native_set_pte(ptep, pte);
1490} 1490}
1491 1491
1492static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) 1492static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1493{ 1493{
1494 struct mmuext_op op; 1494 struct mmuext_op op;
1495 op.cmd = cmd; 1495 op.cmd = cmd;
@@ -1657,7 +1657,7 @@ void __init xen_reserve_top(void)
1657 * Like __va(), but returns address in the kernel mapping (which is 1657 * Like __va(), but returns address in the kernel mapping (which is
1658 * all we have until the physical memory mapping has been set up. 1658 * all we have until the physical memory mapping has been set up.
1659 */ 1659 */
1660static void *__ka(phys_addr_t paddr) 1660static void * __init __ka(phys_addr_t paddr)
1661{ 1661{
1662#ifdef CONFIG_X86_64 1662#ifdef CONFIG_X86_64
1663 return (void *)(paddr + __START_KERNEL_map); 1663 return (void *)(paddr + __START_KERNEL_map);
@@ -1667,7 +1667,7 @@ static void *__ka(phys_addr_t paddr)
1667} 1667}
1668 1668
1669/* Convert a machine address to physical address */ 1669/* Convert a machine address to physical address */
1670static unsigned long m2p(phys_addr_t maddr) 1670static unsigned long __init m2p(phys_addr_t maddr)
1671{ 1671{
1672 phys_addr_t paddr; 1672 phys_addr_t paddr;
1673 1673
@@ -1678,13 +1678,14 @@ static unsigned long m2p(phys_addr_t maddr)
1678} 1678}
1679 1679
1680/* Convert a machine address to kernel virtual */ 1680/* Convert a machine address to kernel virtual */
1681static void *m2v(phys_addr_t maddr) 1681static void * __init m2v(phys_addr_t maddr)
1682{ 1682{
1683 return __ka(m2p(maddr)); 1683 return __ka(m2p(maddr));
1684} 1684}
1685 1685
1686/* Set the page permissions on an identity-mapped pages */ 1686/* Set the page permissions on an identity-mapped pages */
1687static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) 1687static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1688 unsigned long flags)
1688{ 1689{
1689 unsigned long pfn = __pa(addr) >> PAGE_SHIFT; 1690 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1690 pte_t pte = pfn_pte(pfn, prot); 1691 pte_t pte = pfn_pte(pfn, prot);
@@ -1696,7 +1697,7 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1696 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) 1697 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1697 BUG(); 1698 BUG();
1698} 1699}
1699static void set_page_prot(void *addr, pgprot_t prot) 1700static void __init set_page_prot(void *addr, pgprot_t prot)
1700{ 1701{
1701 return set_page_prot_flags(addr, prot, UVMF_NONE); 1702 return set_page_prot_flags(addr, prot, UVMF_NONE);
1702} 1703}
@@ -1733,10 +1734,8 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1733 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { 1734 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1734 pte_t pte; 1735 pte_t pte;
1735 1736
1736#ifdef CONFIG_X86_32
1737 if (pfn > max_pfn_mapped) 1737 if (pfn > max_pfn_mapped)
1738 max_pfn_mapped = pfn; 1738 max_pfn_mapped = pfn;
1739#endif
1740 1739
1741 if (!pte_none(pte_page[pteidx])) 1740 if (!pte_none(pte_page[pteidx]))
1742 continue; 1741 continue;
@@ -1769,7 +1768,7 @@ void __init xen_setup_machphys_mapping(void)
1769} 1768}
1770 1769
1771#ifdef CONFIG_X86_64 1770#ifdef CONFIG_X86_64
1772static void convert_pfn_mfn(void *v) 1771static void __init convert_pfn_mfn(void *v)
1773{ 1772{
1774 pte_t *pte = v; 1773 pte_t *pte = v;
1775 int i; 1774 int i;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 70fb5075c901..f18fd1d411f6 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -84,8 +84,6 @@
84 84
85#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE) 85#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
86 86
87static void __init m2p_override_init(void);
88
89unsigned long *xen_p2m_addr __read_mostly; 87unsigned long *xen_p2m_addr __read_mostly;
90EXPORT_SYMBOL_GPL(xen_p2m_addr); 88EXPORT_SYMBOL_GPL(xen_p2m_addr);
91unsigned long xen_p2m_size __read_mostly; 89unsigned long xen_p2m_size __read_mostly;
@@ -402,8 +400,6 @@ void __init xen_vmalloc_p2m_tree(void)
402 xen_p2m_size = xen_max_p2m_pfn; 400 xen_p2m_size = xen_max_p2m_pfn;
403 401
404 xen_inv_extra_mem(); 402 xen_inv_extra_mem();
405
406 m2p_override_init();
407} 403}
408 404
409unsigned long get_phys_to_machine(unsigned long pfn) 405unsigned long get_phys_to_machine(unsigned long pfn)
@@ -652,100 +648,21 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
652 return true; 648 return true;
653} 649}
654 650
655#define M2P_OVERRIDE_HASH_SHIFT 10
656#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT)
657
658static struct list_head *m2p_overrides;
659static DEFINE_SPINLOCK(m2p_override_lock);
660
661static void __init m2p_override_init(void)
662{
663 unsigned i;
664
665 m2p_overrides = alloc_bootmem_align(
666 sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
667 sizeof(unsigned long));
668
669 for (i = 0; i < M2P_OVERRIDE_HASH; i++)
670 INIT_LIST_HEAD(&m2p_overrides[i]);
671}
672
673static unsigned long mfn_hash(unsigned long mfn)
674{
675 return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
676}
677
678/* Add an MFN override for a particular page */
679static int m2p_add_override(unsigned long mfn, struct page *page,
680 struct gnttab_map_grant_ref *kmap_op)
681{
682 unsigned long flags;
683 unsigned long pfn;
684 unsigned long uninitialized_var(address);
685 unsigned level;
686 pte_t *ptep = NULL;
687
688 pfn = page_to_pfn(page);
689 if (!PageHighMem(page)) {
690 address = (unsigned long)__va(pfn << PAGE_SHIFT);
691 ptep = lookup_address(address, &level);
692 if (WARN(ptep == NULL || level != PG_LEVEL_4K,
693 "m2p_add_override: pfn %lx not mapped", pfn))
694 return -EINVAL;
695 }
696
697 if (kmap_op != NULL) {
698 if (!PageHighMem(page)) {
699 struct multicall_space mcs =
700 xen_mc_entry(sizeof(*kmap_op));
701
702 MULTI_grant_table_op(mcs.mc,
703 GNTTABOP_map_grant_ref, kmap_op, 1);
704
705 xen_mc_issue(PARAVIRT_LAZY_MMU);
706 }
707 }
708 spin_lock_irqsave(&m2p_override_lock, flags);
709 list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
710 spin_unlock_irqrestore(&m2p_override_lock, flags);
711
712 /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
713 * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
714 * pfn so that the following mfn_to_pfn(mfn) calls will return the
715 * pfn from the m2p_override (the backend pfn) instead.
716 * We need to do this because the pages shared by the frontend
717 * (xen-blkfront) can be already locked (lock_page, called by
718 * do_read_cache_page); when the userspace backend tries to use them
719 * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
720 * do_blockdev_direct_IO is going to try to lock the same pages
721 * again resulting in a deadlock.
722 * As a side effect get_user_pages_fast might not be safe on the
723 * frontend pages while they are being shared with the backend,
724 * because mfn_to_pfn (that ends up being called by GUPF) will
725 * return the backend pfn rather than the frontend pfn. */
726 pfn = mfn_to_pfn_no_overrides(mfn);
727 if (__pfn_to_mfn(pfn) == mfn)
728 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
729
730 return 0;
731}
732
733int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 651int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
734 struct gnttab_map_grant_ref *kmap_ops, 652 struct gnttab_map_grant_ref *kmap_ops,
735 struct page **pages, unsigned int count) 653 struct page **pages, unsigned int count)
736{ 654{
737 int i, ret = 0; 655 int i, ret = 0;
738 bool lazy = false;
739 pte_t *pte; 656 pte_t *pte;
740 657
741 if (xen_feature(XENFEAT_auto_translated_physmap)) 658 if (xen_feature(XENFEAT_auto_translated_physmap))
742 return 0; 659 return 0;
743 660
744 if (kmap_ops && 661 if (kmap_ops) {
745 !in_interrupt() && 662 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
746 paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 663 kmap_ops, count);
747 arch_enter_lazy_mmu_mode(); 664 if (ret)
748 lazy = true; 665 goto out;
749 } 666 }
750 667
751 for (i = 0; i < count; i++) { 668 for (i = 0; i < count; i++) {
@@ -764,170 +681,28 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
764 } 681 }
765 pfn = page_to_pfn(pages[i]); 682 pfn = page_to_pfn(pages[i]);
766 683
767 WARN_ON(PagePrivate(pages[i])); 684 WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
768 SetPagePrivate(pages[i]);
769 set_page_private(pages[i], mfn);
770 pages[i]->index = pfn_to_mfn(pfn);
771 685
772 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { 686 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
773 ret = -ENOMEM; 687 ret = -ENOMEM;
774 goto out; 688 goto out;
775 } 689 }
776
777 if (kmap_ops) {
778 ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
779 if (ret)
780 goto out;
781 }
782 } 690 }
783 691
784out: 692out:
785 if (lazy)
786 arch_leave_lazy_mmu_mode();
787
788 return ret; 693 return ret;
789} 694}
790EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); 695EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
791 696
792static struct page *m2p_find_override(unsigned long mfn)
793{
794 unsigned long flags;
795 struct list_head *bucket;
796 struct page *p, *ret;
797
798 if (unlikely(!m2p_overrides))
799 return NULL;
800
801 ret = NULL;
802 bucket = &m2p_overrides[mfn_hash(mfn)];
803
804 spin_lock_irqsave(&m2p_override_lock, flags);
805
806 list_for_each_entry(p, bucket, lru) {
807 if (page_private(p) == mfn) {
808 ret = p;
809 break;
810 }
811 }
812
813 spin_unlock_irqrestore(&m2p_override_lock, flags);
814
815 return ret;
816}
817
818static int m2p_remove_override(struct page *page,
819 struct gnttab_map_grant_ref *kmap_op,
820 unsigned long mfn)
821{
822 unsigned long flags;
823 unsigned long pfn;
824 unsigned long uninitialized_var(address);
825 unsigned level;
826 pte_t *ptep = NULL;
827
828 pfn = page_to_pfn(page);
829
830 if (!PageHighMem(page)) {
831 address = (unsigned long)__va(pfn << PAGE_SHIFT);
832 ptep = lookup_address(address, &level);
833
834 if (WARN(ptep == NULL || level != PG_LEVEL_4K,
835 "m2p_remove_override: pfn %lx not mapped", pfn))
836 return -EINVAL;
837 }
838
839 spin_lock_irqsave(&m2p_override_lock, flags);
840 list_del(&page->lru);
841 spin_unlock_irqrestore(&m2p_override_lock, flags);
842
843 if (kmap_op != NULL) {
844 if (!PageHighMem(page)) {
845 struct multicall_space mcs;
846 struct gnttab_unmap_and_replace *unmap_op;
847 struct page *scratch_page = get_balloon_scratch_page();
848 unsigned long scratch_page_address = (unsigned long)
849 __va(page_to_pfn(scratch_page) << PAGE_SHIFT);
850
851 /*
852 * It might be that we queued all the m2p grant table
853 * hypercalls in a multicall, then m2p_remove_override
854 * get called before the multicall has actually been
855 * issued. In this case handle is going to -1 because
856 * it hasn't been modified yet.
857 */
858 if (kmap_op->handle == -1)
859 xen_mc_flush();
860 /*
861 * Now if kmap_op->handle is negative it means that the
862 * hypercall actually returned an error.
863 */
864 if (kmap_op->handle == GNTST_general_error) {
865 pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
866 pfn, mfn);
867 put_balloon_scratch_page();
868 return -1;
869 }
870
871 xen_mc_batch();
872
873 mcs = __xen_mc_entry(
874 sizeof(struct gnttab_unmap_and_replace));
875 unmap_op = mcs.args;
876 unmap_op->host_addr = kmap_op->host_addr;
877 unmap_op->new_addr = scratch_page_address;
878 unmap_op->handle = kmap_op->handle;
879
880 MULTI_grant_table_op(mcs.mc,
881 GNTTABOP_unmap_and_replace, unmap_op, 1);
882
883 mcs = __xen_mc_entry(0);
884 MULTI_update_va_mapping(mcs.mc, scratch_page_address,
885 pfn_pte(page_to_pfn(scratch_page),
886 PAGE_KERNEL_RO), 0);
887
888 xen_mc_issue(PARAVIRT_LAZY_MMU);
889
890 kmap_op->host_addr = 0;
891 put_balloon_scratch_page();
892 }
893 }
894
895 /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
896 * somewhere in this domain, even before being added to the
897 * m2p_override (see comment above in m2p_add_override).
898 * If there are no other entries in the m2p_override corresponding
899 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
900 * the original pfn (the one shared by the frontend): the backend
901 * cannot do any IO on this page anymore because it has been
902 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
903 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
904 * pfn again. */
905 mfn &= ~FOREIGN_FRAME_BIT;
906 pfn = mfn_to_pfn_no_overrides(mfn);
907 if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
908 m2p_find_override(mfn) == NULL)
909 set_phys_to_machine(pfn, mfn);
910
911 return 0;
912}
913
914int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 697int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
915 struct gnttab_map_grant_ref *kmap_ops, 698 struct gnttab_unmap_grant_ref *kunmap_ops,
916 struct page **pages, unsigned int count) 699 struct page **pages, unsigned int count)
917{ 700{
918 int i, ret = 0; 701 int i, ret = 0;
919 bool lazy = false;
920 702
921 if (xen_feature(XENFEAT_auto_translated_physmap)) 703 if (xen_feature(XENFEAT_auto_translated_physmap))
922 return 0; 704 return 0;
923 705
924 if (kmap_ops &&
925 !in_interrupt() &&
926 paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
927 arch_enter_lazy_mmu_mode();
928 lazy = true;
929 }
930
931 for (i = 0; i < count; i++) { 706 for (i = 0; i < count; i++) {
932 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); 707 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
933 unsigned long pfn = page_to_pfn(pages[i]); 708 unsigned long pfn = page_to_pfn(pages[i]);
@@ -937,36 +712,16 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
937 goto out; 712 goto out;
938 } 713 }
939 714
940 set_page_private(pages[i], INVALID_P2M_ENTRY); 715 set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
941 WARN_ON(!PagePrivate(pages[i]));
942 ClearPagePrivate(pages[i]);
943 set_phys_to_machine(pfn, pages[i]->index);
944
945 if (kmap_ops)
946 ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
947 if (ret)
948 goto out;
949 } 716 }
950 717 if (kunmap_ops)
718 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
719 kunmap_ops, count);
951out: 720out:
952 if (lazy)
953 arch_leave_lazy_mmu_mode();
954 return ret; 721 return ret;
955} 722}
956EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); 723EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
957 724
958unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
959{
960 struct page *p = m2p_find_override(mfn);
961 unsigned long ret = pfn;
962
963 if (p)
964 ret = page_to_pfn(p);
965
966 return ret;
967}
968EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
969
970#ifdef CONFIG_XEN_DEBUG_FS 725#ifdef CONFIG_XEN_DEBUG_FS
971#include <linux/debugfs.h> 726#include <linux/debugfs.h>
972#include "debugfs.h" 727#include "debugfs.h"
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 865e56cea7a0..55f388ef481a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -32,16 +32,6 @@
32#include "p2m.h" 32#include "p2m.h"
33#include "mmu.h" 33#include "mmu.h"
34 34
35/* These are code, but not functions. Defined in entry.S */
36extern const char xen_hypervisor_callback[];
37extern const char xen_failsafe_callback[];
38#ifdef CONFIG_X86_64
39extern asmlinkage void nmi(void);
40#endif
41extern void xen_sysenter_target(void);
42extern void xen_syscall_target(void);
43extern void xen_syscall32_target(void);
44
45/* Amount of extra memory space we add to the e820 ranges */ 35/* Amount of extra memory space we add to the e820 ranges */
46struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; 36struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
47 37
@@ -74,7 +64,7 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
74 */ 64 */
75#define EXTRA_MEM_RATIO (10) 65#define EXTRA_MEM_RATIO (10)
76 66
77static void __init xen_add_extra_mem(u64 start, u64 size) 67static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
78{ 68{
79 int i; 69 int i;
80 70
@@ -97,10 +87,10 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
97 memblock_reserve(start, size); 87 memblock_reserve(start, size);
98} 88}
99 89
100static void __init xen_del_extra_mem(u64 start, u64 size) 90static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
101{ 91{
102 int i; 92 int i;
103 u64 start_r, size_r; 93 phys_addr_t start_r, size_r;
104 94
105 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 95 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
106 start_r = xen_extra_mem[i].start; 96 start_r = xen_extra_mem[i].start;
@@ -267,7 +257,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
267static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) 257static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
268{ 258{
269 struct mmu_update update = { 259 struct mmu_update update = {
270 .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, 260 .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
271 .val = pfn 261 .val = pfn
272 }; 262 };
273 263
@@ -545,20 +535,21 @@ static unsigned long __init xen_get_max_pages(void)
545 return min(max_pages, MAX_DOMAIN_PAGES); 535 return min(max_pages, MAX_DOMAIN_PAGES);
546} 536}
547 537
548static void xen_align_and_add_e820_region(u64 start, u64 size, int type) 538static void __init xen_align_and_add_e820_region(phys_addr_t start,
539 phys_addr_t size, int type)
549{ 540{
550 u64 end = start + size; 541 phys_addr_t end = start + size;
551 542
552 /* Align RAM regions to page boundaries. */ 543 /* Align RAM regions to page boundaries. */
553 if (type == E820_RAM) { 544 if (type == E820_RAM) {
554 start = PAGE_ALIGN(start); 545 start = PAGE_ALIGN(start);
555 end &= ~((u64)PAGE_SIZE - 1); 546 end &= ~((phys_addr_t)PAGE_SIZE - 1);
556 } 547 }
557 548
558 e820_add_region(start, end - start, type); 549 e820_add_region(start, end - start, type);
559} 550}
560 551
561void xen_ignore_unusable(struct e820entry *list, size_t map_size) 552static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
562{ 553{
563 struct e820entry *entry; 554 struct e820entry *entry;
564 unsigned int i; 555 unsigned int i;
@@ -577,7 +568,7 @@ char * __init xen_memory_setup(void)
577 static struct e820entry map[E820MAX] __initdata; 568 static struct e820entry map[E820MAX] __initdata;
578 569
579 unsigned long max_pfn = xen_start_info->nr_pages; 570 unsigned long max_pfn = xen_start_info->nr_pages;
580 unsigned long long mem_end; 571 phys_addr_t mem_end;
581 int rc; 572 int rc;
582 struct xen_memory_map memmap; 573 struct xen_memory_map memmap;
583 unsigned long max_pages; 574 unsigned long max_pages;
@@ -652,16 +643,16 @@ char * __init xen_memory_setup(void)
652 extra_pages); 643 extra_pages);
653 i = 0; 644 i = 0;
654 while (i < memmap.nr_entries) { 645 while (i < memmap.nr_entries) {
655 u64 addr = map[i].addr; 646 phys_addr_t addr = map[i].addr;
656 u64 size = map[i].size; 647 phys_addr_t size = map[i].size;
657 u32 type = map[i].type; 648 u32 type = map[i].type;
658 649
659 if (type == E820_RAM) { 650 if (type == E820_RAM) {
660 if (addr < mem_end) { 651 if (addr < mem_end) {
661 size = min(size, mem_end - addr); 652 size = min(size, mem_end - addr);
662 } else if (extra_pages) { 653 } else if (extra_pages) {
663 size = min(size, (u64)extra_pages * PAGE_SIZE); 654 size = min(size, PFN_PHYS(extra_pages));
664 extra_pages -= size / PAGE_SIZE; 655 extra_pages -= PFN_DOWN(size);
665 xen_add_extra_mem(addr, size); 656 xen_add_extra_mem(addr, size);
666 xen_max_p2m_pfn = PFN_DOWN(addr + size); 657 xen_max_p2m_pfn = PFN_DOWN(addr + size);
667 } else 658 } else
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4c071aeb8417..08e8489c47f1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -507,7 +507,7 @@ static int xen_cpu_disable(void)
507static void xen_cpu_die(unsigned int cpu) 507static void xen_cpu_die(unsigned int cpu)
508{ 508{
509 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { 509 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
510 current->state = TASK_UNINTERRUPTIBLE; 510 __set_current_state(TASK_UNINTERRUPTIBLE);
511 schedule_timeout(HZ/10); 511 schedule_timeout(HZ/10);
512 } 512 }
513 513
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 69087341d9ae..55da33b1d51c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -479,6 +479,10 @@ static void __init xen_time_init(void)
479 int cpu = smp_processor_id(); 479 int cpu = smp_processor_id();
480 struct timespec tp; 480 struct timespec tp;
481 481
482 /* As Dom0 is never moved, no penalty on using TSC there */
483 if (xen_initial_domain())
484 xen_clocksource.rating = 275;
485
482 clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); 486 clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
483 487
484 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) { 488 if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 5686bd9d58cc..9e195c683549 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -10,6 +10,12 @@
10extern const char xen_hypervisor_callback[]; 10extern const char xen_hypervisor_callback[];
11extern const char xen_failsafe_callback[]; 11extern const char xen_failsafe_callback[];
12 12
13void xen_sysenter_target(void);
14#ifdef CONFIG_X86_64
15void xen_syscall_target(void);
16void xen_syscall32_target(void);
17#endif
18
13extern void *xen_initial_gdt; 19extern void *xen_initial_gdt;
14 20
15struct trap_info; 21struct trap_info;
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 56d08fd75b1a..26cb624ace05 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -715,7 +715,7 @@ int efi_partition(struct parsed_partitions *state)
715 state->parts[i + 1].flags = ADDPART_FLAG_RAID; 715 state->parts[i + 1].flags = ADDPART_FLAG_RAID;
716 716
717 info = &state->parts[i + 1].info; 717 info = &state->parts[i + 1].info;
718 efi_guid_unparse(&ptes[i].unique_partition_guid, info->uuid); 718 efi_guid_to_str(&ptes[i].unique_partition_guid, info->uuid);
719 719
720 /* Naively convert UTF16-LE to 7 bits. */ 720 /* Naively convert UTF16-LE to 7 bits. */
721 label_max = min(ARRAY_SIZE(info->volname) - 1, 721 label_max = min(ARRAY_SIZE(info->volname) - 1,
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 8951cefb0a96..e6c3ddd92665 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -315,6 +315,12 @@ config ACPI_HOTPLUG_MEMORY
315 To compile this driver as a module, choose M here: 315 To compile this driver as a module, choose M here:
316 the module will be called acpi_memhotplug. 316 the module will be called acpi_memhotplug.
317 317
318config ACPI_HOTPLUG_IOAPIC
319 bool
320 depends on PCI
321 depends on X86_IO_APIC
322 default y
323
318config ACPI_SBS 324config ACPI_SBS
319 tristate "Smart Battery System" 325 tristate "Smart Battery System"
320 depends on X86 326 depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index f74317cc1ca9..b18cd2151ddb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -40,7 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
40acpi-y += ec.o 40acpi-y += ec.o
41acpi-$(CONFIG_ACPI_DOCK) += dock.o 41acpi-$(CONFIG_ACPI_DOCK) += dock.o
42acpi-y += pci_root.o pci_link.o pci_irq.o 42acpi-y += pci_root.o pci_link.o pci_irq.o
43acpi-y += acpi_lpss.o 43acpi-y += acpi_lpss.o acpi_apd.o
44acpi-y += acpi_platform.o 44acpi-y += acpi_platform.o
45acpi-y += acpi_pnp.o 45acpi-y += acpi_pnp.o
46acpi-y += int340x_thermal.o 46acpi-y += int340x_thermal.o
@@ -70,6 +70,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
70obj-y += container.o 70obj-y += container.o
71obj-$(CONFIG_ACPI_THERMAL) += thermal.o 71obj-$(CONFIG_ACPI_THERMAL) += thermal.o
72obj-y += acpi_memhotplug.o 72obj-y += acpi_memhotplug.o
73obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
73obj-$(CONFIG_ACPI_BATTERY) += battery.o 74obj-$(CONFIG_ACPI_BATTERY) += battery.o
74obj-$(CONFIG_ACPI_SBS) += sbshc.o 75obj-$(CONFIG_ACPI_SBS) += sbshc.o
75obj-$(CONFIG_ACPI_SBS) += sbs.o 76obj-$(CONFIG_ACPI_SBS) += sbs.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
new file mode 100644
index 000000000000..3984ea96e5f7
--- /dev/null
+++ b/drivers/acpi/acpi_apd.c
@@ -0,0 +1,150 @@
1/*
2 * AMD ACPI support for ACPI2platform device.
3 *
4 * Copyright (c) 2014,2015 AMD Corporation.
5 * Authors: Ken Xue <Ken.Xue@amd.com>
6 * Wu, Jeff <Jeff.Wu@amd.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/clk-provider.h>
14#include <linux/platform_device.h>
15#include <linux/pm_domain.h>
16#include <linux/clkdev.h>
17#include <linux/acpi.h>
18#include <linux/err.h>
19#include <linux/clk.h>
20#include <linux/pm.h>
21
22#include "internal.h"
23
24ACPI_MODULE_NAME("acpi_apd");
25struct apd_private_data;
26
27/**
28 * ACPI_APD_SYSFS : add device attributes in sysfs
29 * ACPI_APD_PM : attach power domain to device
30 */
31#define ACPI_APD_SYSFS BIT(0)
32#define ACPI_APD_PM BIT(1)
33
34/**
35 * struct apd_device_desc - a descriptor for apd device
36 * @flags: device flags like %ACPI_APD_SYSFS, %ACPI_APD_PM
37 * @fixed_clk_rate: fixed rate input clock source for acpi device;
38 * 0 means no fixed rate input clock source
39 * @setup: a hook routine to set device resource during create platform device
40 *
41 * Device description defined as acpi_device_id.driver_data
42 */
43struct apd_device_desc {
44 unsigned int flags;
45 unsigned int fixed_clk_rate;
46 int (*setup)(struct apd_private_data *pdata);
47};
48
49struct apd_private_data {
50 struct clk *clk;
51 struct acpi_device *adev;
52 const struct apd_device_desc *dev_desc;
53};
54
55#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
56#define APD_ADDR(desc) ((unsigned long)&desc)
57
58static int acpi_apd_setup(struct apd_private_data *pdata)
59{
60 const struct apd_device_desc *dev_desc = pdata->dev_desc;
61 struct clk *clk = ERR_PTR(-ENODEV);
62
63 if (dev_desc->fixed_clk_rate) {
64 clk = clk_register_fixed_rate(&pdata->adev->dev,
65 dev_name(&pdata->adev->dev),
66 NULL, CLK_IS_ROOT,
67 dev_desc->fixed_clk_rate);
68 clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
69 pdata->clk = clk;
70 }
71
72 return 0;
73}
74
75static struct apd_device_desc cz_i2c_desc = {
76 .setup = acpi_apd_setup,
77 .fixed_clk_rate = 133000000,
78};
79
80static struct apd_device_desc cz_uart_desc = {
81 .setup = acpi_apd_setup,
82 .fixed_clk_rate = 48000000,
83};
84
85#else
86
87#define APD_ADDR(desc) (0UL)
88
89#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */
90
91/**
92* Create platform device during acpi scan attach handle.
93* Return value > 0 on success of creating device.
94*/
95static int acpi_apd_create_device(struct acpi_device *adev,
96 const struct acpi_device_id *id)
97{
98 const struct apd_device_desc *dev_desc = (void *)id->driver_data;
99 struct apd_private_data *pdata;
100 struct platform_device *pdev;
101 int ret;
102
103 if (!dev_desc) {
104 pdev = acpi_create_platform_device(adev);
105 return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
106 }
107
108 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
109 if (!pdata)
110 return -ENOMEM;
111
112 pdata->adev = adev;
113 pdata->dev_desc = dev_desc;
114
115 if (dev_desc->setup) {
116 ret = dev_desc->setup(pdata);
117 if (ret)
118 goto err_out;
119 }
120
121 adev->driver_data = pdata;
122 pdev = acpi_create_platform_device(adev);
123 if (!IS_ERR_OR_NULL(pdev))
124 return 1;
125
126 ret = PTR_ERR(pdev);
127 adev->driver_data = NULL;
128
129 err_out:
130 kfree(pdata);
131 return ret;
132}
133
134static const struct acpi_device_id acpi_apd_device_ids[] = {
135 /* Generic apd devices */
136 { "AMD0010", APD_ADDR(cz_i2c_desc) },
137 { "AMD0020", APD_ADDR(cz_uart_desc) },
138 { "AMD0030", },
139 { }
140};
141
142static struct acpi_scan_handler apd_handler = {
143 .ids = acpi_apd_device_ids,
144 .attach = acpi_apd_create_device,
145};
146
147void __init acpi_apd_init(void)
148{
149 acpi_scan_add_handler(&apd_handler);
150}
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e75737fd7eef..02e835f3cf8a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -125,7 +125,7 @@ static struct lpss_device_desc lpt_dev_desc = {
125}; 125};
126 126
127static struct lpss_device_desc lpt_i2c_dev_desc = { 127static struct lpss_device_desc lpt_i2c_dev_desc = {
128 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, 128 .flags = LPSS_CLK | LPSS_LTR,
129 .prv_offset = 0x800, 129 .prv_offset = 0x800,
130}; 130};
131 131
@@ -307,7 +307,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
307{ 307{
308 struct lpss_device_desc *dev_desc; 308 struct lpss_device_desc *dev_desc;
309 struct lpss_private_data *pdata; 309 struct lpss_private_data *pdata;
310 struct resource_list_entry *rentry; 310 struct resource_entry *rentry;
311 struct list_head resource_list; 311 struct list_head resource_list;
312 struct platform_device *pdev; 312 struct platform_device *pdev;
313 int ret; 313 int ret;
@@ -327,13 +327,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
327 goto err_out; 327 goto err_out;
328 328
329 list_for_each_entry(rentry, &resource_list, node) 329 list_for_each_entry(rentry, &resource_list, node)
330 if (resource_type(&rentry->res) == IORESOURCE_MEM) { 330 if (resource_type(rentry->res) == IORESOURCE_MEM) {
331 if (dev_desc->prv_size_override) 331 if (dev_desc->prv_size_override)
332 pdata->mmio_size = dev_desc->prv_size_override; 332 pdata->mmio_size = dev_desc->prv_size_override;
333 else 333 else
334 pdata->mmio_size = resource_size(&rentry->res); 334 pdata->mmio_size = resource_size(rentry->res);
335 pdata->mmio_base = ioremap(rentry->res.start, 335 pdata->mmio_base = ioremap(rentry->res->start,
336 pdata->mmio_size); 336 pdata->mmio_size);
337 if (!pdata->mmio_base)
338 goto err_out;
337 break; 339 break;
338 } 340 }
339 341
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 23e2319ead41..ee28f4d15625 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -101,8 +101,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
101 /* Can we combine the resource range information? */ 101 /* Can we combine the resource range information? */
102 if ((info->caching == address64.info.mem.caching) && 102 if ((info->caching == address64.info.mem.caching) &&
103 (info->write_protect == address64.info.mem.write_protect) && 103 (info->write_protect == address64.info.mem.write_protect) &&
104 (info->start_addr + info->length == address64.minimum)) { 104 (info->start_addr + info->length == address64.address.minimum)) {
105 info->length += address64.address_length; 105 info->length += address64.address.address_length;
106 return AE_OK; 106 return AE_OK;
107 } 107 }
108 } 108 }
@@ -114,8 +114,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
114 INIT_LIST_HEAD(&new->list); 114 INIT_LIST_HEAD(&new->list);
115 new->caching = address64.info.mem.caching; 115 new->caching = address64.info.mem.caching;
116 new->write_protect = address64.info.mem.write_protect; 116 new->write_protect = address64.info.mem.write_protect;
117 new->start_addr = address64.minimum; 117 new->start_addr = address64.address.minimum;
118 new->length = address64.address_length; 118 new->length = address64.address.address_length;
119 list_add_tail(&new->list, &mem_device->res_list); 119 list_add_tail(&new->list, &mem_device->res_list);
120 120
121 return AE_OK; 121 return AE_OK;
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 6ba8beb6b9d2..1284138e42ab 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
45 struct platform_device *pdev = NULL; 45 struct platform_device *pdev = NULL;
46 struct acpi_device *acpi_parent; 46 struct acpi_device *acpi_parent;
47 struct platform_device_info pdevinfo; 47 struct platform_device_info pdevinfo;
48 struct resource_list_entry *rentry; 48 struct resource_entry *rentry;
49 struct list_head resource_list; 49 struct list_head resource_list;
50 struct resource *resources = NULL; 50 struct resource *resources = NULL;
51 int count; 51 int count;
@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
71 } 71 }
72 count = 0; 72 count = 0;
73 list_for_each_entry(rentry, &resource_list, node) 73 list_for_each_entry(rentry, &resource_list, node)
74 resources[count++] = rentry->res; 74 resources[count++] = *rentry->res;
75 75
76 acpi_dev_free_resource_list(&resource_list); 76 acpi_dev_free_resource_list(&resource_list);
77 } 77 }
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 3d2c88289da9..d863016565b5 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
47/* Common info for tool signons */ 47/* Common info for tool signons */
48 48
49#define ACPICA_NAME "Intel ACPI Component Architecture" 49#define ACPICA_NAME "Intel ACPI Component Architecture"
50#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2014 Intel Corporation" 50#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2015 Intel Corporation"
51 51
52#if ACPI_MACHINE_WIDTH == 64 52#if ACPI_MACHINE_WIDTH == 64
53#define ACPI_WIDTH "-64" 53#define ACPI_WIDTH "-64"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 6f1c616910ac..853aa2dbdb61 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 1d026ff1683f..4169bb87a996 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index d3e2cc395d7f..408f04bcaab4 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 7a7811a9fc26..228704b78657 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -143,8 +143,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
143acpi_status 143acpi_status
144acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); 144acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
145 145
146u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
147
148acpi_status 146acpi_status
149acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 147acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
150 struct acpi_gpe_block_info *gpe_block, void *context); 148 struct acpi_gpe_block_info *gpe_block, void *context);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 7f60582d0c8c..a165d25343e8 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index c318d3e27893..196a55244559 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index b01f71ce0523..1886bde54b5d 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 680d23bbae7c..7add32e5d8c5 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 4bceb11c7380..cf607fe69dbd 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index ee1c040f321c..952fbe0b7231 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8abb393dafab..3e9720e1f34f 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index dda0e6affcf1..a5f17de45ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6168b85463ed..74a390c6db16 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index bd3908d26c4f..a972d11c97c9 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 4b008e8884a1..efc4c7124ccc 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index cf7346110bd8..d14b547b7cd5 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 1afe46e44dac..1c127a43017b 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 486d342e74b6..c2f03e8774ad 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 5908ccec6aea..3a95068fc119 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2014, Intel Corp. 10 * Copyright (C) 2000 - 2015, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 3a0beeb86ba5..ee0cdd60b93d 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 720b1cdda711..3e6989738e85 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 8daf9de82b73..39da9da62bbf 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c57666196672..43b40de90484 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index aee5e45f6d35..bbe74bcebbae 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 3c7f7378b94d..d72565a3c646 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index b67522df01ac..2e4c42b377ec 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index a1e7e6b6fcf7..8a7b07b6adc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 6c0759c0db47..77244182ff02 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 9f74795e2268..e5ff89bcb3f5 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index f7f5107e754d..df54d46225cd 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 15623da26200..843942fb4be5 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 2ac28d297305..fcaa30c611fb 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9d6e2c1de1f8..43b3ea40c0b6 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 24f7d5ea678a..89ac2022465e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c7bffff9ed32..bf6873f95e72 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 3393a73ca0d6..b78dc7c6d5d7 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index aa70154cf4fa..5ed064e8673c 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -114,17 +114,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
114 114
115 ACPI_FUNCTION_TRACE(ev_enable_gpe); 115 ACPI_FUNCTION_TRACE(ev_enable_gpe);
116 116
117 /*
118 * We will only allow a GPE to be enabled if it has either an associated
119 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
120 * feature. Otherwise, the GPE will be immediately disabled by
121 * acpi_ev_gpe_dispatch the first time it fires.
122 */
123 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
124 ACPI_GPE_DISPATCH_NONE) {
125 return_ACPI_STATUS(AE_NO_HANDLER);
126 }
127
128 /* Clear the GPE (of stale events) */ 117 /* Clear the GPE (of stale events) */
129 118
130 status = acpi_hw_clear_gpe(gpe_event_info); 119 status = acpi_hw_clear_gpe(gpe_event_info);
@@ -339,7 +328,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
339{ 328{
340 acpi_status status; 329 acpi_status status;
341 struct acpi_gpe_block_info *gpe_block; 330 struct acpi_gpe_block_info *gpe_block;
331 struct acpi_namespace_node *gpe_device;
342 struct acpi_gpe_register_info *gpe_register_info; 332 struct acpi_gpe_register_info *gpe_register_info;
333 struct acpi_gpe_event_info *gpe_event_info;
334 u32 gpe_number;
335 struct acpi_gpe_handler_info *gpe_handler_info;
343 u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; 336 u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
344 u8 enabled_status_byte; 337 u8 enabled_status_byte;
345 u32 status_reg; 338 u32 status_reg;
@@ -367,6 +360,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
367 360
368 gpe_block = gpe_xrupt_list->gpe_block_list_head; 361 gpe_block = gpe_xrupt_list->gpe_block_list_head;
369 while (gpe_block) { 362 while (gpe_block) {
363 gpe_device = gpe_block->node;
364
370 /* 365 /*
371 * Read all of the 8-bit GPE status and enable registers in this GPE 366 * Read all of the 8-bit GPE status and enable registers in this GPE
372 * block, saving all of them. Find all currently active GP events. 367 * block, saving all of them. Find all currently active GP events.
@@ -442,16 +437,68 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
442 437
443 /* Examine one GPE bit */ 438 /* Examine one GPE bit */
444 439
440 gpe_event_info =
441 &gpe_block->
442 event_info[((acpi_size) i *
443 ACPI_GPE_REGISTER_WIDTH) + j];
444 gpe_number =
445 j + gpe_register_info->base_gpe_number;
446
445 if (enabled_status_byte & (1 << j)) { 447 if (enabled_status_byte & (1 << j)) {
446 /* 448
447 * Found an active GPE. Dispatch the event to a handler 449 /* Invoke global event handler if present */
448 * or method. 450
449 */ 451 acpi_gpe_count++;
450 int_status |= 452 if (acpi_gbl_global_event_handler) {
451 acpi_ev_gpe_dispatch(gpe_block-> 453 acpi_gbl_global_event_handler
452 node, 454 (ACPI_EVENT_TYPE_GPE,
453 &gpe_block-> 455 gpe_device, gpe_number,
454 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); 456 acpi_gbl_global_event_handler_context);
457 }
458
459 /* Found an active GPE */
460
461 if (ACPI_GPE_DISPATCH_TYPE
462 (gpe_event_info->flags) ==
463 ACPI_GPE_DISPATCH_RAW_HANDLER) {
464
465 /* Dispatch the event to a raw handler */
466
467 gpe_handler_info =
468 gpe_event_info->dispatch.
469 handler;
470
471 /*
472 * There is no protection around the namespace node
473 * and the GPE handler to ensure a safe destruction
474 * because:
475 * 1. The namespace node is expected to always
476 * exist after loading a table.
477 * 2. The GPE handler is expected to be flushed by
478 * acpi_os_wait_events_complete() before the
479 * destruction.
480 */
481 acpi_os_release_lock
482 (acpi_gbl_gpe_lock, flags);
483 int_status |=
484 gpe_handler_info->
485 address(gpe_device,
486 gpe_number,
487 gpe_handler_info->
488 context);
489 flags =
490 acpi_os_acquire_lock
491 (acpi_gbl_gpe_lock);
492 } else {
493 /*
494 * Dispatch the event to a standard handler or
495 * method.
496 */
497 int_status |=
498 acpi_ev_gpe_dispatch
499 (gpe_device, gpe_event_info,
500 gpe_number);
501 }
455 } 502 }
456 } 503 }
457 } 504 }
@@ -484,52 +531,15 @@ unlock_and_exit:
484static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) 531static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
485{ 532{
486 struct acpi_gpe_event_info *gpe_event_info = context; 533 struct acpi_gpe_event_info *gpe_event_info = context;
487 acpi_status status; 534 acpi_status status = AE_OK;
488 struct acpi_gpe_event_info *local_gpe_event_info;
489 struct acpi_evaluate_info *info; 535 struct acpi_evaluate_info *info;
490 struct acpi_gpe_notify_info *notify; 536 struct acpi_gpe_notify_info *notify;
491 537
492 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 538 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
493 539
494 /* Allocate a local GPE block */
495
496 local_gpe_event_info =
497 ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
498 if (!local_gpe_event_info) {
499 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
500 return_VOID;
501 }
502
503 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
504 if (ACPI_FAILURE(status)) {
505 ACPI_FREE(local_gpe_event_info);
506 return_VOID;
507 }
508
509 /* Must revalidate the gpe_number/gpe_block */
510
511 if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
512 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
513 ACPI_FREE(local_gpe_event_info);
514 return_VOID;
515 }
516
517 /*
518 * Take a snapshot of the GPE info for this level - we copy the info to
519 * prevent a race condition with remove_handler/remove_block.
520 */
521 ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
522 sizeof(struct acpi_gpe_event_info));
523
524 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
525 if (ACPI_FAILURE(status)) {
526 ACPI_FREE(local_gpe_event_info);
527 return_VOID;
528 }
529
530 /* Do the correct dispatch - normal method or implicit notify */ 540 /* Do the correct dispatch - normal method or implicit notify */
531 541
532 switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 542 switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
533 case ACPI_GPE_DISPATCH_NOTIFY: 543 case ACPI_GPE_DISPATCH_NOTIFY:
534 /* 544 /*
535 * Implicit notify. 545 * Implicit notify.
@@ -542,7 +552,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
542 * June 2012: Expand implicit notify mechanism to support 552 * June 2012: Expand implicit notify mechanism to support
543 * notifies on multiple device objects. 553 * notifies on multiple device objects.
544 */ 554 */
545 notify = local_gpe_event_info->dispatch.notify_list; 555 notify = gpe_event_info->dispatch.notify_list;
546 while (ACPI_SUCCESS(status) && notify) { 556 while (ACPI_SUCCESS(status) && notify) {
547 status = 557 status =
548 acpi_ev_queue_notify_request(notify->device_node, 558 acpi_ev_queue_notify_request(notify->device_node,
@@ -566,7 +576,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
566 * _Lxx/_Exx control method that corresponds to this GPE 576 * _Lxx/_Exx control method that corresponds to this GPE
567 */ 577 */
568 info->prefix_node = 578 info->prefix_node =
569 local_gpe_event_info->dispatch.method_node; 579 gpe_event_info->dispatch.method_node;
570 info->flags = ACPI_IGNORE_RETURN_VALUE; 580 info->flags = ACPI_IGNORE_RETURN_VALUE;
571 581
572 status = acpi_ns_evaluate(info); 582 status = acpi_ns_evaluate(info);
@@ -576,25 +586,27 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
576 if (ACPI_FAILURE(status)) { 586 if (ACPI_FAILURE(status)) {
577 ACPI_EXCEPTION((AE_INFO, status, 587 ACPI_EXCEPTION((AE_INFO, status,
578 "while evaluating GPE method [%4.4s]", 588 "while evaluating GPE method [%4.4s]",
579 acpi_ut_get_node_name 589 acpi_ut_get_node_name(gpe_event_info->
580 (local_gpe_event_info->dispatch. 590 dispatch.
581 method_node))); 591 method_node)));
582 } 592 }
583 break; 593 break;
584 594
585 default: 595 default:
586 596
587 return_VOID; /* Should never happen */ 597 goto error_exit; /* Should never happen */
588 } 598 }
589 599
590 /* Defer enabling of GPE until all notify handlers are done */ 600 /* Defer enabling of GPE until all notify handlers are done */
591 601
592 status = acpi_os_execute(OSL_NOTIFY_HANDLER, 602 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
593 acpi_ev_asynch_enable_gpe, 603 acpi_ev_asynch_enable_gpe, gpe_event_info);
594 local_gpe_event_info); 604 if (ACPI_SUCCESS(status)) {
595 if (ACPI_FAILURE(status)) { 605 return_VOID;
596 ACPI_FREE(local_gpe_event_info);
597 } 606 }
607
608error_exit:
609 acpi_ev_asynch_enable_gpe(gpe_event_info);
598 return_VOID; 610 return_VOID;
599} 611}
600 612
@@ -622,7 +634,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
622 (void)acpi_ev_finish_gpe(gpe_event_info); 634 (void)acpi_ev_finish_gpe(gpe_event_info);
623 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 635 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
624 636
625 ACPI_FREE(gpe_event_info);
626 return; 637 return;
627} 638}
628 639
@@ -692,15 +703,6 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
692 703
693 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 704 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
694 705
695 /* Invoke global event handler if present */
696
697 acpi_gpe_count++;
698 if (acpi_gbl_global_event_handler) {
699 acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
700 gpe_number,
701 acpi_gbl_global_event_handler_context);
702 }
703
704 /* 706 /*
705 * Always disable the GPE so that it does not keep firing before 707 * Always disable the GPE so that it does not keep firing before
706 * any asynchronous activity completes (either from the execution 708 * any asynchronous activity completes (either from the execution
@@ -741,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
741 * If there is neither a handler nor a method, leave the GPE 743 * If there is neither a handler nor a method, leave the GPE
742 * disabled. 744 * disabled.
743 */ 745 */
744 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 746 switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
745 case ACPI_GPE_DISPATCH_HANDLER: 747 case ACPI_GPE_DISPATCH_HANDLER:
746 748
747 /* Invoke the installed handler (at interrupt level) */ 749 /* Invoke the installed handler (at interrupt level) */
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index d86699eea33c..e0f24c504513 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -474,10 +474,12 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
474 * Ignore GPEs that have no corresponding _Lxx/_Exx method 474 * Ignore GPEs that have no corresponding _Lxx/_Exx method
475 * and GPEs that are used to wake the system 475 * and GPEs that are used to wake the system
476 */ 476 */
477 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 477 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
478 ACPI_GPE_DISPATCH_NONE) 478 ACPI_GPE_DISPATCH_NONE)
479 || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) 479 || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
480 == ACPI_GPE_DISPATCH_HANDLER) 480 ACPI_GPE_DISPATCH_HANDLER)
481 || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
482 ACPI_GPE_DISPATCH_RAW_HANDLER)
481 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 483 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
482 continue; 484 continue;
483 } 485 }
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 7be928379879..8840296d5b20 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -401,15 +401,17 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
401 return_ACPI_STATUS(AE_OK); 401 return_ACPI_STATUS(AE_OK);
402 } 402 }
403 403
404 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 404 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
405 ACPI_GPE_DISPATCH_HANDLER) { 405 ACPI_GPE_DISPATCH_HANDLER) ||
406 (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
407 ACPI_GPE_DISPATCH_RAW_HANDLER)) {
406 408
407 /* If there is already a handler, ignore this GPE method */ 409 /* If there is already a handler, ignore this GPE method */
408 410
409 return_ACPI_STATUS(AE_OK); 411 return_ACPI_STATUS(AE_OK);
410 } 412 }
411 413
412 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 414 if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
413 ACPI_GPE_DISPATCH_METHOD) { 415 ACPI_GPE_DISPATCH_METHOD) {
414 /* 416 /*
415 * If there is already a method, ignore this method. But check 417 * If there is already a method, ignore this method. But check
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 17e4bbfdb096..3a958f3612fe 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -108,53 +108,6 @@ unlock_and_exit:
108 108
109/******************************************************************************* 109/*******************************************************************************
110 * 110 *
111 * FUNCTION: acpi_ev_valid_gpe_event
112 *
113 * PARAMETERS: gpe_event_info - Info for this GPE
114 *
115 * RETURN: TRUE if the gpe_event is valid
116 *
117 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
118 * Should be called only when the GPE lists are semaphore locked
119 * and not subject to change.
120 *
121 ******************************************************************************/
122
123u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
124{
125 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
126 struct acpi_gpe_block_info *gpe_block;
127
128 ACPI_FUNCTION_ENTRY();
129
130 /* No need for spin lock since we are not changing any list elements */
131
132 /* Walk the GPE interrupt levels */
133
134 gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
135 while (gpe_xrupt_block) {
136 gpe_block = gpe_xrupt_block->gpe_block_list_head;
137
138 /* Walk the GPE blocks on this interrupt level */
139
140 while (gpe_block) {
141 if ((&gpe_block->event_info[0] <= gpe_event_info) &&
142 (&gpe_block->event_info[gpe_block->gpe_count] >
143 gpe_event_info)) {
144 return (TRUE);
145 }
146
147 gpe_block = gpe_block->next;
148 }
149
150 gpe_xrupt_block = gpe_xrupt_block->next;
151 }
152
153 return (FALSE);
154}
155
156/*******************************************************************************
157 *
158 * FUNCTION: acpi_ev_get_gpe_device 111 * FUNCTION: acpi_ev_get_gpe_device
159 * 112 *
160 * PARAMETERS: GPE_WALK_CALLBACK 113 * PARAMETERS: GPE_WALK_CALLBACK
@@ -371,8 +324,10 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
371 ACPI_GPE_REGISTER_WIDTH) 324 ACPI_GPE_REGISTER_WIDTH)
372 + j]; 325 + j];
373 326
374 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 327 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
375 ACPI_GPE_DISPATCH_HANDLER) { 328 ACPI_GPE_DISPATCH_HANDLER) ||
329 (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
330 ACPI_GPE_DISPATCH_RAW_HANDLER)) {
376 331
377 /* Delete an installed handler block */ 332 /* Delete an installed handler block */
378 333
@@ -380,10 +335,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
380 gpe_event_info->dispatch.handler = NULL; 335 gpe_event_info->dispatch.handler = NULL;
381 gpe_event_info->flags &= 336 gpe_event_info->flags &=
382 ~ACPI_GPE_DISPATCH_MASK; 337 ~ACPI_GPE_DISPATCH_MASK;
383 } else 338 } else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
384 if ((gpe_event_info-> 339 == ACPI_GPE_DISPATCH_NOTIFY) {
385 flags & ACPI_GPE_DISPATCH_MASK) ==
386 ACPI_GPE_DISPATCH_NOTIFY) {
387 340
388 /* Delete the implicit notification device list */ 341 /* Delete the implicit notification device list */
389 342
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 78ac29351c9e..74e8595f5a2b 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 24ea3424981b..f7c9dfe7b990 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 8eb8575e8c16..9abace3401f9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 1b148a440d67..da323390bb70 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 29630e303829..0366703d2970 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 55a58f3ec8df..81f2d9e87fad 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,16 @@
51 51
52#define _COMPONENT ACPI_EVENTS 52#define _COMPONENT ACPI_EVENTS
53ACPI_MODULE_NAME("evxface") 53ACPI_MODULE_NAME("evxface")
54#if (!ACPI_REDUCED_HARDWARE)
55/* Local prototypes */
56static acpi_status
57acpi_ev_install_gpe_handler(acpi_handle gpe_device,
58 u32 gpe_number,
59 u32 type,
60 u8 is_raw_handler,
61 acpi_gpe_handler address, void *context);
62
63#endif
54 64
55 65
56/******************************************************************************* 66/*******************************************************************************
@@ -76,6 +86,7 @@ ACPI_MODULE_NAME("evxface")
76 * handlers. 86 * handlers.
77 * 87 *
78 ******************************************************************************/ 88 ******************************************************************************/
89
79acpi_status 90acpi_status
80acpi_install_notify_handler(acpi_handle device, 91acpi_install_notify_handler(acpi_handle device,
81 u32 handler_type, 92 u32 handler_type,
@@ -717,32 +728,37 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
717 728
718/******************************************************************************* 729/*******************************************************************************
719 * 730 *
720 * FUNCTION: acpi_install_gpe_handler 731 * FUNCTION: acpi_ev_install_gpe_handler
721 * 732 *
722 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT 733 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
723 * defined GPEs) 734 * defined GPEs)
724 * gpe_number - The GPE number within the GPE block 735 * gpe_number - The GPE number within the GPE block
725 * type - Whether this GPE should be treated as an 736 * type - Whether this GPE should be treated as an
726 * edge- or level-triggered interrupt. 737 * edge- or level-triggered interrupt.
738 * is_raw_handler - Whether this GPE should be handled using
739 * the special GPE handler mode.
727 * address - Address of the handler 740 * address - Address of the handler
728 * context - Value passed to the handler on each GPE 741 * context - Value passed to the handler on each GPE
729 * 742 *
730 * RETURN: Status 743 * RETURN: Status
731 * 744 *
732 * DESCRIPTION: Install a handler for a General Purpose Event. 745 * DESCRIPTION: Internal function to install a handler for a General Purpose
746 * Event.
733 * 747 *
734 ******************************************************************************/ 748 ******************************************************************************/
735acpi_status 749static acpi_status
736acpi_install_gpe_handler(acpi_handle gpe_device, 750acpi_ev_install_gpe_handler(acpi_handle gpe_device,
737 u32 gpe_number, 751 u32 gpe_number,
738 u32 type, acpi_gpe_handler address, void *context) 752 u32 type,
753 u8 is_raw_handler,
754 acpi_gpe_handler address, void *context)
739{ 755{
740 struct acpi_gpe_event_info *gpe_event_info; 756 struct acpi_gpe_event_info *gpe_event_info;
741 struct acpi_gpe_handler_info *handler; 757 struct acpi_gpe_handler_info *handler;
742 acpi_status status; 758 acpi_status status;
743 acpi_cpu_flags flags; 759 acpi_cpu_flags flags;
744 760
745 ACPI_FUNCTION_TRACE(acpi_install_gpe_handler); 761 ACPI_FUNCTION_TRACE(ev_install_gpe_handler);
746 762
747 /* Parameter validation */ 763 /* Parameter validation */
748 764
@@ -775,8 +791,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
775 791
776 /* Make sure that there isn't a handler there already */ 792 /* Make sure that there isn't a handler there already */
777 793
778 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 794 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
779 ACPI_GPE_DISPATCH_HANDLER) { 795 ACPI_GPE_DISPATCH_HANDLER) ||
796 (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
797 ACPI_GPE_DISPATCH_RAW_HANDLER)) {
780 status = AE_ALREADY_EXISTS; 798 status = AE_ALREADY_EXISTS;
781 goto free_and_exit; 799 goto free_and_exit;
782 } 800 }
@@ -793,9 +811,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
793 * automatically during initialization, in which case it has to be 811 * automatically during initialization, in which case it has to be
794 * disabled now to avoid spurious execution of the handler. 812 * disabled now to avoid spurious execution of the handler.
795 */ 813 */
796 if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) || 814 if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
797 (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) && 815 ACPI_GPE_DISPATCH_METHOD) ||
798 gpe_event_info->runtime_count) { 816 (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
817 ACPI_GPE_DISPATCH_NOTIFY)) && gpe_event_info->runtime_count) {
799 handler->originally_enabled = TRUE; 818 handler->originally_enabled = TRUE;
800 (void)acpi_ev_remove_gpe_reference(gpe_event_info); 819 (void)acpi_ev_remove_gpe_reference(gpe_event_info);
801 820
@@ -816,7 +835,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
816 835
817 gpe_event_info->flags &= 836 gpe_event_info->flags &=
818 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 837 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
819 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER); 838 gpe_event_info->flags |=
839 (u8)(type |
840 (is_raw_handler ? ACPI_GPE_DISPATCH_RAW_HANDLER :
841 ACPI_GPE_DISPATCH_HANDLER));
820 842
821 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 843 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
822 844
@@ -830,10 +852,78 @@ free_and_exit:
830 goto unlock_and_exit; 852 goto unlock_and_exit;
831} 853}
832 854
855/*******************************************************************************
856 *
857 * FUNCTION: acpi_install_gpe_handler
858 *
859 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
860 * defined GPEs)
861 * gpe_number - The GPE number within the GPE block
862 * type - Whether this GPE should be treated as an
863 * edge- or level-triggered interrupt.
864 * address - Address of the handler
865 * context - Value passed to the handler on each GPE
866 *
867 * RETURN: Status
868 *
869 * DESCRIPTION: Install a handler for a General Purpose Event.
870 *
871 ******************************************************************************/
872
873acpi_status
874acpi_install_gpe_handler(acpi_handle gpe_device,
875 u32 gpe_number,
876 u32 type, acpi_gpe_handler address, void *context)
877{
878 acpi_status status;
879
880 ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
881
882 status =
883 acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, FALSE,
884 address, context);
885
886 return_ACPI_STATUS(status);
887}
888
833ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) 889ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
834 890
835/******************************************************************************* 891/*******************************************************************************
836 * 892 *
893 * FUNCTION: acpi_install_gpe_raw_handler
894 *
895 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
896 * defined GPEs)
897 * gpe_number - The GPE number within the GPE block
898 * type - Whether this GPE should be treated as an
899 * edge- or level-triggered interrupt.
900 * address - Address of the handler
901 * context - Value passed to the handler on each GPE
902 *
903 * RETURN: Status
904 *
905 * DESCRIPTION: Install a handler for a General Purpose Event.
906 *
907 ******************************************************************************/
908acpi_status
909acpi_install_gpe_raw_handler(acpi_handle gpe_device,
910 u32 gpe_number,
911 u32 type, acpi_gpe_handler address, void *context)
912{
913 acpi_status status;
914
915 ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler);
916
917 status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, TRUE,
918 address, context);
919
920 return_ACPI_STATUS(status);
921}
922
923ACPI_EXPORT_SYMBOL(acpi_install_gpe_raw_handler)
924
925/*******************************************************************************
926 *
837 * FUNCTION: acpi_remove_gpe_handler 927 * FUNCTION: acpi_remove_gpe_handler
838 * 928 *
839 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT 929 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
@@ -880,8 +970,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
880 970
881 /* Make sure that a handler is indeed installed */ 971 /* Make sure that a handler is indeed installed */
882 972
883 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != 973 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
884 ACPI_GPE_DISPATCH_HANDLER) { 974 ACPI_GPE_DISPATCH_HANDLER) &&
975 (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
976 ACPI_GPE_DISPATCH_RAW_HANDLER)) {
885 status = AE_NOT_EXIST; 977 status = AE_NOT_EXIST;
886 goto unlock_and_exit; 978 goto unlock_and_exit;
887 } 979 }
@@ -896,6 +988,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
896 /* Remove the handler */ 988 /* Remove the handler */
897 989
898 handler = gpe_event_info->dispatch.handler; 990 handler = gpe_event_info->dispatch.handler;
991 gpe_event_info->dispatch.handler = NULL;
899 992
900 /* Restore Method node (if any), set dispatch flags */ 993 /* Restore Method node (if any), set dispatch flags */
901 994
@@ -909,9 +1002,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
909 * enabled, it should be enabled at this point to restore the 1002 * enabled, it should be enabled at this point to restore the
910 * post-initialization configuration. 1003 * post-initialization configuration.
911 */ 1004 */
912 if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) || 1005 if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
913 (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) && 1006 ACPI_GPE_DISPATCH_METHOD) ||
914 handler->originally_enabled) { 1007 (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
1008 ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
915 (void)acpi_ev_add_gpe_reference(gpe_event_info); 1009 (void)acpi_ev_add_gpe_reference(gpe_event_info);
916 } 1010 }
917 1011
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index bb8cbf5961bf..df06a23c4197 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index e889a5304abd..70eb47e3d724 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -132,7 +132,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
132 */ 132 */
133 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 133 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
134 if (gpe_event_info) { 134 if (gpe_event_info) {
135 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != 135 if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
136 ACPI_GPE_DISPATCH_NONE) { 136 ACPI_GPE_DISPATCH_NONE) {
137 status = acpi_ev_add_gpe_reference(gpe_event_info); 137 status = acpi_ev_add_gpe_reference(gpe_event_info);
138 } else { 138 } else {
@@ -183,6 +183,77 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
183 183
184ACPI_EXPORT_SYMBOL(acpi_disable_gpe) 184ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
185 185
186/*******************************************************************************
187 *
188 * FUNCTION: acpi_set_gpe
189 *
190 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
191 * gpe_number - GPE level within the GPE block
192 * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
193 *
194 * RETURN: Status
195 *
196 * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
197 * the reference count mechanism used in the acpi_enable_gpe(),
198 * acpi_disable_gpe() interfaces.
199 * This API is typically used by the GPE raw handler mode driver
200 * to switch between the polling mode and the interrupt mode after
201 * the driver has enabled the GPE.
202 * The APIs should be invoked in this order:
203 * acpi_enable_gpe() <- Ensure the reference count > 0
204 * acpi_set_gpe(ACPI_GPE_DISABLE) <- Enter polling mode
205 * acpi_set_gpe(ACPI_GPE_ENABLE) <- Leave polling mode
206 * acpi_disable_gpe() <- Decrease the reference count
207 *
208 * Note: If a GPE is shared by 2 silicon components, then both the drivers
209 * should support GPE polling mode or disabling the GPE for long period
210 * for one driver may break the other. So use it with care since all
211 * firmware _Lxx/_Exx handlers currently rely on the GPE interrupt mode.
212 *
213 ******************************************************************************/
214acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
215{
216 struct acpi_gpe_event_info *gpe_event_info;
217 acpi_status status;
218 acpi_cpu_flags flags;
219
220 ACPI_FUNCTION_TRACE(acpi_set_gpe);
221
222 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
223
224 /* Ensure that we have a valid GPE number */
225
226 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
227 if (!gpe_event_info) {
228 status = AE_BAD_PARAMETER;
229 goto unlock_and_exit;
230 }
231
232 /* Perform the action */
233
234 switch (action) {
235 case ACPI_GPE_ENABLE:
236
237 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
238 break;
239
240 case ACPI_GPE_DISABLE:
241
242 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
243 break;
244
245 default:
246
247 status = AE_BAD_PARAMETER;
248 break;
249 }
250
251unlock_and_exit:
252 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
253 return_ACPI_STATUS(status);
254}
255
256ACPI_EXPORT_SYMBOL(acpi_set_gpe)
186 257
187/******************************************************************************* 258/*******************************************************************************
188 * 259 *
@@ -313,7 +384,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
313 * known as an "implicit notify". Note: The GPE is assumed to be 384 * known as an "implicit notify". Note: The GPE is assumed to be
314 * level-triggered (for windows compatibility). 385 * level-triggered (for windows compatibility).
315 */ 386 */
316 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 387 if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
317 ACPI_GPE_DISPATCH_NONE) { 388 ACPI_GPE_DISPATCH_NONE) {
318 /* 389 /*
319 * This is the first device for implicit notify on this GPE. 390 * This is the first device for implicit notify on this GPE.
@@ -327,7 +398,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
327 * If we already have an implicit notify on this GPE, add 398 * If we already have an implicit notify on this GPE, add
328 * this device to the notify list. 399 * this device to the notify list.
329 */ 400 */
330 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 401 if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
331 ACPI_GPE_DISPATCH_NOTIFY) { 402 ACPI_GPE_DISPATCH_NOTIFY) {
332 403
333 /* Ensure that the device is not already in the list */ 404 /* Ensure that the device is not already in the list */
@@ -530,6 +601,49 @@ unlock_and_exit:
530 601
531ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) 602ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
532 603
604/*******************************************************************************
605 *
606 * FUNCTION: acpi_finish_gpe
607 *
608 * PARAMETERS: gpe_device - Namespace node for the GPE Block
609 * (NULL for FADT defined GPEs)
610 * gpe_number - GPE level within the GPE block
611 *
612 * RETURN: Status
613 *
614 * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE
615 * processing. Intended for use by asynchronous host-installed
616 * GPE handlers. The GPE is only reenabled if the enable_for_run bit
617 * is set in the GPE info.
618 *
619 ******************************************************************************/
620acpi_status acpi_finish_gpe(acpi_handle gpe_device, u32 gpe_number)
621{
622 struct acpi_gpe_event_info *gpe_event_info;
623 acpi_status status;
624 acpi_cpu_flags flags;
625
626 ACPI_FUNCTION_TRACE(acpi_finish_gpe);
627
628 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
629
630 /* Ensure that we have a valid GPE number */
631
632 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
633 if (!gpe_event_info) {
634 status = AE_BAD_PARAMETER;
635 goto unlock_and_exit;
636 }
637
638 status = acpi_ev_finish_gpe(gpe_event_info);
639
640unlock_and_exit:
641 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
642 return_ACPI_STATUS(status);
643}
644
645ACPI_EXPORT_SYMBOL(acpi_finish_gpe)
646
533/****************************************************************************** 647/******************************************************************************
534 * 648 *
535 * FUNCTION: acpi_disable_all_gpes 649 * FUNCTION: acpi_disable_all_gpes
@@ -604,7 +718,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
604 * all GPE blocks. 718 * all GPE blocks.
605 * 719 *
606 ******************************************************************************/ 720 ******************************************************************************/
607
608acpi_status acpi_enable_all_wakeup_gpes(void) 721acpi_status acpi_enable_all_wakeup_gpes(void)
609{ 722{
610 acpi_status status; 723 acpi_status status;
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 2d6f187939c7..f21afbab03f7 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7d2949420db7..6e0df2b9d5a4 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index c545386fee96..89a976b4ccf2 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 95d23dabcfbb..aaeea4840aaa 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 6fbfad47518c..e67d0aca3fe6 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 0f23c3f2678e..7c213b6b6472 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index b994845ed359..c161dd974f74 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 1d1b27a96c5b..49479927e7f7 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 2207e624f538..b56fc9d6f48e 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index b49ea2a95f4f..472030f2b5bb 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index dbb03b544e8c..453b00c30177 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 1b8e94104407..77930683ab7d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2ede656ee26a..fcc618aa2061 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 363767cf01e5..b813fed95e56 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 29e9e99f7fe3..c930edda3f65 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 118e942005e5..4c2836dc825b 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index cd5288a257a9..0fe188e238ef 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index ab060261b43e..c7e3b929aa85 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 3cde553bcbe1..b6b7f3af29e4 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3af8de3fcea4..d2964af9ad4d 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index daf49f7ea311..a7eee2400ce0 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 04bd16c08f9e..3101607b4efe 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index fd11018b0168..6fa3c8d8fc5f 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index f7da64123ed5..05450656fe3d 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index d9d72dff2a76..3f4225e95d93 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 1e66d960fc11..e5c5949f9081 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 858fdd6be598..e5599f610808 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 494027f5c067..84bc550f4f1d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -54,6 +54,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
54 struct acpi_gpe_block_info *gpe_block, 54 struct acpi_gpe_block_info *gpe_block,
55 void *context); 55 void *context);
56 56
57static acpi_status
58acpi_hw_gpe_enable_write(u8 enable_mask,
59 struct acpi_gpe_register_info *gpe_register_info);
60
57/****************************************************************************** 61/******************************************************************************
58 * 62 *
59 * FUNCTION: acpi_hw_get_gpe_register_bit 63 * FUNCTION: acpi_hw_get_gpe_register_bit
@@ -146,7 +150,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
146 150
147 status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); 151 status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
148 if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) { 152 if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
149 gpe_register_info->enable_mask = enable_mask; 153 gpe_register_info->enable_mask = (u8)enable_mask;
150 } 154 }
151 return (status); 155 return (status);
152} 156}
@@ -221,7 +225,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
221 225
222 /* GPE currently handled? */ 226 /* GPE currently handled? */
223 227
224 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != 228 if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
225 ACPI_GPE_DISPATCH_NONE) { 229 ACPI_GPE_DISPATCH_NONE) {
226 local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER; 230 local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
227 } 231 }
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 6aade8e1d2a1..c5214dec4988 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index a4c34d2c556b..3cf77afd142c 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d590693eb54e..7d21cae6d602 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 76ab5c1a814e..675c709a300b 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 6b919127cd9d..2bd33fe56cb3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 96d007df65ec..5f97468df8ff 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 6921c7f3d208..3b3767698827 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f1249e3463be..24fa19a76d70 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 607eb9e5150d..e107f929d9cf 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 80fcfc8c9c1b..5d347a71bd0b 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index b55642c4ee58..1a8b39c8d969 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 3d88ef4a3e0d..80f097eb7381 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 42d37109aa5d..7dc367e6fe09 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index e634a05974db..7bcc68f57afa 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index a3fb7e4c0809..4a85c4517988 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 7c9d0181f341..bd6cd4a81316 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 7eee0a6f02f6..d293d9748036 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index a42ee9d6970d..677bc9330e64 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index e83cff31754b..c95a119767b5 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 392910ffbed9..0eb54315b4be 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 1b13b921dda9..8b79958b7aca 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 7e417aa5c91e..151fcd95ba84 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index b09e6bef72b8..c30672d23878 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index af1cc42a8aa1..4a9d4a66016e 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 4a5e3f5c0ff7..6ad02008c0c2 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 4758a1f2ce22..c68609a2bc1b 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 4bd558bf10d2..b6030a2deee1 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 8c6c11ce9760..d66c326485d8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index dae9401be7a2..793383501f81 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2014, Intel Corp. 9 * Copyright (C) 2000 - 2015, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -53,50 +53,6 @@ ACPI_MODULE_NAME("nsxfobj")
53 53
54/******************************************************************************* 54/*******************************************************************************
55 * 55 *
56 * FUNCTION: acpi_get_id
57 *
58 * PARAMETERS: Handle - Handle of object whose id is desired
59 * ret_id - Where the id will be placed
60 *
61 * RETURN: Status
62 *
63 * DESCRIPTION: This routine returns the owner id associated with a handle
64 *
65 ******************************************************************************/
66acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
67{
68 struct acpi_namespace_node *node;
69 acpi_status status;
70
71 /* Parameter Validation */
72
73 if (!ret_id) {
74 return (AE_BAD_PARAMETER);
75 }
76
77 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
78 if (ACPI_FAILURE(status)) {
79 return (status);
80 }
81
82 /* Convert and validate the handle */
83
84 node = acpi_ns_validate_handle(handle);
85 if (!node) {
86 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
87 return (AE_BAD_PARAMETER);
88 }
89
90 *ret_id = node->owner_id;
91
92 status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
93 return (status);
94}
95
96ACPI_EXPORT_SYMBOL(acpi_get_id)
97
98/*******************************************************************************
99 *
100 * FUNCTION: acpi_get_type 56 * FUNCTION: acpi_get_type
101 * 57 *
102 * PARAMETERS: handle - Handle of object whose type is desired 58 * PARAMETERS: handle - Handle of object whose type is desired
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 314d314340ae..6d038770577b 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index b058e2390fdd..90437227d790 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index a6885077d59e..2f5ddd806c58 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1755d2ac5656..1af4a405e351 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 0d8d37ffd04d..e18e7c47f482 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 6d27b597394e..a555f7f7b9a2 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 32d250feea21..9d669cc6cb62 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 0b64181e7720..89984f30addc 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 3cd48802eede..960505ab409a 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 9cb07e1e76d9..ba5f69171288 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index e135acaa5e1c..841a5ea06094 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 916fd095ff34..66d406e8fe36 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -74,7 +74,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address16[5] = {
74 * Address Translation Offset 74 * Address Translation Offset
75 * Address Length 75 * Address Length
76 */ 76 */
77 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity), 77 {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.address.granularity),
78 AML_OFFSET(address16.granularity), 78 AML_OFFSET(address16.granularity),
79 5}, 79 5},
80 80
@@ -112,7 +112,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address32[5] = {
112 * Address Translation Offset 112 * Address Translation Offset
113 * Address Length 113 * Address Length
114 */ 114 */
115 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity), 115 {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.address.granularity),
116 AML_OFFSET(address32.granularity), 116 AML_OFFSET(address32.granularity),
117 5}, 117 5},
118 118
@@ -150,7 +150,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address64[5] = {
150 * Address Translation Offset 150 * Address Translation Offset
151 * Address Length 151 * Address Length
152 */ 152 */
153 {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity), 153 {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.address.granularity),
154 AML_OFFSET(address64.granularity), 154 AML_OFFSET(address64.granularity),
155 5}, 155 5},
156 156
@@ -194,7 +194,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
194 * Address Length 194 * Address Length
195 * Type-Specific Attribute 195 * Type-Specific Attribute
196 */ 196 */
197 {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity), 197 {ACPI_RSC_MOVE64,
198 ACPI_RS_OFFSET(data.ext_address64.address.granularity),
198 AML_OFFSET(ext_address64.granularity), 199 AML_OFFSET(ext_address64.granularity),
199 6} 200 6}
200}; 201};
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 689556744b03..cb739a694931 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 049d9c22a0f9..15434e4c9b34 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index c3c56b5a9788..1539394c8c52 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 2f9332d5c973..b29d9ec63d1b 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -183,15 +183,15 @@ struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
183 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16), 183 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
184 "16-Bit WORD Address Space", NULL}, 184 "16-Bit WORD Address Space", NULL},
185 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 185 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
186 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity", 186 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.granularity),
187 NULL}, 187 "Granularity", NULL},
188 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum", 188 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.minimum),
189 NULL}, 189 "Address Minimum", NULL},
190 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum", 190 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.maximum),
191 NULL}, 191 "Address Maximum", NULL},
192 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset), 192 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.translation_offset),
193 "Translation Offset", NULL}, 193 "Translation Offset", NULL},
194 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length), 194 {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.address_length),
195 "Address Length", NULL}, 195 "Address Length", NULL},
196 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL} 196 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
197}; 197};
@@ -200,15 +200,15 @@ struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
200 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32), 200 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
201 "32-Bit DWORD Address Space", NULL}, 201 "32-Bit DWORD Address Space", NULL},
202 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 202 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
203 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity", 203 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.granularity),
204 NULL}, 204 "Granularity", NULL},
205 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum", 205 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.minimum),
206 NULL}, 206 "Address Minimum", NULL},
207 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum", 207 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.maximum),
208 NULL}, 208 "Address Maximum", NULL},
209 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset), 209 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.translation_offset),
210 "Translation Offset", NULL}, 210 "Translation Offset", NULL},
211 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length), 211 {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.address_length),
212 "Address Length", NULL}, 212 "Address Length", NULL},
213 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL} 213 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
214}; 214};
@@ -217,15 +217,15 @@ struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
217 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64), 217 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
218 "64-Bit QWORD Address Space", NULL}, 218 "64-Bit QWORD Address Space", NULL},
219 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 219 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
220 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity", 220 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.granularity),
221 NULL}, 221 "Granularity", NULL},
222 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum", 222 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.minimum),
223 NULL}, 223 "Address Minimum", NULL},
224 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum", 224 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.maximum),
225 NULL}, 225 "Address Maximum", NULL},
226 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset), 226 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.translation_offset),
227 "Translation Offset", NULL}, 227 "Translation Offset", NULL},
228 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length), 228 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.address_length),
229 "Address Length", NULL}, 229 "Address Length", NULL},
230 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL} 230 {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
231}; 231};
@@ -234,15 +234,16 @@ struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
234 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64), 234 {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
235 "64-Bit Extended Address Space", NULL}, 235 "64-Bit Extended Address Space", NULL},
236 {ACPI_RSD_ADDRESS, 0, NULL, NULL}, 236 {ACPI_RSD_ADDRESS, 0, NULL, NULL},
237 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity), 237 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.granularity),
238 "Granularity", NULL}, 238 "Granularity", NULL},
239 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum), 239 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.minimum),
240 "Address Minimum", NULL}, 240 "Address Minimum", NULL},
241 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum), 241 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.maximum),
242 "Address Maximum", NULL}, 242 "Address Maximum", NULL},
243 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset), 243 {ACPI_RSD_UINT64,
244 ACPI_RSD_OFFSET(ext_address64.address.translation_offset),
244 "Translation Offset", NULL}, 245 "Translation Offset", NULL},
245 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length), 246 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.address_length),
246 "Address Length", NULL}, 247 "Address Length", NULL},
247 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific), 248 {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
248 "Type-Specific Attribute", NULL} 249 "Type-Specific Attribute", NULL}
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 9d3f8a9a24bd..edecfc675979 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 19d64873290a..5adba018bab0 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 3461f7db26df..07cfa70a475b 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 77291293af64..50d5be2ee062 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index eab4483ff5f8..c6b80862030e 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 41eea4bc089c..1fe49d223663 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 9e8407223d95..4c8c6fe6ea74 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 897a5ceb0420..ece3cd60cc6a 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 877ab9202133..8e6276df0226 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("rsxface")
60 ACPI_COPY_FIELD(out, in, min_address_fixed); \ 60 ACPI_COPY_FIELD(out, in, min_address_fixed); \
61 ACPI_COPY_FIELD(out, in, max_address_fixed); \ 61 ACPI_COPY_FIELD(out, in, max_address_fixed); \
62 ACPI_COPY_FIELD(out, in, info); \ 62 ACPI_COPY_FIELD(out, in, info); \
63 ACPI_COPY_FIELD(out, in, granularity); \ 63 ACPI_COPY_FIELD(out, in, address.granularity); \
64 ACPI_COPY_FIELD(out, in, minimum); \ 64 ACPI_COPY_FIELD(out, in, address.minimum); \
65 ACPI_COPY_FIELD(out, in, maximum); \ 65 ACPI_COPY_FIELD(out, in, address.maximum); \
66 ACPI_COPY_FIELD(out, in, translation_offset); \ 66 ACPI_COPY_FIELD(out, in, address.translation_offset); \
67 ACPI_COPY_FIELD(out, in, address_length); \ 67 ACPI_COPY_FIELD(out, in, address.address_length); \
68 ACPI_COPY_FIELD(out, in, resource_source); 68 ACPI_COPY_FIELD(out, in, resource_source);
69/* Local prototypes */ 69/* Local prototypes */
70static acpi_status 70static acpi_status
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index f499c10ceb4a..6a144957aadd 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 41519a958083..7d2486005e3f 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index cb947700206c..0b879fcfef67 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 755b90c40ddf..9bad45e63a45 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index df3bb20ea325..ef16c06e5091 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 6b1ca9991b90..6559a58439c5 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 6482b0ded652..60e94f87f27a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -265,45 +265,6 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
265 265
266/******************************************************************************* 266/*******************************************************************************
267 * 267 *
268 * FUNCTION: acpi_unload_table_id
269 *
270 * PARAMETERS: id - Owner ID of the table to be removed.
271 *
272 * RETURN: Status
273 *
274 * DESCRIPTION: This routine is used to force the unload of a table (by id)
275 *
276 ******************************************************************************/
277acpi_status acpi_unload_table_id(acpi_owner_id id)
278{
279 int i;
280 acpi_status status = AE_NOT_EXIST;
281
282 ACPI_FUNCTION_TRACE(acpi_unload_table_id);
283
284 /* Find table in the global table list */
285 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
286 if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
287 continue;
288 }
289 /*
290 * Delete all namespace objects owned by this table. Note that these
291 * objects can appear anywhere in the namespace by virtue of the AML
292 * "Scope" operator. Thus, we need to track ownership by an ID, not
293 * simply a position within the hierarchy
294 */
295 acpi_tb_delete_namespace_by_owner(i);
296 status = acpi_tb_release_owner_id(i);
297 acpi_tb_set_table_loaded_flag(i, FALSE);
298 break;
299 }
300 return_ACPI_STATUS(status);
301}
302
303ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
304
305/*******************************************************************************
306 *
307 * FUNCTION: acpi_get_table_with_size 268 * FUNCTION: acpi_get_table_with_size
308 * 269 *
309 * PARAMETERS: signature - ACPI signature of needed table 270 * PARAMETERS: signature - ACPI signature of needed table
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index ab5308b81aa8..aadb3002a2dd 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 43a54af2b548..eac52cf14f1a 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index a1acec9d2ef3..1279f50da757 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index efac83c606dc..61d8f6d186d1 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 038ea887f562..242bd071f007 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 78fde0aac487..eacc5eee362e 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index ff601c0f7c7a..c37ec5035f4c 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index e516254c63b2..57078e3ea9b7 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 40e923e675fc..988e23b7795c 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index a3516de213fa..71fce389fd48 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 8e544d4688cd..9ef80f2828e3 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 8fed1482d228..6c738fa0cd42 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 0403dcaabaf2..743a0ae9fb17 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
index 4e263a8cc6f0..7e1168be39fa 100644
--- a/drivers/acpi/acpica/utfileio.c
+++ b/drivers/acpi/acpica/utfileio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 77ceac715f28..5e8df9177da4 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 9afa9441b183..aa448278ba28 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 4b12880e5b11..27431cfc1c44 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 77120ec9ea86..e402e07b4846 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index dc6e96547f18..089f78bbd59b 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index d44dee6ee10a..f9ff100f0159 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 2e2bb14e1099..56bbacd576f2 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 82717fff9ffc..37b8b58fcd56 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index dfa9009bfc87..7d83efe1ea29 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 685766fc6ca8..574cd3118313 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 36bec57ebd23..2959217067cb 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index db30caff130a..29e449935a82 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 0ce3f5a0dd67..82ca9142e10d 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index bc1ff820c7dd..b3505dbc715e 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 1cc97a752c15..8274cc16edc3 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 6dc54b3c28b0..83b6c52490dc 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 7d0ee969d781..130dd9f96f0f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 4dc33130f134..c6149a212149 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 49c873c68756..0929187bdce0 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 88ef77f3cf88..306e785f9418 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index b1fd6886e439..083a76891889 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 2a0f9e04d3a4..f2606af3364c 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 2cd7bdd6c8b3..a85ac07f3da3 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -449,7 +449,7 @@ int apei_resources_sub(struct apei_resources *resources1,
449} 449}
450EXPORT_SYMBOL_GPL(apei_resources_sub); 450EXPORT_SYMBOL_GPL(apei_resources_sub);
451 451
452static int apei_get_nvs_callback(__u64 start, __u64 size, void *data) 452static int apei_get_res_callback(__u64 start, __u64 size, void *data)
453{ 453{
454 struct apei_resources *resources = data; 454 struct apei_resources *resources = data;
455 return apei_res_add(&resources->iomem, start, size); 455 return apei_res_add(&resources->iomem, start, size);
@@ -457,7 +457,15 @@ static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
457 457
458static int apei_get_nvs_resources(struct apei_resources *resources) 458static int apei_get_nvs_resources(struct apei_resources *resources)
459{ 459{
460 return acpi_nvs_for_each_region(apei_get_nvs_callback, resources); 460 return acpi_nvs_for_each_region(apei_get_res_callback, resources);
461}
462
463int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
464 void *data), void *data);
465static int apei_get_arch_resources(struct apei_resources *resources)
466
467{
468 return arch_apei_filter_addr(apei_get_res_callback, resources);
461} 469}
462 470
463/* 471/*
@@ -470,7 +478,7 @@ int apei_resources_request(struct apei_resources *resources,
470{ 478{
471 struct apei_res *res, *res_bak = NULL; 479 struct apei_res *res, *res_bak = NULL;
472 struct resource *r; 480 struct resource *r;
473 struct apei_resources nvs_resources; 481 struct apei_resources nvs_resources, arch_res;
474 int rc; 482 int rc;
475 483
476 rc = apei_resources_sub(resources, &apei_resources_all); 484 rc = apei_resources_sub(resources, &apei_resources_all);
@@ -485,10 +493,20 @@ int apei_resources_request(struct apei_resources *resources,
485 apei_resources_init(&nvs_resources); 493 apei_resources_init(&nvs_resources);
486 rc = apei_get_nvs_resources(&nvs_resources); 494 rc = apei_get_nvs_resources(&nvs_resources);
487 if (rc) 495 if (rc)
488 goto res_fini; 496 goto nvs_res_fini;
489 rc = apei_resources_sub(resources, &nvs_resources); 497 rc = apei_resources_sub(resources, &nvs_resources);
490 if (rc) 498 if (rc)
491 goto res_fini; 499 goto nvs_res_fini;
500
501 if (arch_apei_filter_addr) {
502 apei_resources_init(&arch_res);
503 rc = apei_get_arch_resources(&arch_res);
504 if (rc)
505 goto arch_res_fini;
506 rc = apei_resources_sub(resources, &arch_res);
507 if (rc)
508 goto arch_res_fini;
509 }
492 510
493 rc = -EINVAL; 511 rc = -EINVAL;
494 list_for_each_entry(res, &resources->iomem, list) { 512 list_for_each_entry(res, &resources->iomem, list) {
@@ -536,7 +554,9 @@ err_unmap_iomem:
536 break; 554 break;
537 release_mem_region(res->start, res->end - res->start); 555 release_mem_region(res->start, res->end - res->start);
538 } 556 }
539res_fini: 557arch_res_fini:
558 apei_resources_fini(&arch_res);
559nvs_res_fini:
540 apei_resources_fini(&nvs_resources); 560 apei_resources_fini(&nvs_resources);
541 return rc; 561 return rc;
542} 562}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c0d44d394ca3..735db11a9b00 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1027,7 +1027,6 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
1027 1027
1028static struct dev_pm_domain acpi_general_pm_domain = { 1028static struct dev_pm_domain acpi_general_pm_domain = {
1029 .ops = { 1029 .ops = {
1030#ifdef CONFIG_PM
1031 .runtime_suspend = acpi_subsys_runtime_suspend, 1030 .runtime_suspend = acpi_subsys_runtime_suspend,
1032 .runtime_resume = acpi_subsys_runtime_resume, 1031 .runtime_resume = acpi_subsys_runtime_resume,
1033#ifdef CONFIG_PM_SLEEP 1032#ifdef CONFIG_PM_SLEEP
@@ -1041,7 +1040,6 @@ static struct dev_pm_domain acpi_general_pm_domain = {
1041 .poweroff_late = acpi_subsys_suspend_late, 1040 .poweroff_late = acpi_subsys_suspend_late,
1042 .restore_early = acpi_subsys_resume_early, 1041 .restore_early = acpi_subsys_resume_early,
1043#endif 1042#endif
1044#endif
1045 }, 1043 },
1046}; 1044};
1047 1045
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 1b5853f384e2..982b67faaaf3 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * ec.c - ACPI Embedded Controller Driver (v2.2) 2 * ec.c - ACPI Embedded Controller Driver (v3)
3 * 3 *
4 * Copyright (C) 2001-2014 Intel Corporation 4 * Copyright (C) 2001-2015 Intel Corporation
5 * Author: 2014 Lv Zheng <lv.zheng@intel.com> 5 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
6 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> 6 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
7 * 2006 Denis Sadykov <denis.m.sadykov@intel.com> 7 * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
8 * 2004 Luming Yu <luming.yu@intel.com> 8 * 2004 Luming Yu <luming.yu@intel.com>
@@ -71,15 +71,18 @@ enum ec_command {
71#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ 71#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
72#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 72#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
73#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ 73#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
74#define ACPI_EC_UDELAY_POLL 1000 /* Wait 1ms for EC transaction polling */
74#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query 75#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
75 * when trying to clear the EC */ 76 * when trying to clear the EC */
76 77
77enum { 78enum {
78 EC_FLAGS_QUERY_PENDING, /* Query is pending */ 79 EC_FLAGS_QUERY_PENDING, /* Query is pending */
79 EC_FLAGS_GPE_STORM, /* GPE storm detected */
80 EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and 80 EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
81 * OpReg are installed */ 81 * OpReg are installed */
82 EC_FLAGS_BLOCKED, /* Transactions are blocked */ 82 EC_FLAGS_STARTED, /* Driver is started */
83 EC_FLAGS_STOPPED, /* Driver is stopped */
84 EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
85 * current command processing */
83}; 86};
84 87
85#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ 88#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -105,6 +108,7 @@ struct acpi_ec_query_handler {
105 acpi_handle handle; 108 acpi_handle handle;
106 void *data; 109 void *data;
107 u8 query_bit; 110 u8 query_bit;
111 struct kref kref;
108}; 112};
109 113
110struct transaction { 114struct transaction {
@@ -117,8 +121,12 @@ struct transaction {
117 u8 wlen; 121 u8 wlen;
118 u8 rlen; 122 u8 rlen;
119 u8 flags; 123 u8 flags;
124 unsigned long timestamp;
120}; 125};
121 126
127static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
128static void advance_transaction(struct acpi_ec *ec);
129
122struct acpi_ec *boot_ec, *first_ec; 130struct acpi_ec *boot_ec, *first_ec;
123EXPORT_SYMBOL(first_ec); 131EXPORT_SYMBOL(first_ec);
124 132
@@ -129,7 +137,22 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
129static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ 137static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
130 138
131/* -------------------------------------------------------------------------- 139/* --------------------------------------------------------------------------
132 * Transaction Management 140 * Device Flags
141 * -------------------------------------------------------------------------- */
142
143static bool acpi_ec_started(struct acpi_ec *ec)
144{
145 return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
146 !test_bit(EC_FLAGS_STOPPED, &ec->flags);
147}
148
149static bool acpi_ec_flushed(struct acpi_ec *ec)
150{
151 return ec->reference_count == 1;
152}
153
154/* --------------------------------------------------------------------------
155 * EC Registers
133 * -------------------------------------------------------------------------- */ 156 * -------------------------------------------------------------------------- */
134 157
135static inline u8 acpi_ec_read_status(struct acpi_ec *ec) 158static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
@@ -151,6 +174,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
151{ 174{
152 u8 x = inb(ec->data_addr); 175 u8 x = inb(ec->data_addr);
153 176
177 ec->curr->timestamp = jiffies;
154 pr_debug("EC_DATA(R) = 0x%2.2x\n", x); 178 pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
155 return x; 179 return x;
156} 180}
@@ -159,12 +183,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
159{ 183{
160 pr_debug("EC_SC(W) = 0x%2.2x\n", command); 184 pr_debug("EC_SC(W) = 0x%2.2x\n", command);
161 outb(command, ec->command_addr); 185 outb(command, ec->command_addr);
186 ec->curr->timestamp = jiffies;
162} 187}
163 188
164static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) 189static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
165{ 190{
166 pr_debug("EC_DATA(W) = 0x%2.2x\n", data); 191 pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
167 outb(data, ec->data_addr); 192 outb(data, ec->data_addr);
193 ec->curr->timestamp = jiffies;
168} 194}
169 195
170#ifdef DEBUG 196#ifdef DEBUG
@@ -188,6 +214,140 @@ static const char *acpi_ec_cmd_string(u8 cmd)
188#define acpi_ec_cmd_string(cmd) "UNDEF" 214#define acpi_ec_cmd_string(cmd) "UNDEF"
189#endif 215#endif
190 216
217/* --------------------------------------------------------------------------
218 * GPE Registers
219 * -------------------------------------------------------------------------- */
220
221static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
222{
223 acpi_event_status gpe_status = 0;
224
225 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
226 return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false;
227}
228
229static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
230{
231 if (open)
232 acpi_enable_gpe(NULL, ec->gpe);
233 else {
234 BUG_ON(ec->reference_count < 1);
235 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
236 }
237 if (acpi_ec_is_gpe_raised(ec)) {
238 /*
239 * On some platforms, EN=1 writes cannot trigger GPE. So
240 * software need to manually trigger a pseudo GPE event on
241 * EN=1 writes.
242 */
243 pr_debug("***** Polling quirk *****\n");
244 advance_transaction(ec);
245 }
246}
247
248static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
249{
250 if (close)
251 acpi_disable_gpe(NULL, ec->gpe);
252 else {
253 BUG_ON(ec->reference_count < 1);
254 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
255 }
256}
257
258static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
259{
260 /*
261 * GPE STS is a W1C register, which means:
262 * 1. Software can clear it without worrying about clearing other
263 * GPEs' STS bits when the hardware sets them in parallel.
264 * 2. As long as software can ensure only clearing it when it is
265 * set, hardware won't set it in parallel.
266 * So software can clear GPE in any contexts.
267 * Warning: do not move the check into advance_transaction() as the
268 * EC commands will be sent without GPE raised.
269 */
270 if (!acpi_ec_is_gpe_raised(ec))
271 return;
272 acpi_clear_gpe(NULL, ec->gpe);
273}
274
275/* --------------------------------------------------------------------------
276 * Transaction Management
277 * -------------------------------------------------------------------------- */
278
279static void acpi_ec_submit_request(struct acpi_ec *ec)
280{
281 ec->reference_count++;
282 if (ec->reference_count == 1)
283 acpi_ec_enable_gpe(ec, true);
284}
285
286static void acpi_ec_complete_request(struct acpi_ec *ec)
287{
288 bool flushed = false;
289
290 ec->reference_count--;
291 if (ec->reference_count == 0)
292 acpi_ec_disable_gpe(ec, true);
293 flushed = acpi_ec_flushed(ec);
294 if (flushed)
295 wake_up(&ec->wait);
296}
297
298static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
299{
300 if (!test_bit(flag, &ec->flags)) {
301 acpi_ec_disable_gpe(ec, false);
302 pr_debug("+++++ Polling enabled +++++\n");
303 set_bit(flag, &ec->flags);
304 }
305}
306
307static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
308{
309 if (test_bit(flag, &ec->flags)) {
310 clear_bit(flag, &ec->flags);
311 acpi_ec_enable_gpe(ec, false);
312 pr_debug("+++++ Polling disabled +++++\n");
313 }
314}
315
316/*
317 * acpi_ec_submit_flushable_request() - Increase the reference count unless
318 * the flush operation is not in
319 * progress
320 * @ec: the EC device
321 *
322 * This function must be used before taking a new action that should hold
323 * the reference count. If this function returns false, then the action
324 * must be discarded or it will prevent the flush operation from being
325 * completed.
326 */
327static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
328{
329 if (!acpi_ec_started(ec))
330 return false;
331 acpi_ec_submit_request(ec);
332 return true;
333}
334
335static void acpi_ec_submit_query(struct acpi_ec *ec)
336{
337 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
338 pr_debug("***** Event started *****\n");
339 schedule_work(&ec->work);
340 }
341}
342
343static void acpi_ec_complete_query(struct acpi_ec *ec)
344{
345 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
346 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
347 pr_debug("***** Event stopped *****\n");
348 }
349}
350
191static int ec_transaction_completed(struct acpi_ec *ec) 351static int ec_transaction_completed(struct acpi_ec *ec)
192{ 352{
193 unsigned long flags; 353 unsigned long flags;
@@ -200,7 +360,7 @@ static int ec_transaction_completed(struct acpi_ec *ec)
200 return ret; 360 return ret;
201} 361}
202 362
203static bool advance_transaction(struct acpi_ec *ec) 363static void advance_transaction(struct acpi_ec *ec)
204{ 364{
205 struct transaction *t; 365 struct transaction *t;
206 u8 status; 366 u8 status;
@@ -208,6 +368,12 @@ static bool advance_transaction(struct acpi_ec *ec)
208 368
209 pr_debug("===== %s (%d) =====\n", 369 pr_debug("===== %s (%d) =====\n",
210 in_interrupt() ? "IRQ" : "TASK", smp_processor_id()); 370 in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
371 /*
372 * By always clearing STS before handling all indications, we can
373 * ensure a hardware STS 0->1 change after this clearing can always
374 * trigger a GPE interrupt.
375 */
376 acpi_ec_clear_gpe(ec);
211 status = acpi_ec_read_status(ec); 377 status = acpi_ec_read_status(ec);
212 t = ec->curr; 378 t = ec->curr;
213 if (!t) 379 if (!t)
@@ -235,12 +401,13 @@ static bool advance_transaction(struct acpi_ec *ec)
235 t->flags |= ACPI_EC_COMMAND_COMPLETE; 401 t->flags |= ACPI_EC_COMMAND_COMPLETE;
236 wakeup = true; 402 wakeup = true;
237 } 403 }
238 return wakeup; 404 goto out;
239 } else { 405 } else {
240 if (EC_FLAGS_QUERY_HANDSHAKE && 406 if (EC_FLAGS_QUERY_HANDSHAKE &&
241 !(status & ACPI_EC_FLAG_SCI) && 407 !(status & ACPI_EC_FLAG_SCI) &&
242 (t->command == ACPI_EC_COMMAND_QUERY)) { 408 (t->command == ACPI_EC_COMMAND_QUERY)) {
243 t->flags |= ACPI_EC_COMMAND_POLL; 409 t->flags |= ACPI_EC_COMMAND_POLL;
410 acpi_ec_complete_query(ec);
244 t->rdata[t->ri++] = 0x00; 411 t->rdata[t->ri++] = 0x00;
245 t->flags |= ACPI_EC_COMMAND_COMPLETE; 412 t->flags |= ACPI_EC_COMMAND_COMPLETE;
246 pr_debug("***** Command(%s) software completion *****\n", 413 pr_debug("***** Command(%s) software completion *****\n",
@@ -249,9 +416,10 @@ static bool advance_transaction(struct acpi_ec *ec)
249 } else if ((status & ACPI_EC_FLAG_IBF) == 0) { 416 } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
250 acpi_ec_write_cmd(ec, t->command); 417 acpi_ec_write_cmd(ec, t->command);
251 t->flags |= ACPI_EC_COMMAND_POLL; 418 t->flags |= ACPI_EC_COMMAND_POLL;
419 acpi_ec_complete_query(ec);
252 } else 420 } else
253 goto err; 421 goto err;
254 return wakeup; 422 goto out;
255 } 423 }
256err: 424err:
257 /* 425 /*
@@ -259,28 +427,27 @@ err:
259 * otherwise will take a not handled IRQ as a false one. 427 * otherwise will take a not handled IRQ as a false one.
260 */ 428 */
261 if (!(status & ACPI_EC_FLAG_SCI)) { 429 if (!(status & ACPI_EC_FLAG_SCI)) {
262 if (in_interrupt() && t) 430 if (in_interrupt() && t) {
263 ++t->irq_count; 431 if (t->irq_count < ec_storm_threshold)
432 ++t->irq_count;
433 /* Allow triggering on 0 threshold */
434 if (t->irq_count == ec_storm_threshold)
435 acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
436 }
264 } 437 }
265 return wakeup; 438out:
439 if (status & ACPI_EC_FLAG_SCI)
440 acpi_ec_submit_query(ec);
441 if (wakeup && in_interrupt())
442 wake_up(&ec->wait);
266} 443}
267 444
268static void start_transaction(struct acpi_ec *ec) 445static void start_transaction(struct acpi_ec *ec)
269{ 446{
270 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; 447 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
271 ec->curr->flags = 0; 448 ec->curr->flags = 0;
272 (void)advance_transaction(ec); 449 ec->curr->timestamp = jiffies;
273} 450 advance_transaction(ec);
274
275static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
276
277static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
278{
279 if (state & ACPI_EC_FLAG_SCI) {
280 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
281 return acpi_ec_sync_query(ec, NULL);
282 }
283 return 0;
284} 451}
285 452
286static int ec_poll(struct acpi_ec *ec) 453static int ec_poll(struct acpi_ec *ec)
@@ -291,20 +458,25 @@ static int ec_poll(struct acpi_ec *ec)
291 while (repeat--) { 458 while (repeat--) {
292 unsigned long delay = jiffies + 459 unsigned long delay = jiffies +
293 msecs_to_jiffies(ec_delay); 460 msecs_to_jiffies(ec_delay);
461 unsigned long usecs = ACPI_EC_UDELAY_POLL;
294 do { 462 do {
295 /* don't sleep with disabled interrupts */ 463 /* don't sleep with disabled interrupts */
296 if (EC_FLAGS_MSI || irqs_disabled()) { 464 if (EC_FLAGS_MSI || irqs_disabled()) {
297 udelay(ACPI_EC_MSI_UDELAY); 465 usecs = ACPI_EC_MSI_UDELAY;
466 udelay(usecs);
298 if (ec_transaction_completed(ec)) 467 if (ec_transaction_completed(ec))
299 return 0; 468 return 0;
300 } else { 469 } else {
301 if (wait_event_timeout(ec->wait, 470 if (wait_event_timeout(ec->wait,
302 ec_transaction_completed(ec), 471 ec_transaction_completed(ec),
303 msecs_to_jiffies(1))) 472 usecs_to_jiffies(usecs)))
304 return 0; 473 return 0;
305 } 474 }
306 spin_lock_irqsave(&ec->lock, flags); 475 spin_lock_irqsave(&ec->lock, flags);
307 (void)advance_transaction(ec); 476 if (time_after(jiffies,
477 ec->curr->timestamp +
478 usecs_to_jiffies(usecs)))
479 advance_transaction(ec);
308 spin_unlock_irqrestore(&ec->lock, flags); 480 spin_unlock_irqrestore(&ec->lock, flags);
309 } while (time_before(jiffies, delay)); 481 } while (time_before(jiffies, delay));
310 pr_debug("controller reset, restart transaction\n"); 482 pr_debug("controller reset, restart transaction\n");
@@ -325,21 +497,27 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
325 udelay(ACPI_EC_MSI_UDELAY); 497 udelay(ACPI_EC_MSI_UDELAY);
326 /* start transaction */ 498 /* start transaction */
327 spin_lock_irqsave(&ec->lock, tmp); 499 spin_lock_irqsave(&ec->lock, tmp);
500 /* Enable GPE for command processing (IBF=0/OBF=1) */
501 if (!acpi_ec_submit_flushable_request(ec)) {
502 ret = -EINVAL;
503 goto unlock;
504 }
328 /* following two actions should be kept atomic */ 505 /* following two actions should be kept atomic */
329 ec->curr = t; 506 ec->curr = t;
330 pr_debug("***** Command(%s) started *****\n", 507 pr_debug("***** Command(%s) started *****\n",
331 acpi_ec_cmd_string(t->command)); 508 acpi_ec_cmd_string(t->command));
332 start_transaction(ec); 509 start_transaction(ec);
333 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
334 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
335 pr_debug("***** Event stopped *****\n");
336 }
337 spin_unlock_irqrestore(&ec->lock, tmp); 510 spin_unlock_irqrestore(&ec->lock, tmp);
338 ret = ec_poll(ec); 511 ret = ec_poll(ec);
339 spin_lock_irqsave(&ec->lock, tmp); 512 spin_lock_irqsave(&ec->lock, tmp);
513 if (t->irq_count == ec_storm_threshold)
514 acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
340 pr_debug("***** Command(%s) stopped *****\n", 515 pr_debug("***** Command(%s) stopped *****\n",
341 acpi_ec_cmd_string(t->command)); 516 acpi_ec_cmd_string(t->command));
342 ec->curr = NULL; 517 ec->curr = NULL;
518 /* Disable GPE for command processing (IBF=0/OBF=1) */
519 acpi_ec_complete_request(ec);
520unlock:
343 spin_unlock_irqrestore(&ec->lock, tmp); 521 spin_unlock_irqrestore(&ec->lock, tmp);
344 return ret; 522 return ret;
345} 523}
@@ -354,10 +532,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
354 if (t->rdata) 532 if (t->rdata)
355 memset(t->rdata, 0, t->rlen); 533 memset(t->rdata, 0, t->rlen);
356 mutex_lock(&ec->mutex); 534 mutex_lock(&ec->mutex);
357 if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
358 status = -EINVAL;
359 goto unlock;
360 }
361 if (ec->global_lock) { 535 if (ec->global_lock) {
362 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 536 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
363 if (ACPI_FAILURE(status)) { 537 if (ACPI_FAILURE(status)) {
@@ -365,26 +539,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
365 goto unlock; 539 goto unlock;
366 } 540 }
367 } 541 }
368 /* disable GPE during transaction if storm is detected */
369 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
370 /* It has to be disabled, so that it doesn't trigger. */
371 acpi_disable_gpe(NULL, ec->gpe);
372 }
373 542
374 status = acpi_ec_transaction_unlocked(ec, t); 543 status = acpi_ec_transaction_unlocked(ec, t);
375 544
376 /* check if we received SCI during transaction */ 545 if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags))
377 ec_check_sci_sync(ec, acpi_ec_read_status(ec));
378 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
379 msleep(1); 546 msleep(1);
380 /* It is safe to enable the GPE outside of the transaction. */
381 acpi_enable_gpe(NULL, ec->gpe);
382 } else if (t->irq_count > ec_storm_threshold) {
383 pr_info("GPE storm detected(%d GPEs), "
384 "transactions will use polling mode\n",
385 t->irq_count);
386 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
387 }
388 if (ec->global_lock) 547 if (ec->global_lock)
389 acpi_release_global_lock(glk); 548 acpi_release_global_lock(glk);
390unlock: 549unlock:
@@ -500,7 +659,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
500 u8 value = 0; 659 u8 value = 0;
501 660
502 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { 661 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
503 status = acpi_ec_sync_query(ec, &value); 662 status = acpi_ec_query(ec, &value);
504 if (status || !value) 663 if (status || !value)
505 break; 664 break;
506 } 665 }
@@ -511,6 +670,53 @@ static void acpi_ec_clear(struct acpi_ec *ec)
511 pr_info("%d stale EC events cleared\n", i); 670 pr_info("%d stale EC events cleared\n", i);
512} 671}
513 672
673static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
674{
675 unsigned long flags;
676
677 spin_lock_irqsave(&ec->lock, flags);
678 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
679 pr_debug("+++++ Starting EC +++++\n");
680 /* Enable GPE for event processing (SCI_EVT=1) */
681 if (!resuming)
682 acpi_ec_submit_request(ec);
683 pr_info("+++++ EC started +++++\n");
684 }
685 spin_unlock_irqrestore(&ec->lock, flags);
686}
687
688static bool acpi_ec_stopped(struct acpi_ec *ec)
689{
690 unsigned long flags;
691 bool flushed;
692
693 spin_lock_irqsave(&ec->lock, flags);
694 flushed = acpi_ec_flushed(ec);
695 spin_unlock_irqrestore(&ec->lock, flags);
696 return flushed;
697}
698
699static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
700{
701 unsigned long flags;
702
703 spin_lock_irqsave(&ec->lock, flags);
704 if (acpi_ec_started(ec)) {
705 pr_debug("+++++ Stopping EC +++++\n");
706 set_bit(EC_FLAGS_STOPPED, &ec->flags);
707 spin_unlock_irqrestore(&ec->lock, flags);
708 wait_event(ec->wait, acpi_ec_stopped(ec));
709 spin_lock_irqsave(&ec->lock, flags);
710 /* Disable GPE for event processing (SCI_EVT=1) */
711 if (!suspending)
712 acpi_ec_complete_request(ec);
713 clear_bit(EC_FLAGS_STARTED, &ec->flags);
714 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
715 pr_info("+++++ EC stopped +++++\n");
716 }
717 spin_unlock_irqrestore(&ec->lock, flags);
718}
719
514void acpi_ec_block_transactions(void) 720void acpi_ec_block_transactions(void)
515{ 721{
516 struct acpi_ec *ec = first_ec; 722 struct acpi_ec *ec = first_ec;
@@ -520,7 +726,7 @@ void acpi_ec_block_transactions(void)
520 726
521 mutex_lock(&ec->mutex); 727 mutex_lock(&ec->mutex);
522 /* Prevent transactions from being carried out */ 728 /* Prevent transactions from being carried out */
523 set_bit(EC_FLAGS_BLOCKED, &ec->flags); 729 acpi_ec_stop(ec, true);
524 mutex_unlock(&ec->mutex); 730 mutex_unlock(&ec->mutex);
525} 731}
526 732
@@ -531,14 +737,11 @@ void acpi_ec_unblock_transactions(void)
531 if (!ec) 737 if (!ec)
532 return; 738 return;
533 739
534 mutex_lock(&ec->mutex);
535 /* Allow transactions to be carried out again */ 740 /* Allow transactions to be carried out again */
536 clear_bit(EC_FLAGS_BLOCKED, &ec->flags); 741 acpi_ec_start(ec, true);
537 742
538 if (EC_FLAGS_CLEAR_ON_RESUME) 743 if (EC_FLAGS_CLEAR_ON_RESUME)
539 acpi_ec_clear(ec); 744 acpi_ec_clear(ec);
540
541 mutex_unlock(&ec->mutex);
542} 745}
543 746
544void acpi_ec_unblock_transactions_early(void) 747void acpi_ec_unblock_transactions_early(void)
@@ -548,36 +751,33 @@ void acpi_ec_unblock_transactions_early(void)
548 * atomic context during wakeup, so we don't need to acquire the mutex). 751 * atomic context during wakeup, so we don't need to acquire the mutex).
549 */ 752 */
550 if (first_ec) 753 if (first_ec)
551 clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags); 754 acpi_ec_start(first_ec, true);
552} 755}
553 756
554static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data) 757/* --------------------------------------------------------------------------
758 Event Management
759 -------------------------------------------------------------------------- */
760static struct acpi_ec_query_handler *
761acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
555{ 762{
556 int result; 763 if (handler)
557 u8 d; 764 kref_get(&handler->kref);
558 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, 765 return handler;
559 .wdata = NULL, .rdata = &d, 766}
560 .wlen = 0, .rlen = 1};
561 767
562 if (!ec || !data) 768static void acpi_ec_query_handler_release(struct kref *kref)
563 return -EINVAL; 769{
564 /* 770 struct acpi_ec_query_handler *handler =
565 * Query the EC to find out which _Qxx method we need to evaluate. 771 container_of(kref, struct acpi_ec_query_handler, kref);
566 * Note that successful completion of the query causes the ACPI_EC_SCI 772
567 * bit to be cleared (and thus clearing the interrupt source). 773 kfree(handler);
568 */ 774}
569 result = acpi_ec_transaction_unlocked(ec, &t); 775
570 if (result) 776static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
571 return result; 777{
572 if (!d) 778 kref_put(&handler->kref, acpi_ec_query_handler_release);
573 return -ENODATA;
574 *data = d;
575 return 0;
576} 779}
577 780
578/* --------------------------------------------------------------------------
579 Event Management
580 -------------------------------------------------------------------------- */
581int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, 781int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
582 acpi_handle handle, acpi_ec_query_func func, 782 acpi_handle handle, acpi_ec_query_func func,
583 void *data) 783 void *data)
@@ -593,6 +793,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
593 handler->func = func; 793 handler->func = func;
594 handler->data = data; 794 handler->data = data;
595 mutex_lock(&ec->mutex); 795 mutex_lock(&ec->mutex);
796 kref_init(&handler->kref);
596 list_add(&handler->node, &ec->list); 797 list_add(&handler->node, &ec->list);
597 mutex_unlock(&ec->mutex); 798 mutex_unlock(&ec->mutex);
598 return 0; 799 return 0;
@@ -602,15 +803,18 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
602void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) 803void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
603{ 804{
604 struct acpi_ec_query_handler *handler, *tmp; 805 struct acpi_ec_query_handler *handler, *tmp;
806 LIST_HEAD(free_list);
605 807
606 mutex_lock(&ec->mutex); 808 mutex_lock(&ec->mutex);
607 list_for_each_entry_safe(handler, tmp, &ec->list, node) { 809 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
608 if (query_bit == handler->query_bit) { 810 if (query_bit == handler->query_bit) {
609 list_del(&handler->node); 811 list_del_init(&handler->node);
610 kfree(handler); 812 list_add(&handler->node, &free_list);
611 } 813 }
612 } 814 }
613 mutex_unlock(&ec->mutex); 815 mutex_unlock(&ec->mutex);
816 list_for_each_entry(handler, &free_list, node)
817 acpi_ec_put_query_handler(handler);
614} 818}
615EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); 819EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
616 820
@@ -626,59 +830,56 @@ static void acpi_ec_run(void *cxt)
626 else if (handler->handle) 830 else if (handler->handle)
627 acpi_evaluate_object(handler->handle, NULL, NULL, NULL); 831 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
628 pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit); 832 pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
629 kfree(handler); 833 acpi_ec_put_query_handler(handler);
630} 834}
631 835
632static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) 836static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
633{ 837{
634 u8 value = 0; 838 u8 value = 0;
635 int status; 839 int result;
636 struct acpi_ec_query_handler *handler, *copy; 840 acpi_status status;
841 struct acpi_ec_query_handler *handler;
842 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
843 .wdata = NULL, .rdata = &value,
844 .wlen = 0, .rlen = 1};
637 845
638 status = acpi_ec_query_unlocked(ec, &value); 846 /*
847 * Query the EC to find out which _Qxx method we need to evaluate.
848 * Note that successful completion of the query causes the ACPI_EC_SCI
849 * bit to be cleared (and thus clearing the interrupt source).
850 */
851 result = acpi_ec_transaction(ec, &t);
852 if (result)
853 return result;
639 if (data) 854 if (data)
640 *data = value; 855 *data = value;
641 if (status) 856 if (!value)
642 return status; 857 return -ENODATA;
643 858
859 mutex_lock(&ec->mutex);
644 list_for_each_entry(handler, &ec->list, node) { 860 list_for_each_entry(handler, &ec->list, node) {
645 if (value == handler->query_bit) { 861 if (value == handler->query_bit) {
646 /* have custom handler for this bit */ 862 /* have custom handler for this bit */
647 copy = kmalloc(sizeof(*handler), GFP_KERNEL); 863 handler = acpi_ec_get_query_handler(handler);
648 if (!copy)
649 return -ENOMEM;
650 memcpy(copy, handler, sizeof(*copy));
651 pr_debug("##### Query(0x%02x) scheduled #####\n", 864 pr_debug("##### Query(0x%02x) scheduled #####\n",
652 handler->query_bit); 865 handler->query_bit);
653 return acpi_os_execute((copy->func) ? 866 status = acpi_os_execute((handler->func) ?
654 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, 867 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
655 acpi_ec_run, copy); 868 acpi_ec_run, handler);
869 if (ACPI_FAILURE(status))
870 result = -EBUSY;
871 break;
656 } 872 }
657 } 873 }
658 return 0;
659}
660
661static void acpi_ec_gpe_query(void *ec_cxt)
662{
663 struct acpi_ec *ec = ec_cxt;
664
665 if (!ec)
666 return;
667 mutex_lock(&ec->mutex);
668 acpi_ec_sync_query(ec, NULL);
669 mutex_unlock(&ec->mutex); 874 mutex_unlock(&ec->mutex);
875 return result;
670} 876}
671 877
672static int ec_check_sci(struct acpi_ec *ec, u8 state) 878static void acpi_ec_gpe_poller(struct work_struct *work)
673{ 879{
674 if (state & ACPI_EC_FLAG_SCI) { 880 struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
675 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { 881
676 pr_debug("***** Event started *****\n"); 882 acpi_ec_query(ec, NULL);
677 return acpi_os_execute(OSL_NOTIFY_HANDLER,
678 acpi_ec_gpe_query, ec);
679 }
680 }
681 return 0;
682} 883}
683 884
684static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, 885static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -688,11 +889,9 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
688 struct acpi_ec *ec = data; 889 struct acpi_ec *ec = data;
689 890
690 spin_lock_irqsave(&ec->lock, flags); 891 spin_lock_irqsave(&ec->lock, flags);
691 if (advance_transaction(ec)) 892 advance_transaction(ec);
692 wake_up(&ec->wait);
693 spin_unlock_irqrestore(&ec->lock, flags); 893 spin_unlock_irqrestore(&ec->lock, flags);
694 ec_check_sci(ec, acpi_ec_read_status(ec)); 894 return ACPI_INTERRUPT_HANDLED;
695 return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
696} 895}
697 896
698/* -------------------------------------------------------------------------- 897/* --------------------------------------------------------------------------
@@ -755,6 +954,7 @@ static struct acpi_ec *make_acpi_ec(void)
755 init_waitqueue_head(&ec->wait); 954 init_waitqueue_head(&ec->wait);
756 INIT_LIST_HEAD(&ec->list); 955 INIT_LIST_HEAD(&ec->list);
757 spin_lock_init(&ec->lock); 956 spin_lock_init(&ec->lock);
957 INIT_WORK(&ec->work, acpi_ec_gpe_poller);
758 return ec; 958 return ec;
759} 959}
760 960
@@ -810,13 +1010,13 @@ static int ec_install_handlers(struct acpi_ec *ec)
810 1010
811 if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) 1011 if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
812 return 0; 1012 return 0;
813 status = acpi_install_gpe_handler(NULL, ec->gpe, 1013 status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
814 ACPI_GPE_EDGE_TRIGGERED, 1014 ACPI_GPE_EDGE_TRIGGERED,
815 &acpi_ec_gpe_handler, ec); 1015 &acpi_ec_gpe_handler, ec);
816 if (ACPI_FAILURE(status)) 1016 if (ACPI_FAILURE(status))
817 return -ENODEV; 1017 return -ENODEV;
818 1018
819 acpi_enable_gpe(NULL, ec->gpe); 1019 acpi_ec_start(ec, false);
820 status = acpi_install_address_space_handler(ec->handle, 1020 status = acpi_install_address_space_handler(ec->handle,
821 ACPI_ADR_SPACE_EC, 1021 ACPI_ADR_SPACE_EC,
822 &acpi_ec_space_handler, 1022 &acpi_ec_space_handler,
@@ -831,7 +1031,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
831 pr_err("Fail in evaluating the _REG object" 1031 pr_err("Fail in evaluating the _REG object"
832 " of EC device. Broken bios is suspected.\n"); 1032 " of EC device. Broken bios is suspected.\n");
833 } else { 1033 } else {
834 acpi_disable_gpe(NULL, ec->gpe); 1034 acpi_ec_stop(ec, false);
835 acpi_remove_gpe_handler(NULL, ec->gpe, 1035 acpi_remove_gpe_handler(NULL, ec->gpe,
836 &acpi_ec_gpe_handler); 1036 &acpi_ec_gpe_handler);
837 return -ENODEV; 1037 return -ENODEV;
@@ -846,7 +1046,7 @@ static void ec_remove_handlers(struct acpi_ec *ec)
846{ 1046{
847 if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) 1047 if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
848 return; 1048 return;
849 acpi_disable_gpe(NULL, ec->gpe); 1049 acpi_ec_stop(ec, false);
850 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, 1050 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
851 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) 1051 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
852 pr_err("failed to remove space handler\n"); 1052 pr_err("failed to remove space handler\n");
@@ -903,11 +1103,8 @@ static int acpi_ec_add(struct acpi_device *device)
903 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 1103 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
904 1104
905 /* Clear stale _Q events if hardware might require that */ 1105 /* Clear stale _Q events if hardware might require that */
906 if (EC_FLAGS_CLEAR_ON_RESUME) { 1106 if (EC_FLAGS_CLEAR_ON_RESUME)
907 mutex_lock(&ec->mutex);
908 acpi_ec_clear(ec); 1107 acpi_ec_clear(ec);
909 mutex_unlock(&ec->mutex);
910 }
911 return ret; 1108 return ret;
912} 1109}
913 1110
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 163e82f536fa..56b321aa2b1c 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -35,6 +35,13 @@ void acpi_int340x_thermal_init(void);
35int acpi_sysfs_init(void); 35int acpi_sysfs_init(void);
36void acpi_container_init(void); 36void acpi_container_init(void);
37void acpi_memory_hotplug_init(void); 37void acpi_memory_hotplug_init(void);
38#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
39int acpi_ioapic_add(struct acpi_pci_root *root);
40int acpi_ioapic_remove(struct acpi_pci_root *root);
41#else
42static inline int acpi_ioapic_add(struct acpi_pci_root *root) { return 0; }
43static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
44#endif
38#ifdef CONFIG_ACPI_DOCK 45#ifdef CONFIG_ACPI_DOCK
39void register_dock_dependent_device(struct acpi_device *adev, 46void register_dock_dependent_device(struct acpi_device *adev,
40 acpi_handle dshandle); 47 acpi_handle dshandle);
@@ -68,6 +75,8 @@ static inline void acpi_debugfs_init(void) { return; }
68#endif 75#endif
69void acpi_lpss_init(void); 76void acpi_lpss_init(void);
70 77
78void acpi_apd_init(void);
79
71acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src); 80acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
72bool acpi_queue_hotplug_work(struct work_struct *work); 81bool acpi_queue_hotplug_work(struct work_struct *work);
73void acpi_device_hotplug(struct acpi_device *adev, u32 src); 82void acpi_device_hotplug(struct acpi_device *adev, u32 src);
@@ -122,11 +131,13 @@ struct acpi_ec {
122 unsigned long data_addr; 131 unsigned long data_addr;
123 unsigned long global_lock; 132 unsigned long global_lock;
124 unsigned long flags; 133 unsigned long flags;
134 unsigned long reference_count;
125 struct mutex mutex; 135 struct mutex mutex;
126 wait_queue_head_t wait; 136 wait_queue_head_t wait;
127 struct list_head list; 137 struct list_head list;
128 struct transaction *curr; 138 struct transaction *curr;
129 spinlock_t lock; 139 spinlock_t lock;
140 struct work_struct work;
130}; 141};
131 142
132extern struct acpi_ec *first_ec; 143extern struct acpi_ec *first_ec;
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
new file mode 100644
index 000000000000..ccdc8db16bb8
--- /dev/null
+++ b/drivers/acpi/ioapic.c
@@ -0,0 +1,229 @@
1/*
2 * IOAPIC/IOxAPIC/IOSAPIC driver
3 *
4 * Copyright (C) 2009 Fujitsu Limited.
5 * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
6 *
7 * Copyright (C) 2014 Intel Corporation
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Based on original drivers/pci/ioapic.c
14 * Yinghai Lu <yinghai@kernel.org>
15 * Jiang Liu <jiang.liu@intel.com>
16 */
17
18/*
19 * This driver manages I/O APICs added by hotplug after boot.
20 * We try to claim all I/O APIC devices, but those present at boot were
21 * registered when we parsed the ACPI MADT.
22 */
23
24#define pr_fmt(fmt) "ACPI : IOAPIC: " fmt
25
26#include <linux/slab.h>
27#include <linux/acpi.h>
28#include <linux/pci.h>
29#include <acpi/acpi.h>
30
31struct acpi_pci_ioapic {
32 acpi_handle root_handle;
33 acpi_handle handle;
34 u32 gsi_base;
35 struct resource res;
36 struct pci_dev *pdev;
37 struct list_head list;
38};
39
40static LIST_HEAD(ioapic_list);
41static DEFINE_MUTEX(ioapic_list_lock);
42
43static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
44{
45 struct resource *res = data;
46 struct resource_win win;
47
48 res->flags = 0;
49 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
50 return AE_OK;
51
52 if (!acpi_dev_resource_memory(acpi_res, res)) {
53 if (acpi_dev_resource_address_space(acpi_res, &win) ||
54 acpi_dev_resource_ext_address_space(acpi_res, &win))
55 *res = win.res;
56 }
57 if ((res->flags & IORESOURCE_PREFETCH) ||
58 (res->flags & IORESOURCE_DISABLED))
59 res->flags = 0;
60
61 return AE_CTRL_TERMINATE;
62}
63
64static bool acpi_is_ioapic(acpi_handle handle, char **type)
65{
66 acpi_status status;
67 struct acpi_device_info *info;
68 char *hid = NULL;
69 bool match = false;
70
71 if (!acpi_has_method(handle, "_GSB"))
72 return false;
73
74 status = acpi_get_object_info(handle, &info);
75 if (ACPI_SUCCESS(status)) {
76 if (info->valid & ACPI_VALID_HID)
77 hid = info->hardware_id.string;
78 if (hid) {
79 if (strcmp(hid, "ACPI0009") == 0) {
80 *type = "IOxAPIC";
81 match = true;
82 } else if (strcmp(hid, "ACPI000A") == 0) {
83 *type = "IOAPIC";
84 match = true;
85 }
86 }
87 kfree(info);
88 }
89
90 return match;
91}
92
93static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
94 void *context, void **rv)
95{
96 acpi_status status;
97 unsigned long long gsi_base;
98 struct acpi_pci_ioapic *ioapic;
99 struct pci_dev *dev = NULL;
100 struct resource *res = NULL;
101 char *type = NULL;
102
103 if (!acpi_is_ioapic(handle, &type))
104 return AE_OK;
105
106 mutex_lock(&ioapic_list_lock);
107 list_for_each_entry(ioapic, &ioapic_list, list)
108 if (ioapic->handle == handle) {
109 mutex_unlock(&ioapic_list_lock);
110 return AE_OK;
111 }
112
113 status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsi_base);
114 if (ACPI_FAILURE(status)) {
115 acpi_handle_warn(handle, "failed to evaluate _GSB method\n");
116 goto exit;
117 }
118
119 ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
120 if (!ioapic) {
121 pr_err("cannot allocate memory for new IOAPIC\n");
122 goto exit;
123 } else {
124 ioapic->root_handle = (acpi_handle)context;
125 ioapic->handle = handle;
126 ioapic->gsi_base = (u32)gsi_base;
127 INIT_LIST_HEAD(&ioapic->list);
128 }
129
130 if (acpi_ioapic_registered(handle, (u32)gsi_base))
131 goto done;
132
133 dev = acpi_get_pci_dev(handle);
134 if (dev && pci_resource_len(dev, 0)) {
135 if (pci_enable_device(dev) < 0)
136 goto exit_put;
137 pci_set_master(dev);
138 if (pci_request_region(dev, 0, type))
139 goto exit_disable;
140 res = &dev->resource[0];
141 ioapic->pdev = dev;
142 } else {
143 pci_dev_put(dev);
144 dev = NULL;
145
146 res = &ioapic->res;
147 acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, res);
148 if (res->flags == 0) {
149 acpi_handle_warn(handle, "failed to get resource\n");
150 goto exit_free;
151 } else if (request_resource(&iomem_resource, res)) {
152 acpi_handle_warn(handle, "failed to insert resource\n");
153 goto exit_free;
154 }
155 }
156
157 if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) {
158 acpi_handle_warn(handle, "failed to register IOAPIC\n");
159 goto exit_release;
160 }
161done:
162 list_add(&ioapic->list, &ioapic_list);
163 mutex_unlock(&ioapic_list_lock);
164
165 if (dev)
166 dev_info(&dev->dev, "%s at %pR, GSI %u\n",
167 type, res, (u32)gsi_base);
168 else
169 acpi_handle_info(handle, "%s at %pR, GSI %u\n",
170 type, res, (u32)gsi_base);
171
172 return AE_OK;
173
174exit_release:
175 if (dev)
176 pci_release_region(dev, 0);
177 else
178 release_resource(res);
179exit_disable:
180 if (dev)
181 pci_disable_device(dev);
182exit_put:
183 pci_dev_put(dev);
184exit_free:
185 kfree(ioapic);
186exit:
187 mutex_unlock(&ioapic_list_lock);
188 *(acpi_status *)rv = AE_ERROR;
189 return AE_OK;
190}
191
192int acpi_ioapic_add(struct acpi_pci_root *root)
193{
194 acpi_status status, retval = AE_OK;
195
196 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle,
197 UINT_MAX, handle_ioapic_add, NULL,
198 root->device->handle, (void **)&retval);
199
200 return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
201}
202
203int acpi_ioapic_remove(struct acpi_pci_root *root)
204{
205 int retval = 0;
206 struct acpi_pci_ioapic *ioapic, *tmp;
207
208 mutex_lock(&ioapic_list_lock);
209 list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
210 if (root->device->handle != ioapic->root_handle)
211 continue;
212
213 if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
214 retval = -EBUSY;
215
216 if (ioapic->pdev) {
217 pci_release_region(ioapic->pdev, 0);
218 pci_disable_device(ioapic->pdev);
219 pci_dev_put(ioapic->pdev);
220 } else if (ioapic->res.flags && ioapic->res.parent) {
221 release_resource(&ioapic->res);
222 }
223 list_del(&ioapic->list);
224 kfree(ioapic);
225 }
226 mutex_unlock(&ioapic_list_lock);
227
228 return retval;
229}
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 24b5476449a1..1333cbdc3ea2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -177,12 +177,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
177 177
178static int __init acpi_parse_slit(struct acpi_table_header *table) 178static int __init acpi_parse_slit(struct acpi_table_header *table)
179{ 179{
180 struct acpi_table_slit *slit; 180 struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
181
182 if (!table)
183 return -EINVAL;
184
185 slit = (struct acpi_table_slit *)table;
186 181
187 if (!slit_valid(slit)) { 182 if (!slit_valid(slit)) {
188 printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n"); 183 printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
@@ -260,11 +255,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
260 255
261static int __init acpi_parse_srat(struct acpi_table_header *table) 256static int __init acpi_parse_srat(struct acpi_table_header *table)
262{ 257{
263 struct acpi_table_srat *srat; 258 struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
264 if (!table)
265 return -EINVAL;
266 259
267 srat = (struct acpi_table_srat *)table;
268 acpi_srat_revision = srat->header.revision; 260 acpi_srat_revision = srat->header.revision;
269 261
270 /* Real work done in acpi_table_parse_srat below. */ 262 /* Real work done in acpi_table_parse_srat below. */
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b1def411c0b8..e7f718d6918a 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -485,14 +485,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
485 if (!pin || !dev->irq_managed || dev->irq <= 0) 485 if (!pin || !dev->irq_managed || dev->irq <= 0)
486 return; 486 return;
487 487
488 /* Keep IOAPIC pin configuration when suspending */
489 if (dev->dev.power.is_prepared)
490 return;
491#ifdef CONFIG_PM
492 if (dev->dev.power.runtime_status == RPM_SUSPENDING)
493 return;
494#endif
495
496 entry = acpi_pci_irq_lookup(dev, pin); 488 entry = acpi_pci_irq_lookup(dev, pin);
497 if (!entry) 489 if (!entry)
498 return; 490 return;
@@ -513,5 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
513 if (gsi >= 0) { 505 if (gsi >= 0) {
514 acpi_unregister_gsi(gsi); 506 acpi_unregister_gsi(gsi);
515 dev->irq_managed = 0; 507 dev->irq_managed = 0;
508 dev->irq = 0;
516 } 509 }
517} 510}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c6bcb8c719d8..68a5f712cd19 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -112,10 +112,10 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
112 if (ACPI_FAILURE(status)) 112 if (ACPI_FAILURE(status))
113 return AE_OK; 113 return AE_OK;
114 114
115 if ((address.address_length > 0) && 115 if ((address.address.address_length > 0) &&
116 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { 116 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
117 res->start = address.minimum; 117 res->start = address.address.minimum;
118 res->end = address.minimum + address.address_length - 1; 118 res->end = address.address.minimum + address.address.address_length - 1;
119 } 119 }
120 120
121 return AE_OK; 121 return AE_OK;
@@ -621,6 +621,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
621 if (hotadd) { 621 if (hotadd) {
622 pcibios_resource_survey_bus(root->bus); 622 pcibios_resource_survey_bus(root->bus);
623 pci_assign_unassigned_root_bus_resources(root->bus); 623 pci_assign_unassigned_root_bus_resources(root->bus);
624 acpi_ioapic_add(root);
624 } 625 }
625 626
626 pci_lock_rescan_remove(); 627 pci_lock_rescan_remove();
@@ -644,6 +645,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
644 645
645 pci_stop_root_bus(root->bus); 646 pci_stop_root_bus(root->bus);
646 647
648 WARN_ON(acpi_ioapic_remove(root));
649
647 device_set_run_wake(root->bus->bridge, false); 650 device_set_run_wake(root->bus->bridge, false);
648 pci_acpi_remove_bus_pm_notifier(device); 651 pci_acpi_remove_bus_pm_notifier(device);
649 652
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 02e48394276c..7962651cdbd4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -4,6 +4,10 @@
4 * 4 *
5 * Alex Chiang <achiang@hp.com> 5 * Alex Chiang <achiang@hp.com>
6 * - Unified x86/ia64 implementations 6 * - Unified x86/ia64 implementations
7 *
8 * I/O APIC hotplug support
9 * Yinghai Lu <yinghai@kernel.org>
10 * Jiang Liu <jiang.liu@intel.com>
7 */ 11 */
8#include <linux/export.h> 12#include <linux/export.h>
9#include <linux/acpi.h> 13#include <linux/acpi.h>
@@ -12,6 +16,21 @@
12#define _COMPONENT ACPI_PROCESSOR_COMPONENT 16#define _COMPONENT ACPI_PROCESSOR_COMPONENT
13ACPI_MODULE_NAME("processor_core"); 17ACPI_MODULE_NAME("processor_core");
14 18
19static struct acpi_table_madt *get_madt_table(void)
20{
21 static struct acpi_table_madt *madt;
22 static int read_madt;
23
24 if (!read_madt) {
25 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
26 (struct acpi_table_header **)&madt)))
27 madt = NULL;
28 read_madt++;
29 }
30
31 return madt;
32}
33
15static int map_lapic_id(struct acpi_subtable_header *entry, 34static int map_lapic_id(struct acpi_subtable_header *entry,
16 u32 acpi_id, int *apic_id) 35 u32 acpi_id, int *apic_id)
17{ 36{
@@ -67,17 +86,10 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
67static int map_madt_entry(int type, u32 acpi_id) 86static int map_madt_entry(int type, u32 acpi_id)
68{ 87{
69 unsigned long madt_end, entry; 88 unsigned long madt_end, entry;
70 static struct acpi_table_madt *madt;
71 static int read_madt;
72 int phys_id = -1; /* CPU hardware ID */ 89 int phys_id = -1; /* CPU hardware ID */
90 struct acpi_table_madt *madt;
73 91
74 if (!read_madt) { 92 madt = get_madt_table();
75 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
76 (struct acpi_table_header **)&madt)))
77 madt = NULL;
78 read_madt++;
79 }
80
81 if (!madt) 93 if (!madt)
82 return phys_id; 94 return phys_id;
83 95
@@ -203,3 +215,96 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
203 return acpi_map_cpuid(phys_id, acpi_id); 215 return acpi_map_cpuid(phys_id, acpi_id);
204} 216}
205EXPORT_SYMBOL_GPL(acpi_get_cpuid); 217EXPORT_SYMBOL_GPL(acpi_get_cpuid);
218
219#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
220static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
221 u64 *phys_addr, int *ioapic_id)
222{
223 struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry;
224
225 if (ioapic->global_irq_base != gsi_base)
226 return 0;
227
228 *phys_addr = ioapic->address;
229 *ioapic_id = ioapic->id;
230 return 1;
231}
232
233static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr)
234{
235 struct acpi_subtable_header *hdr;
236 unsigned long madt_end, entry;
237 struct acpi_table_madt *madt;
238 int apic_id = -1;
239
240 madt = get_madt_table();
241 if (!madt)
242 return apic_id;
243
244 entry = (unsigned long)madt;
245 madt_end = entry + madt->header.length;
246
247 /* Parse all entries looking for a match. */
248 entry += sizeof(struct acpi_table_madt);
249 while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
250 hdr = (struct acpi_subtable_header *)entry;
251 if (hdr->type == ACPI_MADT_TYPE_IO_APIC &&
252 get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id))
253 break;
254 else
255 entry += hdr->length;
256 }
257
258 return apic_id;
259}
260
261static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base,
262 u64 *phys_addr)
263{
264 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
265 struct acpi_subtable_header *header;
266 union acpi_object *obj;
267 int apic_id = -1;
268
269 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
270 goto exit;
271
272 if (!buffer.length || !buffer.pointer)
273 goto exit;
274
275 obj = buffer.pointer;
276 if (obj->type != ACPI_TYPE_BUFFER ||
277 obj->buffer.length < sizeof(struct acpi_subtable_header))
278 goto exit;
279
280 header = (struct acpi_subtable_header *)obj->buffer.pointer;
281 if (header->type == ACPI_MADT_TYPE_IO_APIC)
282 get_ioapic_id(header, gsi_base, phys_addr, &apic_id);
283
284exit:
285 kfree(buffer.pointer);
286 return apic_id;
287}
288
289/**
290 * acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base
291 * @handle: ACPI object for IOAPIC device
292 * @gsi_base: GSI base to match with
293 * @phys_addr: Pointer to store physical address of matching IOAPIC record
294 *
295 * Walk resources returned by ACPI_MAT method, then ACPI MADT table, to search
296 * for an ACPI IOAPIC record matching @gsi_base.
297 * Return IOAPIC id and store physical address in @phys_addr if found a match,
298 * otherwise return <0.
299 */
300int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr)
301{
302 int apic_id;
303
304 apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr);
305 if (apic_id == -1)
306 apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr);
307
308 return apic_id;
309}
310#endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 87b704e41877..c256bd7fbd78 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void)
681} 681}
682 682
683/** 683/**
684 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry 684 * acpi_idle_do_entry - enter idle state using the appropriate method
685 * @cx: cstate data 685 * @cx: cstate data
686 * 686 *
687 * Caller disables interrupt before call and enables interrupt after return. 687 * Caller disables interrupt before call and enables interrupt after return.
688 */ 688 */
689static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) 689static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
690{ 690{
691 /* Don't trace irqs off for idle */
692 stop_critical_timings();
693 if (cx->entry_method == ACPI_CSTATE_FFH) { 691 if (cx->entry_method == ACPI_CSTATE_FFH) {
694 /* Call into architectural FFH based C-state */ 692 /* Call into architectural FFH based C-state */
695 acpi_processor_ffh_cstate_enter(cx); 693 acpi_processor_ffh_cstate_enter(cx);
@@ -703,38 +701,9 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
703 gets asserted in time to freeze execution properly. */ 701 gets asserted in time to freeze execution properly. */
704 inl(acpi_gbl_FADT.xpm_timer_block.address); 702 inl(acpi_gbl_FADT.xpm_timer_block.address);
705 } 703 }
706 start_critical_timings();
707} 704}
708 705
709/** 706/**
710 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
711 * @dev: the target CPU
712 * @drv: cpuidle driver containing cpuidle state info
713 * @index: index of target state
714 *
715 * This is equivalent to the HALT instruction.
716 */
717static int acpi_idle_enter_c1(struct cpuidle_device *dev,
718 struct cpuidle_driver *drv, int index)
719{
720 struct acpi_processor *pr;
721 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
722
723 pr = __this_cpu_read(processors);
724
725 if (unlikely(!pr))
726 return -EINVAL;
727
728 lapic_timer_state_broadcast(pr, cx, 1);
729 acpi_idle_do_entry(cx);
730
731 lapic_timer_state_broadcast(pr, cx, 0);
732
733 return index;
734}
735
736
737/**
738 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) 707 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
739 * @dev: the target CPU 708 * @dev: the target CPU
740 * @index: the index of suggested state 709 * @index: the index of suggested state
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
761 return 0; 730 return 0;
762} 731}
763 732
764/** 733static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
765 * acpi_idle_enter_simple - enters an ACPI state without BM handling
766 * @dev: the target CPU
767 * @drv: cpuidle driver with cpuidle state information
768 * @index: the index of suggested state
769 */
770static int acpi_idle_enter_simple(struct cpuidle_device *dev,
771 struct cpuidle_driver *drv, int index)
772{ 734{
773 struct acpi_processor *pr; 735 return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
774 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 736 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
775 737 !pr->flags.has_cst;
776 pr = __this_cpu_read(processors);
777
778 if (unlikely(!pr))
779 return -EINVAL;
780
781#ifdef CONFIG_HOTPLUG_CPU
782 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
783 !pr->flags.has_cst &&
784 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
785 return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
786#endif
787
788 /*
789 * Must be done before busmaster disable as we might need to
790 * access HPET !
791 */
792 lapic_timer_state_broadcast(pr, cx, 1);
793
794 if (cx->type == ACPI_STATE_C3)
795 ACPI_FLUSH_CPU_CACHE();
796
797 /* Tell the scheduler that we are going deep-idle: */
798 sched_clock_idle_sleep_event();
799 acpi_idle_do_entry(cx);
800
801 sched_clock_idle_wakeup_event(0);
802
803 lapic_timer_state_broadcast(pr, cx, 0);
804 return index;
805} 738}
806 739
807static int c3_cpu_count; 740static int c3_cpu_count;
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
809 742
810/** 743/**
811 * acpi_idle_enter_bm - enters C3 with proper BM handling 744 * acpi_idle_enter_bm - enters C3 with proper BM handling
812 * @dev: the target CPU 745 * @pr: Target processor
813 * @drv: cpuidle driver containing state data 746 * @cx: Target state context
814 * @index: the index of suggested state
815 *
816 * If BM is detected, the deepest non-C3 idle state is entered instead.
817 */ 747 */
818static int acpi_idle_enter_bm(struct cpuidle_device *dev, 748static void acpi_idle_enter_bm(struct acpi_processor *pr,
819 struct cpuidle_driver *drv, int index) 749 struct acpi_processor_cx *cx)
820{ 750{
821 struct acpi_processor *pr;
822 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
823
824 pr = __this_cpu_read(processors);
825
826 if (unlikely(!pr))
827 return -EINVAL;
828
829#ifdef CONFIG_HOTPLUG_CPU
830 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
831 !pr->flags.has_cst &&
832 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
833 return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
834#endif
835
836 if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
837 if (drv->safe_state_index >= 0) {
838 return drv->states[drv->safe_state_index].enter(dev,
839 drv, drv->safe_state_index);
840 } else {
841 acpi_safe_halt();
842 return -EBUSY;
843 }
844 }
845
846 acpi_unlazy_tlb(smp_processor_id()); 751 acpi_unlazy_tlb(smp_processor_id());
847 752
848 /* Tell the scheduler that we are going deep-idle: */
849 sched_clock_idle_sleep_event();
850 /* 753 /*
851 * Must be done before busmaster disable as we might need to 754 * Must be done before busmaster disable as we might need to
852 * access HPET ! 755 * access HPET !
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
856 /* 759 /*
857 * disable bus master 760 * disable bus master
858 * bm_check implies we need ARB_DIS 761 * bm_check implies we need ARB_DIS
859 * !bm_check implies we need cache flush
860 * bm_control implies whether we can do ARB_DIS 762 * bm_control implies whether we can do ARB_DIS
861 * 763 *
862 * That leaves a case where bm_check is set and bm_control is 764 * That leaves a case where bm_check is set and bm_control is
863 * not set. In that case we cannot do much, we enter C3 765 * not set. In that case we cannot do much, we enter C3
864 * without doing anything. 766 * without doing anything.
865 */ 767 */
866 if (pr->flags.bm_check && pr->flags.bm_control) { 768 if (pr->flags.bm_control) {
867 raw_spin_lock(&c3_lock); 769 raw_spin_lock(&c3_lock);
868 c3_cpu_count++; 770 c3_cpu_count++;
869 /* Disable bus master arbitration when all CPUs are in C3 */ 771 /* Disable bus master arbitration when all CPUs are in C3 */
870 if (c3_cpu_count == num_online_cpus()) 772 if (c3_cpu_count == num_online_cpus())
871 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); 773 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
872 raw_spin_unlock(&c3_lock); 774 raw_spin_unlock(&c3_lock);
873 } else if (!pr->flags.bm_check) {
874 ACPI_FLUSH_CPU_CACHE();
875 } 775 }
876 776
877 acpi_idle_do_entry(cx); 777 acpi_idle_do_entry(cx);
878 778
879 /* Re-enable bus master arbitration */ 779 /* Re-enable bus master arbitration */
880 if (pr->flags.bm_check && pr->flags.bm_control) { 780 if (pr->flags.bm_control) {
881 raw_spin_lock(&c3_lock); 781 raw_spin_lock(&c3_lock);
882 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); 782 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
883 c3_cpu_count--; 783 c3_cpu_count--;
884 raw_spin_unlock(&c3_lock); 784 raw_spin_unlock(&c3_lock);
885 } 785 }
886 786
887 sched_clock_idle_wakeup_event(0); 787 lapic_timer_state_broadcast(pr, cx, 0);
788}
789
790static int acpi_idle_enter(struct cpuidle_device *dev,
791 struct cpuidle_driver *drv, int index)
792{
793 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
794 struct acpi_processor *pr;
795
796 pr = __this_cpu_read(processors);
797 if (unlikely(!pr))
798 return -EINVAL;
799
800 if (cx->type != ACPI_STATE_C1) {
801 if (acpi_idle_fallback_to_c1(pr)) {
802 index = CPUIDLE_DRIVER_STATE_START;
803 cx = per_cpu(acpi_cstate[index], dev->cpu);
804 } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
805 if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
806 acpi_idle_enter_bm(pr, cx);
807 return index;
808 } else if (drv->safe_state_index >= 0) {
809 index = drv->safe_state_index;
810 cx = per_cpu(acpi_cstate[index], dev->cpu);
811 } else {
812 acpi_safe_halt();
813 return -EBUSY;
814 }
815 }
816 }
817
818 lapic_timer_state_broadcast(pr, cx, 1);
819
820 if (cx->type == ACPI_STATE_C3)
821 ACPI_FLUSH_CPU_CACHE();
822
823 acpi_idle_do_entry(cx);
888 824
889 lapic_timer_state_broadcast(pr, cx, 0); 825 lapic_timer_state_broadcast(pr, cx, 0);
826
890 return index; 827 return index;
891} 828}
892 829
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
981 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 918 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
982 state->exit_latency = cx->latency; 919 state->exit_latency = cx->latency;
983 state->target_residency = cx->latency * latency_factor; 920 state->target_residency = cx->latency * latency_factor;
921 state->enter = acpi_idle_enter;
984 922
985 state->flags = 0; 923 state->flags = 0;
986 switch (cx->type) { 924 if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
987 case ACPI_STATE_C1:
988
989 state->enter = acpi_idle_enter_c1;
990 state->enter_dead = acpi_idle_play_dead;
991 drv->safe_state_index = count;
992 break;
993
994 case ACPI_STATE_C2:
995 state->enter = acpi_idle_enter_simple;
996 state->enter_dead = acpi_idle_play_dead; 925 state->enter_dead = acpi_idle_play_dead;
997 drv->safe_state_index = count; 926 drv->safe_state_index = count;
998 break;
999
1000 case ACPI_STATE_C3:
1001 state->enter = pr->flags.bm_check ?
1002 acpi_idle_enter_bm :
1003 acpi_idle_enter_simple;
1004 break;
1005 } 927 }
1006 928
1007 count++; 929 count++;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 782a0d15c25f..4752b9939987 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -34,21 +34,34 @@
34#define valid_IRQ(i) (true) 34#define valid_IRQ(i) (true)
35#endif 35#endif
36 36
37static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect, 37static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
38 bool window)
39{ 38{
40 unsigned long flags = IORESOURCE_MEM; 39 u64 reslen = end - start + 1;
41 40
42 if (len == 0) 41 /*
43 flags |= IORESOURCE_DISABLED; 42 * CHECKME: len might be required to check versus a minimum
43 * length as well. 1 for io is fine, but for memory it does
44 * not make any sense at all.
45 */
46 if (len && reslen && reslen == len && start <= end)
47 return true;
44 48
45 if (write_protect == ACPI_READ_WRITE_MEMORY) 49 pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
46 flags |= IORESOURCE_MEM_WRITEABLE; 50 io ? "io" : "mem", start, end, len);
51
52 return false;
53}
54
55static void acpi_dev_memresource_flags(struct resource *res, u64 len,
56 u8 write_protect)
57{
58 res->flags = IORESOURCE_MEM;
47 59
48 if (window) 60 if (!acpi_dev_resource_len_valid(res->start, res->end, len, false))
49 flags |= IORESOURCE_WINDOW; 61 res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
50 62
51 return flags; 63 if (write_protect == ACPI_READ_WRITE_MEMORY)
64 res->flags |= IORESOURCE_MEM_WRITEABLE;
52} 65}
53 66
54static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, 67static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
@@ -56,7 +69,7 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
56{ 69{
57 res->start = start; 70 res->start = start;
58 res->end = start + len - 1; 71 res->end = start + len - 1;
59 res->flags = acpi_dev_memresource_flags(len, write_protect, false); 72 acpi_dev_memresource_flags(res, len, write_protect);
60} 73}
61 74
62/** 75/**
@@ -67,6 +80,11 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
67 * Check if the given ACPI resource object represents a memory resource and 80 * Check if the given ACPI resource object represents a memory resource and
68 * if that's the case, use the information in it to populate the generic 81 * if that's the case, use the information in it to populate the generic
69 * resource object pointed to by @res. 82 * resource object pointed to by @res.
83 *
84 * Return:
85 * 1) false with res->flags setting to zero: not the expected resource type
86 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
87 * 3) true: valid assigned resource
70 */ 88 */
71bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) 89bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
72{ 90{
@@ -77,60 +95,52 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
77 switch (ares->type) { 95 switch (ares->type) {
78 case ACPI_RESOURCE_TYPE_MEMORY24: 96 case ACPI_RESOURCE_TYPE_MEMORY24:
79 memory24 = &ares->data.memory24; 97 memory24 = &ares->data.memory24;
80 if (!memory24->minimum && !memory24->address_length) 98 acpi_dev_get_memresource(res, memory24->minimum << 8,
81 return false; 99 memory24->address_length << 8,
82 acpi_dev_get_memresource(res, memory24->minimum,
83 memory24->address_length,
84 memory24->write_protect); 100 memory24->write_protect);
85 break; 101 break;
86 case ACPI_RESOURCE_TYPE_MEMORY32: 102 case ACPI_RESOURCE_TYPE_MEMORY32:
87 memory32 = &ares->data.memory32; 103 memory32 = &ares->data.memory32;
88 if (!memory32->minimum && !memory32->address_length)
89 return false;
90 acpi_dev_get_memresource(res, memory32->minimum, 104 acpi_dev_get_memresource(res, memory32->minimum,
91 memory32->address_length, 105 memory32->address_length,
92 memory32->write_protect); 106 memory32->write_protect);
93 break; 107 break;
94 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 108 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
95 fixed_memory32 = &ares->data.fixed_memory32; 109 fixed_memory32 = &ares->data.fixed_memory32;
96 if (!fixed_memory32->address && !fixed_memory32->address_length)
97 return false;
98 acpi_dev_get_memresource(res, fixed_memory32->address, 110 acpi_dev_get_memresource(res, fixed_memory32->address,
99 fixed_memory32->address_length, 111 fixed_memory32->address_length,
100 fixed_memory32->write_protect); 112 fixed_memory32->write_protect);
101 break; 113 break;
102 default: 114 default:
115 res->flags = 0;
103 return false; 116 return false;
104 } 117 }
105 return true; 118
119 return !(res->flags & IORESOURCE_DISABLED);
106} 120}
107EXPORT_SYMBOL_GPL(acpi_dev_resource_memory); 121EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);
108 122
109static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode, 123static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
110 bool window) 124 u8 io_decode)
111{ 125{
112 int flags = IORESOURCE_IO; 126 res->flags = IORESOURCE_IO;
113 127
114 if (io_decode == ACPI_DECODE_16) 128 if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
115 flags |= IORESOURCE_IO_16BIT_ADDR; 129 res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
116 130
117 if (start > end || end >= 0x10003) 131 if (res->end >= 0x10003)
118 flags |= IORESOURCE_DISABLED; 132 res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
119 133
120 if (window) 134 if (io_decode == ACPI_DECODE_16)
121 flags |= IORESOURCE_WINDOW; 135 res->flags |= IORESOURCE_IO_16BIT_ADDR;
122
123 return flags;
124} 136}
125 137
126static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, 138static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
127 u8 io_decode) 139 u8 io_decode)
128{ 140{
129 u64 end = start + len - 1;
130
131 res->start = start; 141 res->start = start;
132 res->end = end; 142 res->end = start + len - 1;
133 res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false); 143 acpi_dev_ioresource_flags(res, len, io_decode);
134} 144}
135 145
136/** 146/**
@@ -141,6 +151,11 @@ static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
141 * Check if the given ACPI resource object represents an I/O resource and 151 * Check if the given ACPI resource object represents an I/O resource and
142 * if that's the case, use the information in it to populate the generic 152 * if that's the case, use the information in it to populate the generic
143 * resource object pointed to by @res. 153 * resource object pointed to by @res.
154 *
155 * Return:
156 * 1) false with res->flags setting to zero: not the expected resource type
157 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
158 * 3) true: valid assigned resource
144 */ 159 */
145bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) 160bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
146{ 161{
@@ -150,135 +165,143 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
150 switch (ares->type) { 165 switch (ares->type) {
151 case ACPI_RESOURCE_TYPE_IO: 166 case ACPI_RESOURCE_TYPE_IO:
152 io = &ares->data.io; 167 io = &ares->data.io;
153 if (!io->minimum && !io->address_length)
154 return false;
155 acpi_dev_get_ioresource(res, io->minimum, 168 acpi_dev_get_ioresource(res, io->minimum,
156 io->address_length, 169 io->address_length,
157 io->io_decode); 170 io->io_decode);
158 break; 171 break;
159 case ACPI_RESOURCE_TYPE_FIXED_IO: 172 case ACPI_RESOURCE_TYPE_FIXED_IO:
160 fixed_io = &ares->data.fixed_io; 173 fixed_io = &ares->data.fixed_io;
161 if (!fixed_io->address && !fixed_io->address_length)
162 return false;
163 acpi_dev_get_ioresource(res, fixed_io->address, 174 acpi_dev_get_ioresource(res, fixed_io->address,
164 fixed_io->address_length, 175 fixed_io->address_length,
165 ACPI_DECODE_10); 176 ACPI_DECODE_10);
166 break; 177 break;
167 default: 178 default:
179 res->flags = 0;
168 return false; 180 return false;
169 } 181 }
170 return true; 182
183 return !(res->flags & IORESOURCE_DISABLED);
171} 184}
172EXPORT_SYMBOL_GPL(acpi_dev_resource_io); 185EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
173 186
174/** 187static bool acpi_decode_space(struct resource_win *win,
175 * acpi_dev_resource_address_space - Extract ACPI address space information. 188 struct acpi_resource_address *addr,
176 * @ares: Input ACPI resource object. 189 struct acpi_address64_attribute *attr)
177 * @res: Output generic resource object.
178 *
179 * Check if the given ACPI resource object represents an address space resource
180 * and if that's the case, use the information in it to populate the generic
181 * resource object pointed to by @res.
182 */
183bool acpi_dev_resource_address_space(struct acpi_resource *ares,
184 struct resource *res)
185{ 190{
186 acpi_status status; 191 u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
187 struct acpi_resource_address64 addr; 192 bool wp = addr->info.mem.write_protect;
188 bool window; 193 u64 len = attr->address_length;
189 u64 len; 194 struct resource *res = &win->res;
190 u8 io_decode;
191 195
192 switch (ares->type) { 196 /*
193 case ACPI_RESOURCE_TYPE_ADDRESS16: 197 * Filter out invalid descriptor according to ACPI Spec 5.0, section
194 case ACPI_RESOURCE_TYPE_ADDRESS32: 198 * 6.4.3.5 Address Space Resource Descriptors.
195 case ACPI_RESOURCE_TYPE_ADDRESS64: 199 */
196 break; 200 if ((addr->min_address_fixed != addr->max_address_fixed && len) ||
197 default: 201 (addr->min_address_fixed && addr->max_address_fixed && !len))
198 return false; 202 pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
199 } 203 addr->min_address_fixed, addr->max_address_fixed, len);
200 204
201 status = acpi_resource_to_address64(ares, &addr); 205 res->start = attr->minimum;
202 if (ACPI_FAILURE(status)) 206 res->end = attr->maximum;
203 return false;
204 207
205 res->start = addr.minimum; 208 /*
206 res->end = addr.maximum; 209 * For bridges that translate addresses across the bridge,
207 window = addr.producer_consumer == ACPI_PRODUCER; 210 * translation_offset is the offset that must be added to the
211 * address on the secondary side to obtain the address on the
212 * primary side. Non-bridge devices must list 0 for all Address
213 * Translation offset bits.
214 */
215 if (addr->producer_consumer == ACPI_PRODUCER) {
216 res->start += attr->translation_offset;
217 res->end += attr->translation_offset;
218 } else if (attr->translation_offset) {
219 pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
220 attr->translation_offset);
221 }
208 222
209 switch(addr.resource_type) { 223 switch (addr->resource_type) {
210 case ACPI_MEMORY_RANGE: 224 case ACPI_MEMORY_RANGE:
211 len = addr.maximum - addr.minimum + 1; 225 acpi_dev_memresource_flags(res, len, wp);
212 res->flags = acpi_dev_memresource_flags(len,
213 addr.info.mem.write_protect,
214 window);
215 break; 226 break;
216 case ACPI_IO_RANGE: 227 case ACPI_IO_RANGE:
217 io_decode = addr.granularity == 0xfff ? 228 acpi_dev_ioresource_flags(res, len, iodec);
218 ACPI_DECODE_10 : ACPI_DECODE_16;
219 res->flags = acpi_dev_ioresource_flags(addr.minimum,
220 addr.maximum,
221 io_decode, window);
222 break; 229 break;
223 case ACPI_BUS_NUMBER_RANGE: 230 case ACPI_BUS_NUMBER_RANGE:
224 res->flags = IORESOURCE_BUS; 231 res->flags = IORESOURCE_BUS;
225 break; 232 break;
226 default: 233 default:
227 res->flags = 0; 234 return false;
228 } 235 }
229 236
230 return true; 237 win->offset = attr->translation_offset;
238
239 if (addr->producer_consumer == ACPI_PRODUCER)
240 res->flags |= IORESOURCE_WINDOW;
241
242 if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
243 res->flags |= IORESOURCE_PREFETCH;
244
245 return !(res->flags & IORESOURCE_DISABLED);
246}
247
248/**
249 * acpi_dev_resource_address_space - Extract ACPI address space information.
250 * @ares: Input ACPI resource object.
251 * @win: Output generic resource object.
252 *
253 * Check if the given ACPI resource object represents an address space resource
254 * and if that's the case, use the information in it to populate the generic
255 * resource object pointed to by @win.
256 *
257 * Return:
258 * 1) false with win->res.flags setting to zero: not the expected resource type
259 * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
260 * resource
261 * 3) true: valid assigned resource
262 */
263bool acpi_dev_resource_address_space(struct acpi_resource *ares,
264 struct resource_win *win)
265{
266 struct acpi_resource_address64 addr;
267
268 win->res.flags = 0;
269 if (ACPI_FAILURE(acpi_resource_to_address64(ares, &addr)))
270 return false;
271
272 return acpi_decode_space(win, (struct acpi_resource_address *)&addr,
273 &addr.address);
231} 274}
232EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space); 275EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);
233 276
234/** 277/**
235 * acpi_dev_resource_ext_address_space - Extract ACPI address space information. 278 * acpi_dev_resource_ext_address_space - Extract ACPI address space information.
236 * @ares: Input ACPI resource object. 279 * @ares: Input ACPI resource object.
237 * @res: Output generic resource object. 280 * @win: Output generic resource object.
238 * 281 *
239 * Check if the given ACPI resource object represents an extended address space 282 * Check if the given ACPI resource object represents an extended address space
240 * resource and if that's the case, use the information in it to populate the 283 * resource and if that's the case, use the information in it to populate the
241 * generic resource object pointed to by @res. 284 * generic resource object pointed to by @win.
285 *
286 * Return:
287 * 1) false with win->res.flags setting to zero: not the expected resource type
288 * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
289 * resource
290 * 3) true: valid assigned resource
242 */ 291 */
243bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, 292bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
244 struct resource *res) 293 struct resource_win *win)
245{ 294{
246 struct acpi_resource_extended_address64 *ext_addr; 295 struct acpi_resource_extended_address64 *ext_addr;
247 bool window;
248 u64 len;
249 u8 io_decode;
250 296
297 win->res.flags = 0;
251 if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) 298 if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
252 return false; 299 return false;
253 300
254 ext_addr = &ares->data.ext_address64; 301 ext_addr = &ares->data.ext_address64;
255 302
256 res->start = ext_addr->minimum; 303 return acpi_decode_space(win, (struct acpi_resource_address *)ext_addr,
257 res->end = ext_addr->maximum; 304 &ext_addr->address);
258 window = ext_addr->producer_consumer == ACPI_PRODUCER;
259
260 switch(ext_addr->resource_type) {
261 case ACPI_MEMORY_RANGE:
262 len = ext_addr->maximum - ext_addr->minimum + 1;
263 res->flags = acpi_dev_memresource_flags(len,
264 ext_addr->info.mem.write_protect,
265 window);
266 break;
267 case ACPI_IO_RANGE:
268 io_decode = ext_addr->granularity == 0xfff ?
269 ACPI_DECODE_10 : ACPI_DECODE_16;
270 res->flags = acpi_dev_ioresource_flags(ext_addr->minimum,
271 ext_addr->maximum,
272 io_decode, window);
273 break;
274 case ACPI_BUS_NUMBER_RANGE:
275 res->flags = IORESOURCE_BUS;
276 break;
277 default:
278 res->flags = 0;
279 }
280
281 return true;
282} 305}
283EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space); 306EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
284 307
@@ -310,7 +333,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
310{ 333{
311 res->start = gsi; 334 res->start = gsi;
312 res->end = gsi; 335 res->end = gsi;
313 res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED; 336 res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
314} 337}
315 338
316static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, 339static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
@@ -369,6 +392,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
369 * represented by the resource and populate the generic resource object pointed 392 * represented by the resource and populate the generic resource object pointed
370 * to by @res accordingly. If the registration of the GSI is not successful, 393 * to by @res accordingly. If the registration of the GSI is not successful,
371 * IORESOURCE_DISABLED will be set it that object's flags. 394 * IORESOURCE_DISABLED will be set it that object's flags.
395 *
396 * Return:
397 * 1) false with res->flags setting to zero: not the expected resource type
398 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
399 * 3) true: valid assigned resource
372 */ 400 */
373bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, 401bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
374 struct resource *res) 402 struct resource *res)
@@ -402,6 +430,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
402 ext_irq->sharable, false); 430 ext_irq->sharable, false);
403 break; 431 break;
404 default: 432 default:
433 res->flags = 0;
405 return false; 434 return false;
406 } 435 }
407 436
@@ -415,12 +444,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
415 */ 444 */
416void acpi_dev_free_resource_list(struct list_head *list) 445void acpi_dev_free_resource_list(struct list_head *list)
417{ 446{
418 struct resource_list_entry *rentry, *re; 447 resource_list_free(list);
419
420 list_for_each_entry_safe(rentry, re, list, node) {
421 list_del(&rentry->node);
422 kfree(rentry);
423 }
424} 448}
425EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); 449EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
426 450
@@ -432,18 +456,19 @@ struct res_proc_context {
432 int error; 456 int error;
433}; 457};
434 458
435static acpi_status acpi_dev_new_resource_entry(struct resource *r, 459static acpi_status acpi_dev_new_resource_entry(struct resource_win *win,
436 struct res_proc_context *c) 460 struct res_proc_context *c)
437{ 461{
438 struct resource_list_entry *rentry; 462 struct resource_entry *rentry;
439 463
440 rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); 464 rentry = resource_list_create_entry(NULL, 0);
441 if (!rentry) { 465 if (!rentry) {
442 c->error = -ENOMEM; 466 c->error = -ENOMEM;
443 return AE_NO_MEMORY; 467 return AE_NO_MEMORY;
444 } 468 }
445 rentry->res = *r; 469 *rentry->res = win->res;
446 list_add_tail(&rentry->node, c->list); 470 rentry->offset = win->offset;
471 resource_list_add_tail(rentry, c->list);
447 c->count++; 472 c->count++;
448 return AE_OK; 473 return AE_OK;
449} 474}
@@ -452,7 +477,8 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
452 void *context) 477 void *context)
453{ 478{
454 struct res_proc_context *c = context; 479 struct res_proc_context *c = context;
455 struct resource r; 480 struct resource_win win;
481 struct resource *res = &win.res;
456 int i; 482 int i;
457 483
458 if (c->preproc) { 484 if (c->preproc) {
@@ -467,18 +493,18 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
467 } 493 }
468 } 494 }
469 495
470 memset(&r, 0, sizeof(r)); 496 memset(&win, 0, sizeof(win));
471 497
472 if (acpi_dev_resource_memory(ares, &r) 498 if (acpi_dev_resource_memory(ares, res)
473 || acpi_dev_resource_io(ares, &r) 499 || acpi_dev_resource_io(ares, res)
474 || acpi_dev_resource_address_space(ares, &r) 500 || acpi_dev_resource_address_space(ares, &win)
475 || acpi_dev_resource_ext_address_space(ares, &r)) 501 || acpi_dev_resource_ext_address_space(ares, &win))
476 return acpi_dev_new_resource_entry(&r, c); 502 return acpi_dev_new_resource_entry(&win, c);
477 503
478 for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) { 504 for (i = 0; acpi_dev_resource_interrupt(ares, i, res); i++) {
479 acpi_status status; 505 acpi_status status;
480 506
481 status = acpi_dev_new_resource_entry(&r, c); 507 status = acpi_dev_new_resource_entry(&win, c);
482 if (ACPI_FAILURE(status)) 508 if (ACPI_FAILURE(status))
483 return status; 509 return status;
484 } 510 }
@@ -503,7 +529,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
503 * returned as the final error code. 529 * returned as the final error code.
504 * 530 *
505 * The resultant struct resource objects are put on the list pointed to by 531 * The resultant struct resource objects are put on the list pointed to by
506 * @list, that must be empty initially, as members of struct resource_list_entry 532 * @list, that must be empty initially, as members of struct resource_entry
507 * objects. Callers of this routine should use %acpi_dev_free_resource_list() to 533 * objects. Callers of this routine should use %acpi_dev_free_resource_list() to
508 * free that list. 534 * free that list.
509 * 535 *
@@ -538,3 +564,58 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
538 return c.count; 564 return c.count;
539} 565}
540EXPORT_SYMBOL_GPL(acpi_dev_get_resources); 566EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
567
568/**
569 * acpi_dev_filter_resource_type - Filter ACPI resource according to resource
570 * types
571 * @ares: Input ACPI resource object.
572 * @types: Valid resource types of IORESOURCE_XXX
573 *
574 * This is a hepler function to support acpi_dev_get_resources(), which filters
575 * ACPI resource objects according to resource types.
576 */
577int acpi_dev_filter_resource_type(struct acpi_resource *ares,
578 unsigned long types)
579{
580 unsigned long type = 0;
581
582 switch (ares->type) {
583 case ACPI_RESOURCE_TYPE_MEMORY24:
584 case ACPI_RESOURCE_TYPE_MEMORY32:
585 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
586 type = IORESOURCE_MEM;
587 break;
588 case ACPI_RESOURCE_TYPE_IO:
589 case ACPI_RESOURCE_TYPE_FIXED_IO:
590 type = IORESOURCE_IO;
591 break;
592 case ACPI_RESOURCE_TYPE_IRQ:
593 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
594 type = IORESOURCE_IRQ;
595 break;
596 case ACPI_RESOURCE_TYPE_DMA:
597 case ACPI_RESOURCE_TYPE_FIXED_DMA:
598 type = IORESOURCE_DMA;
599 break;
600 case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
601 type = IORESOURCE_REG;
602 break;
603 case ACPI_RESOURCE_TYPE_ADDRESS16:
604 case ACPI_RESOURCE_TYPE_ADDRESS32:
605 case ACPI_RESOURCE_TYPE_ADDRESS64:
606 case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
607 if (ares->data.address.resource_type == ACPI_MEMORY_RANGE)
608 type = IORESOURCE_MEM;
609 else if (ares->data.address.resource_type == ACPI_IO_RANGE)
610 type = IORESOURCE_IO;
611 else if (ares->data.address.resource_type ==
612 ACPI_BUS_NUMBER_RANGE)
613 type = IORESOURCE_BUS;
614 break;
615 default:
616 break;
617 }
618
619 return (type & types) ? 0 : 1;
620}
621EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index dc4d8960684a..bbca7830e18a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2544,6 +2544,7 @@ int __init acpi_scan_init(void)
2544 acpi_pci_link_init(); 2544 acpi_pci_link_init();
2545 acpi_processor_init(); 2545 acpi_processor_init();
2546 acpi_lpss_init(); 2546 acpi_lpss_init();
2547 acpi_apd_init();
2547 acpi_cmos_rtc_init(); 2548 acpi_cmos_rtc_init();
2548 acpi_container_init(); 2549 acpi_container_init();
2549 acpi_memory_hotplug_init(); 2550 acpi_memory_hotplug_init();
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8aa9254a387f..7f251dd1a687 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -321,7 +321,7 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
321 {}, 321 {},
322}; 322};
323 323
324static void acpi_sleep_dmi_check(void) 324static void __init acpi_sleep_dmi_check(void)
325{ 325{
326 int year; 326 int year;
327 327
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 032db459370f..88a4f99dd2a7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -522,6 +522,24 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
522 DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), 522 DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
523 }, 523 },
524 }, 524 },
525 {
526 /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */
527 .callback = video_disable_native_backlight,
528 .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV",
529 .matches = {
530 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
531 DMI_MATCH(DMI_PRODUCT_NAME, "3570R/370R/470R/450R/510R/4450RV"),
532 },
533 },
534 {
535 /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
536 .callback = video_disable_native_backlight,
537 .ident = "SAMSUNG 730U3E/740U3E",
538 .matches = {
539 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
540 DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
541 },
542 },
525 543
526 { 544 {
527 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ 545 /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5f601553b9b0..e7f338a3a3c2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -269,7 +269,7 @@ config ATA_PIIX
269 269
270config SATA_DWC 270config SATA_DWC
271 tristate "DesignWare Cores SATA support" 271 tristate "DesignWare Cores SATA support"
272 depends on 460EX 272 depends on 460EX || (COMPILE_TEST && !(ARM || ARM64))
273 help 273 help
274 This option enables support for the on-chip SATA controller of the 274 This option enables support for the on-chip SATA controller of the
275 AppliedMicro processor 460EX. 275 AppliedMicro processor 460EX.
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 40f0e34f17af..71262e08648e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -333,7 +333,7 @@ struct ahci_host_priv {
333 u32 em_msg_type; /* EM message type */ 333 u32 em_msg_type; /* EM message type */
334 bool got_runtime_pm; /* Did we do pm_runtime_get? */ 334 bool got_runtime_pm; /* Did we do pm_runtime_get? */
335 struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ 335 struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
336 struct regulator *target_pwr; /* Optional */ 336 struct regulator **target_pwrs; /* Optional */
337 /* 337 /*
338 * If platform uses PHYs. There is a 1:1 relation between the port number and 338 * If platform uses PHYs. There is a 1:1 relation between the port number and
339 * the PHY position in this array. 339 * the PHY position in this array.
@@ -354,6 +354,10 @@ extern int ahci_ignore_sss;
354extern struct device_attribute *ahci_shost_attrs[]; 354extern struct device_attribute *ahci_shost_attrs[];
355extern struct device_attribute *ahci_sdev_attrs[]; 355extern struct device_attribute *ahci_sdev_attrs[];
356 356
357/*
358 * This must be instantiated by the edge drivers. Read the comments
359 * for ATA_BASE_SHT
360 */
357#define AHCI_SHT(drv_name) \ 361#define AHCI_SHT(drv_name) \
358 ATA_NCQ_SHT(drv_name), \ 362 ATA_NCQ_SHT(drv_name), \
359 .can_queue = AHCI_MAX_CMDS - 1, \ 363 .can_queue = AHCI_MAX_CMDS - 1, \
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index ce8a7a6d6c7f..267a3d3e79f4 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -16,6 +16,8 @@
16#include <linux/ahci_platform.h> 16#include <linux/ahci_platform.h>
17#include "ahci.h" 17#include "ahci.h"
18 18
19#define DRV_NAME "ahci_da850"
20
19/* SATA PHY Control Register offset from AHCI base */ 21/* SATA PHY Control Register offset from AHCI base */
20#define SATA_P0PHYCR_REG 0x178 22#define SATA_P0PHYCR_REG 0x178
21 23
@@ -59,6 +61,10 @@ static const struct ata_port_info ahci_da850_port_info = {
59 .port_ops = &ahci_platform_ops, 61 .port_ops = &ahci_platform_ops,
60}; 62};
61 63
64static struct scsi_host_template ahci_platform_sht = {
65 AHCI_SHT(DRV_NAME),
66};
67
62static int ahci_da850_probe(struct platform_device *pdev) 68static int ahci_da850_probe(struct platform_device *pdev)
63{ 69{
64 struct device *dev = &pdev->dev; 70 struct device *dev = &pdev->dev;
@@ -85,7 +91,8 @@ static int ahci_da850_probe(struct platform_device *pdev)
85 91
86 da850_sata_init(dev, pwrdn_reg, hpriv->mmio); 92 da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
87 93
88 rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info); 94 rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
95 &ahci_platform_sht);
89 if (rc) 96 if (rc)
90 goto disable_resources; 97 goto disable_resources;
91 98
@@ -102,7 +109,7 @@ static struct platform_driver ahci_da850_driver = {
102 .probe = ahci_da850_probe, 109 .probe = ahci_da850_probe,
103 .remove = ata_platform_remove_one, 110 .remove = ata_platform_remove_one,
104 .driver = { 111 .driver = {
105 .name = "ahci_da850", 112 .name = DRV_NAME,
106 .pm = &ahci_da850_pm_ops, 113 .pm = &ahci_da850_pm_ops,
107 }, 114 },
108}; 115};
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 35d51c59a370..3f3a7db208ae 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -28,6 +28,8 @@
28#include <linux/libata.h> 28#include <linux/libata.h>
29#include "ahci.h" 29#include "ahci.h"
30 30
31#define DRV_NAME "ahci-imx"
32
31enum { 33enum {
32 /* Timer 1-ms Register */ 34 /* Timer 1-ms Register */
33 IMX_TIMER1MS = 0x00e0, 35 IMX_TIMER1MS = 0x00e0,
@@ -221,11 +223,9 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
221 if (imxpriv->no_device) 223 if (imxpriv->no_device)
222 return 0; 224 return 0;
223 225
224 if (hpriv->target_pwr) { 226 ret = ahci_platform_enable_regulators(hpriv);
225 ret = regulator_enable(hpriv->target_pwr); 227 if (ret)
226 if (ret) 228 return ret;
227 return ret;
228 }
229 229
230 ret = clk_prepare_enable(imxpriv->sata_ref_clk); 230 ret = clk_prepare_enable(imxpriv->sata_ref_clk);
231 if (ret < 0) 231 if (ret < 0)
@@ -270,8 +270,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
270disable_clk: 270disable_clk:
271 clk_disable_unprepare(imxpriv->sata_ref_clk); 271 clk_disable_unprepare(imxpriv->sata_ref_clk);
272disable_regulator: 272disable_regulator:
273 if (hpriv->target_pwr) 273 ahci_platform_disable_regulators(hpriv);
274 regulator_disable(hpriv->target_pwr);
275 274
276 return ret; 275 return ret;
277} 276}
@@ -291,8 +290,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
291 290
292 clk_disable_unprepare(imxpriv->sata_ref_clk); 291 clk_disable_unprepare(imxpriv->sata_ref_clk);
293 292
294 if (hpriv->target_pwr) 293 ahci_platform_disable_regulators(hpriv);
295 regulator_disable(hpriv->target_pwr);
296} 294}
297 295
298static void ahci_imx_error_handler(struct ata_port *ap) 296static void ahci_imx_error_handler(struct ata_port *ap)
@@ -524,6 +522,10 @@ static u32 imx_ahci_parse_props(struct device *dev,
524 return reg_value; 522 return reg_value;
525} 523}
526 524
525static struct scsi_host_template ahci_platform_sht = {
526 AHCI_SHT(DRV_NAME),
527};
528
527static int imx_ahci_probe(struct platform_device *pdev) 529static int imx_ahci_probe(struct platform_device *pdev)
528{ 530{
529 struct device *dev = &pdev->dev; 531 struct device *dev = &pdev->dev;
@@ -620,7 +622,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
620 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; 622 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
621 writel(reg_val, hpriv->mmio + IMX_TIMER1MS); 623 writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
622 624
623 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info); 625 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
626 &ahci_platform_sht);
624 if (ret) 627 if (ret)
625 goto disable_sata; 628 goto disable_sata;
626 629
@@ -678,7 +681,7 @@ static struct platform_driver imx_ahci_driver = {
678 .probe = imx_ahci_probe, 681 .probe = imx_ahci_probe,
679 .remove = ata_platform_remove_one, 682 .remove = ata_platform_remove_one,
680 .driver = { 683 .driver = {
681 .name = "ahci-imx", 684 .name = DRV_NAME,
682 .of_match_table = imx_ahci_of_match, 685 .of_match_table = imx_ahci_of_match,
683 .pm = &ahci_imx_pm_ops, 686 .pm = &ahci_imx_pm_ops,
684 }, 687 },
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 64bb08432b69..23716dd8a7ec 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -19,6 +19,8 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include "ahci.h" 20#include "ahci.h"
21 21
22#define DRV_NAME "ahci-mvebu"
23
22#define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0 24#define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0
23#define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4 25#define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4
24 26
@@ -67,6 +69,10 @@ static const struct ata_port_info ahci_mvebu_port_info = {
67 .port_ops = &ahci_platform_ops, 69 .port_ops = &ahci_platform_ops,
68}; 70};
69 71
72static struct scsi_host_template ahci_platform_sht = {
73 AHCI_SHT(DRV_NAME),
74};
75
70static int ahci_mvebu_probe(struct platform_device *pdev) 76static int ahci_mvebu_probe(struct platform_device *pdev)
71{ 77{
72 struct ahci_host_priv *hpriv; 78 struct ahci_host_priv *hpriv;
@@ -88,7 +94,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
88 ahci_mvebu_mbus_config(hpriv, dram); 94 ahci_mvebu_mbus_config(hpriv, dram);
89 ahci_mvebu_regret_option(hpriv); 95 ahci_mvebu_regret_option(hpriv);
90 96
91 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info); 97 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
98 &ahci_platform_sht);
92 if (rc) 99 if (rc)
93 goto disable_resources; 100 goto disable_resources;
94 101
@@ -114,7 +121,7 @@ static struct platform_driver ahci_mvebu_driver = {
114 .probe = ahci_mvebu_probe, 121 .probe = ahci_mvebu_probe,
115 .remove = ata_platform_remove_one, 122 .remove = ata_platform_remove_one,
116 .driver = { 123 .driver = {
117 .name = "ahci-mvebu", 124 .name = DRV_NAME,
118 .of_match_table = ahci_mvebu_of_match, 125 .of_match_table = ahci_mvebu_of_match,
119 }, 126 },
120}; 127};
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 18d539837045..78d6ae0b90c4 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -22,6 +22,8 @@
22#include <linux/ahci_platform.h> 22#include <linux/ahci_platform.h>
23#include "ahci.h" 23#include "ahci.h"
24 24
25#define DRV_NAME "ahci"
26
25static const struct ata_port_info ahci_port_info = { 27static const struct ata_port_info ahci_port_info = {
26 .flags = AHCI_FLAG_COMMON, 28 .flags = AHCI_FLAG_COMMON,
27 .pio_mask = ATA_PIO4, 29 .pio_mask = ATA_PIO4,
@@ -29,6 +31,10 @@ static const struct ata_port_info ahci_port_info = {
29 .port_ops = &ahci_platform_ops, 31 .port_ops = &ahci_platform_ops,
30}; 32};
31 33
34static struct scsi_host_template ahci_platform_sht = {
35 AHCI_SHT(DRV_NAME),
36};
37
32static int ahci_probe(struct platform_device *pdev) 38static int ahci_probe(struct platform_device *pdev)
33{ 39{
34 struct device *dev = &pdev->dev; 40 struct device *dev = &pdev->dev;
@@ -46,7 +52,8 @@ static int ahci_probe(struct platform_device *pdev)
46 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) 52 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
47 hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; 53 hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
48 54
49 rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info); 55 rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
56 &ahci_platform_sht);
50 if (rc) 57 if (rc)
51 goto disable_resources; 58 goto disable_resources;
52 59
@@ -75,7 +82,7 @@ static struct platform_driver ahci_driver = {
75 .probe = ahci_probe, 82 .probe = ahci_probe,
76 .remove = ata_platform_remove_one, 83 .remove = ata_platform_remove_one,
77 .driver = { 84 .driver = {
78 .name = "ahci", 85 .name = DRV_NAME,
79 .of_match_table = ahci_of_match, 86 .of_match_table = ahci_of_match,
80 .pm = &ahci_pm_ops, 87 .pm = &ahci_pm_ops,
81 }, 88 },
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 2f9e8317cc16..bc971af262e7 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -23,6 +23,8 @@
23 23
24#include "ahci.h" 24#include "ahci.h"
25 25
26#define DRV_NAME "st_ahci"
27
26#define ST_AHCI_OOBR 0xbc 28#define ST_AHCI_OOBR 0xbc
27#define ST_AHCI_OOBR_WE BIT(31) 29#define ST_AHCI_OOBR_WE BIT(31)
28#define ST_AHCI_OOBR_CWMIN_SHIFT 24 30#define ST_AHCI_OOBR_CWMIN_SHIFT 24
@@ -140,6 +142,10 @@ static const struct ata_port_info st_ahci_port_info = {
140 .port_ops = &st_ahci_port_ops, 142 .port_ops = &st_ahci_port_ops,
141}; 143};
142 144
145static struct scsi_host_template ahci_platform_sht = {
146 AHCI_SHT(DRV_NAME),
147};
148
143static int st_ahci_probe(struct platform_device *pdev) 149static int st_ahci_probe(struct platform_device *pdev)
144{ 150{
145 struct st_ahci_drv_data *drv_data; 151 struct st_ahci_drv_data *drv_data;
@@ -166,7 +172,8 @@ static int st_ahci_probe(struct platform_device *pdev)
166 if (err) 172 if (err)
167 return err; 173 return err;
168 174
169 err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info); 175 err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
176 &ahci_platform_sht);
170 if (err) { 177 if (err) {
171 ahci_platform_disable_resources(hpriv); 178 ahci_platform_disable_resources(hpriv);
172 return err; 179 return err;
@@ -229,7 +236,7 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
229 236
230static struct platform_driver st_ahci_driver = { 237static struct platform_driver st_ahci_driver = {
231 .driver = { 238 .driver = {
232 .name = "st_ahci", 239 .name = DRV_NAME,
233 .pm = &st_ahci_pm_ops, 240 .pm = &st_ahci_pm_ops,
234 .of_match_table = of_match_ptr(st_ahci_match), 241 .of_match_table = of_match_ptr(st_ahci_match),
235 }, 242 },
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index e2e0da539a2f..b26437430163 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -27,6 +27,8 @@
27#include <linux/regulator/consumer.h> 27#include <linux/regulator/consumer.h>
28#include "ahci.h" 28#include "ahci.h"
29 29
30#define DRV_NAME "ahci-sunxi"
31
30/* Insmod parameters */ 32/* Insmod parameters */
31static bool enable_pmp; 33static bool enable_pmp;
32module_param(enable_pmp, bool, 0); 34module_param(enable_pmp, bool, 0);
@@ -169,6 +171,10 @@ static const struct ata_port_info ahci_sunxi_port_info = {
169 .port_ops = &ahci_platform_ops, 171 .port_ops = &ahci_platform_ops,
170}; 172};
171 173
174static struct scsi_host_template ahci_platform_sht = {
175 AHCI_SHT(DRV_NAME),
176};
177
172static int ahci_sunxi_probe(struct platform_device *pdev) 178static int ahci_sunxi_probe(struct platform_device *pdev)
173{ 179{
174 struct device *dev = &pdev->dev; 180 struct device *dev = &pdev->dev;
@@ -200,7 +206,8 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
200 if (!enable_pmp) 206 if (!enable_pmp)
201 hpriv->flags |= AHCI_HFLAG_NO_PMP; 207 hpriv->flags |= AHCI_HFLAG_NO_PMP;
202 208
203 rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info); 209 rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info,
210 &ahci_platform_sht);
204 if (rc) 211 if (rc)
205 goto disable_resources; 212 goto disable_resources;
206 213
@@ -251,7 +258,7 @@ static struct platform_driver ahci_sunxi_driver = {
251 .probe = ahci_sunxi_probe, 258 .probe = ahci_sunxi_probe,
252 .remove = ata_platform_remove_one, 259 .remove = ata_platform_remove_one,
253 .driver = { 260 .driver = {
254 .name = "ahci-sunxi", 261 .name = DRV_NAME,
255 .of_match_table = ahci_sunxi_of_match, 262 .of_match_table = ahci_sunxi_of_match,
256 .pm = &ahci_sunxi_pm_ops, 263 .pm = &ahci_sunxi_pm_ops,
257 }, 264 },
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 032904402c95..3a62eb246d80 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -31,6 +31,8 @@
31 31
32#include "ahci.h" 32#include "ahci.h"
33 33
34#define DRV_NAME "tegra-ahci"
35
34#define SATA_CONFIGURATION_0 0x180 36#define SATA_CONFIGURATION_0 0x180
35#define SATA_CONFIGURATION_EN_FPCI BIT(0) 37#define SATA_CONFIGURATION_EN_FPCI BIT(0)
36 38
@@ -289,6 +291,10 @@ static const struct of_device_id tegra_ahci_of_match[] = {
289}; 291};
290MODULE_DEVICE_TABLE(of, tegra_ahci_of_match); 292MODULE_DEVICE_TABLE(of, tegra_ahci_of_match);
291 293
294static struct scsi_host_template ahci_platform_sht = {
295 AHCI_SHT(DRV_NAME),
296};
297
292static int tegra_ahci_probe(struct platform_device *pdev) 298static int tegra_ahci_probe(struct platform_device *pdev)
293{ 299{
294 struct ahci_host_priv *hpriv; 300 struct ahci_host_priv *hpriv;
@@ -354,7 +360,8 @@ static int tegra_ahci_probe(struct platform_device *pdev)
354 if (ret) 360 if (ret)
355 return ret; 361 return ret;
356 362
357 ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info); 363 ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info,
364 &ahci_platform_sht);
358 if (ret) 365 if (ret)
359 goto deinit_controller; 366 goto deinit_controller;
360 367
@@ -370,7 +377,7 @@ static struct platform_driver tegra_ahci_driver = {
370 .probe = tegra_ahci_probe, 377 .probe = tegra_ahci_probe,
371 .remove = ata_platform_remove_one, 378 .remove = ata_platform_remove_one,
372 .driver = { 379 .driver = {
373 .name = "tegra-ahci", 380 .name = DRV_NAME,
374 .of_match_table = tegra_ahci_of_match, 381 .of_match_table = tegra_ahci_of_match,
375 }, 382 },
376 /* LP0 suspend support not implemented */ 383 /* LP0 suspend support not implemented */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index cbcd20810355..2e8bb603e447 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -30,6 +30,8 @@
30#include <linux/phy/phy.h> 30#include <linux/phy/phy.h>
31#include "ahci.h" 31#include "ahci.h"
32 32
33#define DRV_NAME "xgene-ahci"
34
33/* Max # of disk per a controller */ 35/* Max # of disk per a controller */
34#define MAX_AHCI_CHN_PERCTR 2 36#define MAX_AHCI_CHN_PERCTR 2
35 37
@@ -85,6 +87,7 @@ struct xgene_ahci_context {
85 struct ahci_host_priv *hpriv; 87 struct ahci_host_priv *hpriv;
86 struct device *dev; 88 struct device *dev;
87 u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/ 89 u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/
90 u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
88 void __iomem *csr_core; /* Core CSR address of IP */ 91 void __iomem *csr_core; /* Core CSR address of IP */
89 void __iomem *csr_diag; /* Diag CSR address of IP */ 92 void __iomem *csr_diag; /* Diag CSR address of IP */
90 void __iomem *csr_axi; /* AXI CSR address of IP */ 93 void __iomem *csr_axi; /* AXI CSR address of IP */
@@ -105,17 +108,69 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
105} 108}
106 109
107/** 110/**
111 * xgene_ahci_poll_reg_val- Poll a register on a specific value.
112 * @ap : ATA port of interest.
113 * @reg : Register of interest.
114 * @val : Value to be attained.
115 * @interval : waiting interval for polling.
116 * @timeout : timeout for achieving the value.
117 */
118static int xgene_ahci_poll_reg_val(struct ata_port *ap,
119 void __iomem *reg, unsigned
120 int val, unsigned long interval,
121 unsigned long timeout)
122{
123 unsigned long deadline;
124 unsigned int tmp;
125
126 tmp = ioread32(reg);
127 deadline = ata_deadline(jiffies, timeout);
128
129 while (tmp != val && time_before(jiffies, deadline)) {
130 ata_msleep(ap, interval);
131 tmp = ioread32(reg);
132 }
133
134 return tmp;
135}
136
137/**
108 * xgene_ahci_restart_engine - Restart the dma engine. 138 * xgene_ahci_restart_engine - Restart the dma engine.
109 * @ap : ATA port of interest 139 * @ap : ATA port of interest
110 * 140 *
111 * Restarts the dma engine inside the controller. 141 * Waits for completion of multiple commands and restarts
142 * the DMA engine inside the controller.
112 */ 143 */
113static int xgene_ahci_restart_engine(struct ata_port *ap) 144static int xgene_ahci_restart_engine(struct ata_port *ap)
114{ 145{
115 struct ahci_host_priv *hpriv = ap->host->private_data; 146 struct ahci_host_priv *hpriv = ap->host->private_data;
147 struct ahci_port_priv *pp = ap->private_data;
148 void __iomem *port_mmio = ahci_port_base(ap);
149 u32 fbs;
150
151 /*
152 * In case of PMP multiple IDENTIFY DEVICE commands can be
153 * issued inside PxCI. So need to poll PxCI for the
154 * completion of outstanding IDENTIFY DEVICE commands before
155 * we restart the DMA engine.
156 */
157 if (xgene_ahci_poll_reg_val(ap, port_mmio +
158 PORT_CMD_ISSUE, 0x0, 1, 100))
159 return -EBUSY;
116 160
117 ahci_stop_engine(ap); 161 ahci_stop_engine(ap);
118 ahci_start_fis_rx(ap); 162 ahci_start_fis_rx(ap);
163
164 /*
165 * Enable the PxFBS.FBS_EN bit as it
166 * gets cleared due to stopping the engine.
167 */
168 if (pp->fbs_supported) {
169 fbs = readl(port_mmio + PORT_FBS);
170 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
171 fbs = readl(port_mmio + PORT_FBS);
172 }
173
119 hpriv->start_engine(ap); 174 hpriv->start_engine(ap);
120 175
121 return 0; 176 return 0;
@@ -125,11 +180,17 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
125 * xgene_ahci_qc_issue - Issue commands to the device 180 * xgene_ahci_qc_issue - Issue commands to the device
126 * @qc: Command to issue 181 * @qc: Command to issue
127 * 182 *
128 * Due to Hardware errata for IDENTIFY DEVICE command and PACKET 183 * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot
129 * command of ATAPI protocol set, the controller cannot clear the BSY bit 184 * clear the BSY bit after receiving the PIO setup FIS. This results in the dma
130 * after receiving the PIO setup FIS. This results in the DMA state machine 185 * state machine goes into the CMFatalErrorUpdate state and locks up. By
131 * going into the CMFatalErrorUpdate state and locks up. By restarting the 186 * restarting the dma engine, it removes the controller out of lock up state.
132 * DMA engine, it removes the controller out of lock up state. 187 *
188 * Due to H/W errata, the controller is unable to save the PMP
189 * field fetched from command header before sending the H2D FIS.
190 * When the device returns the PMP port field in the D2H FIS, there is
191 * a mismatch and results in command completion failure. The
192 * workaround is to write the pmp value to PxFBS.DEV field before issuing
193 * any command to PMP.
133 */ 194 */
134static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) 195static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
135{ 196{
@@ -137,9 +198,23 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
137 struct ahci_host_priv *hpriv = ap->host->private_data; 198 struct ahci_host_priv *hpriv = ap->host->private_data;
138 struct xgene_ahci_context *ctx = hpriv->plat_data; 199 struct xgene_ahci_context *ctx = hpriv->plat_data;
139 int rc = 0; 200 int rc = 0;
201 u32 port_fbs;
202 void *port_mmio = ahci_port_base(ap);
203
204 /*
205 * Write the pmp value to PxFBS.DEV
206 * for case of Port Mulitplier.
207 */
208 if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
209 port_fbs = readl(port_mmio + PORT_FBS);
210 port_fbs &= ~PORT_FBS_DEV_MASK;
211 port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
212 writel(port_fbs, port_mmio + PORT_FBS);
213 }
140 214
141 if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) || 215 if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
142 (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET))) 216 (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
217 (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
143 xgene_ahci_restart_engine(ap); 218 xgene_ahci_restart_engine(ap);
144 219
145 rc = ahci_qc_issue(qc); 220 rc = ahci_qc_issue(qc);
@@ -365,16 +440,119 @@ static void xgene_ahci_host_stop(struct ata_host *host)
365 ahci_platform_disable_resources(hpriv); 440 ahci_platform_disable_resources(hpriv);
366} 441}
367 442
443/**
444 * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
445 * to Port Multiplier.
446 * @link: link to reset
447 * @class: Return value to indicate class of device
448 * @deadline: deadline jiffies for the operation
449 *
450 * Due to H/W errata, the controller is unable to save the PMP
451 * field fetched from command header before sending the H2D FIS.
452 * When the device returns the PMP port field in the D2H FIS, there is
453 * a mismatch and results in command completion failure. The workaround
454 * is to write the pmp value to PxFBS.DEV field before issuing any command
455 * to PMP.
456 */
457static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
458 unsigned long deadline)
459{
460 int pmp = sata_srst_pmp(link);
461 struct ata_port *ap = link->ap;
462 u32 rc;
463 void *port_mmio = ahci_port_base(ap);
464 u32 port_fbs;
465
466 /*
467 * Set PxFBS.DEV field with pmp
468 * value.
469 */
470 port_fbs = readl(port_mmio + PORT_FBS);
471 port_fbs &= ~PORT_FBS_DEV_MASK;
472 port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
473 writel(port_fbs, port_mmio + PORT_FBS);
474
475 rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
476
477 return rc;
478}
479
480/**
481 * xgene_ahci_softreset - Issue the softreset to the drive.
482 * @link: link to reset
483 * @class: Return value to indicate class of device
484 * @deadline: deadline jiffies for the operation
485 *
486 * Due to H/W errata, the controller is unable to save the PMP
487 * field fetched from command header before sending the H2D FIS.
488 * When the device returns the PMP port field in the D2H FIS, there is
489 * a mismatch and results in command completion failure. The workaround
490 * is to write the pmp value to PxFBS.DEV field before issuing any command
491 * to PMP. Here is the algorithm to detect PMP :
492 *
493 * 1. Save the PxFBS value
494 * 2. Program PxFBS.DEV with pmp value send by framework. Framework sends
495 * 0xF for both PMP/NON-PMP initially
496 * 3. Issue softreset
497 * 4. If signature class is PMP goto 6
498 * 5. restore the original PxFBS and goto 3
499 * 6. return
500 */
501static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
502 unsigned long deadline)
503{
504 int pmp = sata_srst_pmp(link);
505 struct ata_port *ap = link->ap;
506 struct ahci_host_priv *hpriv = ap->host->private_data;
507 struct xgene_ahci_context *ctx = hpriv->plat_data;
508 void *port_mmio = ahci_port_base(ap);
509 u32 port_fbs;
510 u32 port_fbs_save;
511 u32 retry = 1;
512 u32 rc;
513
514 port_fbs_save = readl(port_mmio + PORT_FBS);
515
516 /*
517 * Set PxFBS.DEV field with pmp
518 * value.
519 */
520 port_fbs = readl(port_mmio + PORT_FBS);
521 port_fbs &= ~PORT_FBS_DEV_MASK;
522 port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
523 writel(port_fbs, port_mmio + PORT_FBS);
524
525softreset_retry:
526 rc = ahci_do_softreset(link, class, pmp,
527 deadline, ahci_check_ready);
528
529 ctx->class[ap->port_no] = *class;
530 if (*class != ATA_DEV_PMP) {
531 /*
532 * Retry for normal drives without
533 * setting PxFBS.DEV field with pmp value.
534 */
535 if (retry--) {
536 writel(port_fbs_save, port_mmio + PORT_FBS);
537 goto softreset_retry;
538 }
539 }
540
541 return rc;
542}
543
368static struct ata_port_operations xgene_ahci_ops = { 544static struct ata_port_operations xgene_ahci_ops = {
369 .inherits = &ahci_ops, 545 .inherits = &ahci_ops,
370 .host_stop = xgene_ahci_host_stop, 546 .host_stop = xgene_ahci_host_stop,
371 .hardreset = xgene_ahci_hardreset, 547 .hardreset = xgene_ahci_hardreset,
372 .read_id = xgene_ahci_read_id, 548 .read_id = xgene_ahci_read_id,
373 .qc_issue = xgene_ahci_qc_issue, 549 .qc_issue = xgene_ahci_qc_issue,
550 .softreset = xgene_ahci_softreset,
551 .pmp_softreset = xgene_ahci_pmp_softreset
374}; 552};
375 553
376static const struct ata_port_info xgene_ahci_port_info = { 554static const struct ata_port_info xgene_ahci_port_info = {
377 .flags = AHCI_FLAG_COMMON, 555 .flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
378 .pio_mask = ATA_PIO4, 556 .pio_mask = ATA_PIO4,
379 .udma_mask = ATA_UDMA6, 557 .udma_mask = ATA_UDMA6,
380 .port_ops = &xgene_ahci_ops, 558 .port_ops = &xgene_ahci_ops,
@@ -446,6 +624,10 @@ static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
446 return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0; 624 return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
447} 625}
448 626
627static struct scsi_host_template ahci_platform_sht = {
628 AHCI_SHT(DRV_NAME),
629};
630
449static int xgene_ahci_probe(struct platform_device *pdev) 631static int xgene_ahci_probe(struct platform_device *pdev)
450{ 632{
451 struct device *dev = &pdev->dev; 633 struct device *dev = &pdev->dev;
@@ -523,7 +705,8 @@ static int xgene_ahci_probe(struct platform_device *pdev)
523skip_clk_phy: 705skip_clk_phy:
524 hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ; 706 hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ;
525 707
526 rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info); 708 rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info,
709 &ahci_platform_sht);
527 if (rc) 710 if (rc)
528 goto disable_resources; 711 goto disable_resources;
529 712
@@ -545,7 +728,7 @@ static struct platform_driver xgene_ahci_driver = {
545 .probe = xgene_ahci_probe, 728 .probe = xgene_ahci_probe,
546 .remove = ata_platform_remove_one, 729 .remove = ata_platform_remove_one,
547 .driver = { 730 .driver = {
548 .name = "xgene-ahci", 731 .name = DRV_NAME,
549 .of_match_table = xgene_ahci_of_match, 732 .of_match_table = xgene_ahci_of_match,
550 }, 733 },
551}; 734};
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 0b03f9056692..d89305d289f6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -24,6 +24,7 @@
24#include <linux/ahci_platform.h> 24#include <linux/ahci_platform.h>
25#include <linux/phy/phy.h> 25#include <linux/phy/phy.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/of_platform.h>
27#include "ahci.h" 28#include "ahci.h"
28 29
29static void ahci_host_stop(struct ata_host *host); 30static void ahci_host_stop(struct ata_host *host);
@@ -34,10 +35,6 @@ struct ata_port_operations ahci_platform_ops = {
34}; 35};
35EXPORT_SYMBOL_GPL(ahci_platform_ops); 36EXPORT_SYMBOL_GPL(ahci_platform_ops);
36 37
37static struct scsi_host_template ahci_platform_sht = {
38 AHCI_SHT("ahci_platform"),
39};
40
41/** 38/**
42 * ahci_platform_enable_phys - Enable PHYs 39 * ahci_platform_enable_phys - Enable PHYs
43 * @hpriv: host private area to store config values 40 * @hpriv: host private area to store config values
@@ -54,9 +51,6 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
54 int rc, i; 51 int rc, i;
55 52
56 for (i = 0; i < hpriv->nports; i++) { 53 for (i = 0; i < hpriv->nports; i++) {
57 if (!hpriv->phys[i])
58 continue;
59
60 rc = phy_init(hpriv->phys[i]); 54 rc = phy_init(hpriv->phys[i]);
61 if (rc) 55 if (rc)
62 goto disable_phys; 56 goto disable_phys;
@@ -89,9 +83,6 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
89 int i; 83 int i;
90 84
91 for (i = 0; i < hpriv->nports; i++) { 85 for (i = 0; i < hpriv->nports; i++) {
92 if (!hpriv->phys[i])
93 continue;
94
95 phy_power_off(hpriv->phys[i]); 86 phy_power_off(hpriv->phys[i]);
96 phy_exit(hpriv->phys[i]); 87 phy_exit(hpriv->phys[i]);
97 } 88 }
@@ -144,6 +135,59 @@ void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
144EXPORT_SYMBOL_GPL(ahci_platform_disable_clks); 135EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
145 136
146/** 137/**
138 * ahci_platform_enable_regulators - Enable regulators
139 * @hpriv: host private area to store config values
140 *
141 * This function enables all the regulators found in
142 * hpriv->target_pwrs, if any. If a regulator fails to be enabled, it
143 * disables all the regulators already enabled in reverse order and
144 * returns an error.
145 *
146 * RETURNS:
147 * 0 on success otherwise a negative error code
148 */
149int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
150{
151 int rc, i;
152
153 for (i = 0; i < hpriv->nports; i++) {
154 if (!hpriv->target_pwrs[i])
155 continue;
156
157 rc = regulator_enable(hpriv->target_pwrs[i]);
158 if (rc)
159 goto disable_target_pwrs;
160 }
161
162 return 0;
163
164disable_target_pwrs:
165 while (--i >= 0)
166 if (hpriv->target_pwrs[i])
167 regulator_disable(hpriv->target_pwrs[i]);
168
169 return rc;
170}
171EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
172
173/**
174 * ahci_platform_disable_regulators - Disable regulators
175 * @hpriv: host private area to store config values
176 *
177 * This function disables all regulators found in hpriv->target_pwrs.
178 */
179void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
180{
181 int i;
182
183 for (i = 0; i < hpriv->nports; i++) {
184 if (!hpriv->target_pwrs[i])
185 continue;
186 regulator_disable(hpriv->target_pwrs[i]);
187 }
188}
189EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
190/**
147 * ahci_platform_enable_resources - Enable platform resources 191 * ahci_platform_enable_resources - Enable platform resources
148 * @hpriv: host private area to store config values 192 * @hpriv: host private area to store config values
149 * 193 *
@@ -163,11 +207,9 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
163{ 207{
164 int rc; 208 int rc;
165 209
166 if (hpriv->target_pwr) { 210 rc = ahci_platform_enable_regulators(hpriv);
167 rc = regulator_enable(hpriv->target_pwr); 211 if (rc)
168 if (rc) 212 return rc;
169 return rc;
170 }
171 213
172 rc = ahci_platform_enable_clks(hpriv); 214 rc = ahci_platform_enable_clks(hpriv);
173 if (rc) 215 if (rc)
@@ -183,8 +225,8 @@ disable_clks:
183 ahci_platform_disable_clks(hpriv); 225 ahci_platform_disable_clks(hpriv);
184 226
185disable_regulator: 227disable_regulator:
186 if (hpriv->target_pwr) 228 ahci_platform_disable_regulators(hpriv);
187 regulator_disable(hpriv->target_pwr); 229
188 return rc; 230 return rc;
189} 231}
190EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); 232EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
@@ -205,8 +247,7 @@ void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
205 247
206 ahci_platform_disable_clks(hpriv); 248 ahci_platform_disable_clks(hpriv);
207 249
208 if (hpriv->target_pwr) 250 ahci_platform_disable_regulators(hpriv);
209 regulator_disable(hpriv->target_pwr);
210} 251}
211EXPORT_SYMBOL_GPL(ahci_platform_disable_resources); 252EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
212 253
@@ -222,6 +263,69 @@ static void ahci_platform_put_resources(struct device *dev, void *res)
222 263
223 for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) 264 for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
224 clk_put(hpriv->clks[c]); 265 clk_put(hpriv->clks[c]);
266 /*
267 * The regulators are tied to child node device and not to the
268 * SATA device itself. So we can't use devm for automatically
269 * releasing them. We have to do it manually here.
270 */
271 for (c = 0; c < hpriv->nports; c++)
272 if (hpriv->target_pwrs && hpriv->target_pwrs[c])
273 regulator_put(hpriv->target_pwrs[c]);
274
275 kfree(hpriv->target_pwrs);
276}
277
278static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
279 struct device *dev, struct device_node *node)
280{
281 int rc;
282
283 hpriv->phys[port] = devm_of_phy_get(dev, node, NULL);
284
285 if (!IS_ERR(hpriv->phys[port]))
286 return 0;
287
288 rc = PTR_ERR(hpriv->phys[port]);
289 switch (rc) {
290 case -ENOSYS:
291 /* No PHY support. Check if PHY is required. */
292 if (of_find_property(node, "phys", NULL)) {
293 dev_err(dev,
294 "couldn't get PHY in node %s: ENOSYS\n",
295 node->name);
296 break;
297 }
298 case -ENODEV:
299 /* continue normally */
300 hpriv->phys[port] = NULL;
301 rc = 0;
302 break;
303
304 default:
305 dev_err(dev,
306 "couldn't get PHY in node %s: %d\n",
307 node->name, rc);
308
309 break;
310 }
311
312 return rc;
313}
314
315static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
316 struct device *dev)
317{
318 struct regulator *target_pwr;
319 int rc = 0;
320
321 target_pwr = regulator_get_optional(dev, "target");
322
323 if (!IS_ERR(target_pwr))
324 hpriv->target_pwrs[port] = target_pwr;
325 else
326 rc = PTR_ERR(target_pwr);
327
328 return rc;
225} 329}
226 330
227/** 331/**
@@ -246,7 +350,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
246 struct ahci_host_priv *hpriv; 350 struct ahci_host_priv *hpriv;
247 struct clk *clk; 351 struct clk *clk;
248 struct device_node *child; 352 struct device_node *child;
249 int i, enabled_ports = 0, rc = -ENOMEM; 353 int i, sz, enabled_ports = 0, rc = -ENOMEM, child_nodes;
250 u32 mask_port_map = 0; 354 u32 mask_port_map = 0;
251 355
252 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 356 if (!devres_open_group(dev, NULL, GFP_KERNEL))
@@ -267,14 +371,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
267 goto err_out; 371 goto err_out;
268 } 372 }
269 373
270 hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
271 if (IS_ERR(hpriv->target_pwr)) {
272 rc = PTR_ERR(hpriv->target_pwr);
273 if (rc == -EPROBE_DEFER)
274 goto err_out;
275 hpriv->target_pwr = NULL;
276 }
277
278 for (i = 0; i < AHCI_MAX_CLKS; i++) { 374 for (i = 0; i < AHCI_MAX_CLKS; i++) {
279 /* 375 /*
280 * For now we must use clk_get(dev, NULL) for the first clock, 376 * For now we must use clk_get(dev, NULL) for the first clock,
@@ -296,19 +392,33 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
296 hpriv->clks[i] = clk; 392 hpriv->clks[i] = clk;
297 } 393 }
298 394
299 hpriv->nports = of_get_child_count(dev->of_node); 395 hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
300 396
301 if (hpriv->nports) { 397 /*
302 hpriv->phys = devm_kzalloc(dev, 398 * If no sub-node was found, we still need to set nports to
303 hpriv->nports * sizeof(*hpriv->phys), 399 * one in order to be able to use the
304 GFP_KERNEL); 400 * ahci_platform_[en|dis]able_[phys|regulators] functions.
305 if (!hpriv->phys) { 401 */
306 rc = -ENOMEM; 402 if (!child_nodes)
307 goto err_out; 403 hpriv->nports = 1;
308 }
309 404
405 sz = hpriv->nports * sizeof(*hpriv->phys);
406 hpriv->phys = devm_kzalloc(dev, sz, GFP_KERNEL);
407 if (!hpriv->phys) {
408 rc = -ENOMEM;
409 goto err_out;
410 }
411 sz = hpriv->nports * sizeof(*hpriv->target_pwrs);
412 hpriv->target_pwrs = kzalloc(sz, GFP_KERNEL);
413 if (!hpriv->target_pwrs) {
414 rc = -ENOMEM;
415 goto err_out;
416 }
417
418 if (child_nodes) {
310 for_each_child_of_node(dev->of_node, child) { 419 for_each_child_of_node(dev->of_node, child) {
311 u32 port; 420 u32 port;
421 struct platform_device *port_dev __maybe_unused;
312 422
313 if (!of_device_is_available(child)) 423 if (!of_device_is_available(child))
314 continue; 424 continue;
@@ -322,17 +432,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
322 dev_warn(dev, "invalid port number %d\n", port); 432 dev_warn(dev, "invalid port number %d\n", port);
323 continue; 433 continue;
324 } 434 }
325
326 mask_port_map |= BIT(port); 435 mask_port_map |= BIT(port);
327 436
328 hpriv->phys[port] = devm_of_phy_get(dev, child, NULL); 437#ifdef CONFIG_OF_ADDRESS
329 if (IS_ERR(hpriv->phys[port])) { 438 of_platform_device_create(child, NULL, NULL);
330 rc = PTR_ERR(hpriv->phys[port]); 439
331 dev_err(dev, 440 port_dev = of_find_device_by_node(child);
332 "couldn't get PHY in node %s: %d\n", 441
333 child->name, rc); 442 if (port_dev) {
334 goto err_out; 443 rc = ahci_platform_get_regulator(hpriv, port,
444 &port_dev->dev);
445 if (rc == -EPROBE_DEFER)
446 goto err_out;
335 } 447 }
448#endif
449
450 rc = ahci_platform_get_phy(hpriv, port, dev, child);
451 if (rc)
452 goto err_out;
336 453
337 enabled_ports++; 454 enabled_ports++;
338 } 455 }
@@ -349,38 +466,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
349 * If no sub-node was found, keep this for device tree 466 * If no sub-node was found, keep this for device tree
350 * compatibility 467 * compatibility
351 */ 468 */
352 struct phy *phy = devm_phy_get(dev, "sata-phy"); 469 rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
353 if (!IS_ERR(phy)) { 470 if (rc)
354 hpriv->phys = devm_kzalloc(dev, sizeof(*hpriv->phys), 471 goto err_out;
355 GFP_KERNEL);
356 if (!hpriv->phys) {
357 rc = -ENOMEM;
358 goto err_out;
359 }
360
361 hpriv->phys[0] = phy;
362 hpriv->nports = 1;
363 } else {
364 rc = PTR_ERR(phy);
365 switch (rc) {
366 case -ENOSYS:
367 /* No PHY support. Check if PHY is required. */
368 if (of_find_property(dev->of_node, "phys", NULL)) {
369 dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
370 goto err_out;
371 }
372 case -ENODEV:
373 /* continue normally */
374 hpriv->phys = NULL;
375 break;
376
377 default:
378 goto err_out;
379 472
380 } 473 rc = ahci_platform_get_regulator(hpriv, 0, dev);
381 } 474 if (rc == -EPROBE_DEFER)
475 goto err_out;
382 } 476 }
383
384 pm_runtime_enable(dev); 477 pm_runtime_enable(dev);
385 pm_runtime_get_sync(dev); 478 pm_runtime_get_sync(dev);
386 hpriv->got_runtime_pm = true; 479 hpriv->got_runtime_pm = true;
@@ -399,6 +492,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
399 * @pdev: platform device pointer for the host 492 * @pdev: platform device pointer for the host
400 * @hpriv: ahci-host private data for the host 493 * @hpriv: ahci-host private data for the host
401 * @pi_template: template for the ata_port_info to use 494 * @pi_template: template for the ata_port_info to use
495 * @sht: scsi_host_template to use when registering
402 * 496 *
403 * This function does all the usual steps needed to bring up an 497 * This function does all the usual steps needed to bring up an
404 * ahci-platform host, note any necessary resources (ie clks, phys, etc.) 498 * ahci-platform host, note any necessary resources (ie clks, phys, etc.)
@@ -409,7 +503,8 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
409 */ 503 */
410int ahci_platform_init_host(struct platform_device *pdev, 504int ahci_platform_init_host(struct platform_device *pdev,
411 struct ahci_host_priv *hpriv, 505 struct ahci_host_priv *hpriv,
412 const struct ata_port_info *pi_template) 506 const struct ata_port_info *pi_template,
507 struct scsi_host_template *sht)
413{ 508{
414 struct device *dev = &pdev->dev; 509 struct device *dev = &pdev->dev;
415 struct ata_port_info pi = *pi_template; 510 struct ata_port_info pi = *pi_template;
@@ -493,7 +588,7 @@ int ahci_platform_init_host(struct platform_device *pdev,
493 ahci_init_controller(host); 588 ahci_init_controller(host);
494 ahci_print_info(host, "platform"); 589 ahci_print_info(host, "platform");
495 590
496 return ahci_host_activate(host, irq, &ahci_platform_sht); 591 return ahci_host_activate(host, irq, sht);
497} 592}
498EXPORT_SYMBOL_GPL(ahci_platform_init_host); 593EXPORT_SYMBOL_GPL(ahci_platform_init_host);
499 594
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d1a05f9bb91f..4b0d5e71858e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1752,33 +1752,6 @@ unsigned ata_exec_internal(struct ata_device *dev,
1752} 1752}
1753 1753
1754/** 1754/**
1755 * ata_do_simple_cmd - execute simple internal command
1756 * @dev: Device to which the command is sent
1757 * @cmd: Opcode to execute
1758 *
1759 * Execute a 'simple' command, that only consists of the opcode
1760 * 'cmd' itself, without filling any other registers
1761 *
1762 * LOCKING:
1763 * Kernel thread context (may sleep).
1764 *
1765 * RETURNS:
1766 * Zero on success, AC_ERR_* mask on failure
1767 */
1768unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1769{
1770 struct ata_taskfile tf;
1771
1772 ata_tf_init(dev, &tf);
1773
1774 tf.command = cmd;
1775 tf.flags |= ATA_TFLAG_DEVICE;
1776 tf.protocol = ATA_PROT_NODATA;
1777
1778 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1779}
1780
1781/**
1782 * ata_pio_need_iordy - check if iordy needed 1755 * ata_pio_need_iordy - check if iordy needed
1783 * @adev: ATA device 1756 * @adev: ATA device
1784 * 1757 *
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 8d00c2638bed..a9f5aed32d39 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1635,7 +1635,6 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
1635 1635
1636 DPRINTK("ATAPI request sense\n"); 1636 DPRINTK("ATAPI request sense\n");
1637 1637
1638 /* FIXME: is this needed? */
1639 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1638 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1640 1639
1641 /* initialize sense_buf with the error register, 1640 /* initialize sense_buf with the error register,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 6abd17a85b13..280729325ebd 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1995,8 +1995,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1995 1995
1996 VPRINTK("ENTER\n"); 1996 VPRINTK("ENTER\n");
1997 1997
1998 /* set scsi removeable (RMB) bit per ata bit */ 1998 /* set scsi removable (RMB) bit per ata bit */
1999 if (ata_id_removeable(args->id)) 1999 if (ata_id_removable(args->id))
2000 hdr[1] |= (1 << 7); 2000 hdr[1] |= (1 << 7);
2001 2001
2002 if (args->dev->class == ATA_DEV_ZAC) { 2002 if (args->dev->class == ATA_DEV_ZAC) {
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 5f4e0cca56ec..82ebe263d2f1 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -76,7 +76,6 @@ extern unsigned ata_exec_internal_sg(struct ata_device *dev,
76 struct ata_taskfile *tf, const u8 *cdb, 76 struct ata_taskfile *tf, const u8 *cdb,
77 int dma_dir, struct scatterlist *sg, 77 int dma_dir, struct scatterlist *sg,
78 unsigned int n_elem, unsigned long timeout); 78 unsigned int n_elem, unsigned long timeout);
79extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
80extern int ata_wait_ready(struct ata_link *link, unsigned long deadline, 79extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
81 int (*check_ready)(struct ata_link *link)); 80 int (*check_ready)(struct ata_link *link));
82extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 81extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 48ae4b434474..f9ca72e937ee 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -276,10 +276,8 @@ static int cs5530_init_chip(void)
276 pci_dev_put(cs5530_0); 276 pci_dev_put(cs5530_0);
277 return 0; 277 return 0;
278fail_put: 278fail_put:
279 if (master_0) 279 pci_dev_put(master_0);
280 pci_dev_put(master_0); 280 pci_dev_put(cs5530_0);
281 if (cs5530_0)
282 pci_dev_put(cs5530_0);
283 return -ENODEV; 281 return -ENODEV;
284} 282}
285 283
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index dcc408abe171..b6b7af894d9d 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -16,6 +16,12 @@
16#include <linux/ata_platform.h> 16#include <linux/ata_platform.h>
17#include <linux/libata.h> 17#include <linux/libata.h>
18 18
19#define DRV_NAME "pata_of_platform"
20
21static struct scsi_host_template pata_platform_sht = {
22 ATA_PIO_SHT(DRV_NAME),
23};
24
19static int pata_of_platform_probe(struct platform_device *ofdev) 25static int pata_of_platform_probe(struct platform_device *ofdev)
20{ 26{
21 int ret; 27 int ret;
@@ -63,7 +69,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
63 pio_mask |= (1 << pio_mode) - 1; 69 pio_mask |= (1 << pio_mode) - 1;
64 70
65 return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res, 71 return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
66 reg_shift, pio_mask); 72 reg_shift, pio_mask, &pata_platform_sht);
67} 73}
68 74
69static struct of_device_id pata_of_platform_match[] = { 75static struct of_device_id pata_of_platform_match[] = {
@@ -74,7 +80,7 @@ MODULE_DEVICE_TABLE(of, pata_of_platform_match);
74 80
75static struct platform_driver pata_of_platform_driver = { 81static struct platform_driver pata_of_platform_driver = {
76 .driver = { 82 .driver = {
77 .name = "pata_of_platform", 83 .name = DRV_NAME,
78 .of_match_table = pata_of_platform_match, 84 .of_match_table = pata_of_platform_match,
79 }, 85 },
80 .probe = pata_of_platform_probe, 86 .probe = pata_of_platform_probe,
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 4d06a5cda987..dca8251b1aea 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -28,6 +28,7 @@
28#include <linux/blkdev.h> 28#include <linux/blkdev.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/ktime.h>
31#include <scsi/scsi.h> 32#include <scsi/scsi.h>
32#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
33#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
@@ -605,7 +606,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
605 void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR]; 606 void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
606 u32 scr; 607 u32 scr;
607 long start_count, end_count; 608 long start_count, end_count;
608 struct timeval start_time, end_time; 609 ktime_t start_time, end_time;
609 long pll_clock, usec_elapsed; 610 long pll_clock, usec_elapsed;
610 611
611 /* Start the test mode */ 612 /* Start the test mode */
@@ -616,14 +617,14 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
616 617
617 /* Read current counter value */ 618 /* Read current counter value */
618 start_count = pdc_read_counter(host); 619 start_count = pdc_read_counter(host);
619 do_gettimeofday(&start_time); 620 start_time = ktime_get();
620 621
621 /* Let the counter run for 100 ms. */ 622 /* Let the counter run for 100 ms. */
622 mdelay(100); 623 mdelay(100);
623 624
624 /* Read the counter values again */ 625 /* Read the counter values again */
625 end_count = pdc_read_counter(host); 626 end_count = pdc_read_counter(host);
626 do_gettimeofday(&end_time); 627 end_time = ktime_get();
627 628
628 /* Stop the test mode */ 629 /* Stop the test mode */
629 scr = ioread32(mmio_base + PDC_SYS_CTL); 630 scr = ioread32(mmio_base + PDC_SYS_CTL);
@@ -632,8 +633,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
632 ioread32(mmio_base + PDC_SYS_CTL); /* flush */ 633 ioread32(mmio_base + PDC_SYS_CTL); /* flush */
633 634
634 /* calculate the input clock in Hz */ 635 /* calculate the input clock in Hz */
635 usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 + 636 usec_elapsed = (long) ktime_us_delta(end_time, start_time);
636 (end_time.tv_usec - start_time.tv_usec);
637 637
638 pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 * 638 pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
639 (100000000 / usec_elapsed); 639 (100000000 / usec_elapsed);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 1eedfe46d7c8..c503ded87bb8 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -78,6 +78,7 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
78 * @irq_res: Resource representing IRQ and its flags 78 * @irq_res: Resource representing IRQ and its flags
79 * @ioport_shift: I/O port shift 79 * @ioport_shift: I/O port shift
80 * @__pio_mask: PIO mask 80 * @__pio_mask: PIO mask
81 * @sht: scsi_host_template to use when registering
81 * 82 *
82 * Register a platform bus IDE interface. Such interfaces are PIO and we 83 * Register a platform bus IDE interface. Such interfaces are PIO and we
83 * assume do not support IRQ sharing. 84 * assume do not support IRQ sharing.
@@ -99,7 +100,8 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr,
99 */ 100 */
100int __pata_platform_probe(struct device *dev, struct resource *io_res, 101int __pata_platform_probe(struct device *dev, struct resource *io_res,
101 struct resource *ctl_res, struct resource *irq_res, 102 struct resource *ctl_res, struct resource *irq_res,
102 unsigned int ioport_shift, int __pio_mask) 103 unsigned int ioport_shift, int __pio_mask,
104 struct scsi_host_template *sht)
103{ 105{
104 struct ata_host *host; 106 struct ata_host *host;
105 struct ata_port *ap; 107 struct ata_port *ap;
@@ -170,7 +172,7 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res,
170 172
171 /* activate */ 173 /* activate */
172 return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL, 174 return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
173 irq_flags, &pata_platform_sht); 175 irq_flags, sht);
174} 176}
175EXPORT_SYMBOL_GPL(__pata_platform_probe); 177EXPORT_SYMBOL_GPL(__pata_platform_probe);
176 178
@@ -216,7 +218,7 @@ static int pata_platform_probe(struct platform_device *pdev)
216 218
217 return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res, 219 return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
218 pp_info ? pp_info->ioport_shift : 0, 220 pp_info ? pp_info->ioport_shift : 0,
219 pio_mask); 221 pio_mask, &pata_platform_sht);
220} 222}
221 223
222static struct platform_driver pata_platform_driver = { 224static struct platform_driver pata_platform_driver = {
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 8e8248179d20..fdb0f2879ea7 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -48,6 +48,18 @@
48#define DRV_NAME "sata-dwc" 48#define DRV_NAME "sata-dwc"
49#define DRV_VERSION "1.3" 49#define DRV_VERSION "1.3"
50 50
51#ifndef out_le32
52#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
53#endif
54
55#ifndef in_le32
56#define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a)))
57#endif
58
59#ifndef NO_IRQ
60#define NO_IRQ 0
61#endif
62
51/* SATA DMA driver Globals */ 63/* SATA DMA driver Globals */
52#define DMA_NUM_CHANS 1 64#define DMA_NUM_CHANS 1
53#define DMA_NUM_CHAN_REGS 8 65#define DMA_NUM_CHAN_REGS 8
@@ -273,7 +285,7 @@ struct sata_dwc_device {
273 struct device *dev; /* generic device struct */ 285 struct device *dev; /* generic device struct */
274 struct ata_probe_ent *pe; /* ptr to probe-ent */ 286 struct ata_probe_ent *pe; /* ptr to probe-ent */
275 struct ata_host *host; 287 struct ata_host *host;
276 u8 *reg_base; 288 u8 __iomem *reg_base;
277 struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ 289 struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
278 int irq_dma; 290 int irq_dma;
279}; 291};
@@ -323,7 +335,9 @@ struct sata_dwc_host_priv {
323 struct device *dwc_dev; 335 struct device *dwc_dev;
324 int dma_channel; 336 int dma_channel;
325}; 337};
326struct sata_dwc_host_priv host_pvt; 338
339static struct sata_dwc_host_priv host_pvt;
340
327/* 341/*
328 * Prototypes 342 * Prototypes
329 */ 343 */
@@ -580,9 +594,9 @@ static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
580 594
581 sms_val = 0; 595 sms_val = 0;
582 dms_val = 1 + host_pvt.dma_channel; 596 dms_val = 1 + host_pvt.dma_channel;
583 dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x" 597 dev_dbg(host_pvt.dwc_dev,
584 " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, 598 "%s: sg=%p nelem=%d lli=%p dma_lli=0x%pad dmadr=0x%p\n",
585 (u32)dmadr_addr); 599 __func__, sg, num_elems, lli, &dma_lli, dmadr_addr);
586 600
587 bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); 601 bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
588 602
@@ -773,7 +787,7 @@ static void dma_dwc_exit(struct sata_dwc_device *hsdev)
773{ 787{
774 dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); 788 dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
775 if (host_pvt.sata_dma_regs) { 789 if (host_pvt.sata_dma_regs) {
776 iounmap(host_pvt.sata_dma_regs); 790 iounmap((void __iomem *)host_pvt.sata_dma_regs);
777 host_pvt.sata_dma_regs = NULL; 791 host_pvt.sata_dma_regs = NULL;
778 } 792 }
779 793
@@ -818,7 +832,7 @@ static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
818 return -EINVAL; 832 return -EINVAL;
819 } 833 }
820 834
821 *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4)); 835 *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
822 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", 836 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
823 __func__, link->ap->print_id, scr, *val); 837 __func__, link->ap->print_id, scr, *val);
824 838
@@ -834,21 +848,19 @@ static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
834 __func__, scr); 848 __func__, scr);
835 return -EINVAL; 849 return -EINVAL;
836 } 850 }
837 out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val); 851 out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);
838 852
839 return 0; 853 return 0;
840} 854}
841 855
842static u32 core_scr_read(unsigned int scr) 856static u32 core_scr_read(unsigned int scr)
843{ 857{
844 return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\ 858 return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
845 (scr * 4));
846} 859}
847 860
848static void core_scr_write(unsigned int scr, u32 val) 861static void core_scr_write(unsigned int scr, u32 val)
849{ 862{
850 out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4), 863 out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
851 val);
852} 864}
853 865
854static void clear_serror(void) 866static void clear_serror(void)
@@ -856,7 +868,6 @@ static void clear_serror(void)
856 u32 val; 868 u32 val;
857 val = core_scr_read(SCR_ERROR); 869 val = core_scr_read(SCR_ERROR);
858 core_scr_write(SCR_ERROR, val); 870 core_scr_write(SCR_ERROR, val);
859
860} 871}
861 872
862static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) 873static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
@@ -1256,24 +1267,24 @@ static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
1256 1267
1257static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) 1268static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
1258{ 1269{
1259 port->cmd_addr = (void *)base + 0x00; 1270 port->cmd_addr = (void __iomem *)base + 0x00;
1260 port->data_addr = (void *)base + 0x00; 1271 port->data_addr = (void __iomem *)base + 0x00;
1261 1272
1262 port->error_addr = (void *)base + 0x04; 1273 port->error_addr = (void __iomem *)base + 0x04;
1263 port->feature_addr = (void *)base + 0x04; 1274 port->feature_addr = (void __iomem *)base + 0x04;
1264 1275
1265 port->nsect_addr = (void *)base + 0x08; 1276 port->nsect_addr = (void __iomem *)base + 0x08;
1266 1277
1267 port->lbal_addr = (void *)base + 0x0c; 1278 port->lbal_addr = (void __iomem *)base + 0x0c;
1268 port->lbam_addr = (void *)base + 0x10; 1279 port->lbam_addr = (void __iomem *)base + 0x10;
1269 port->lbah_addr = (void *)base + 0x14; 1280 port->lbah_addr = (void __iomem *)base + 0x14;
1270 1281
1271 port->device_addr = (void *)base + 0x18; 1282 port->device_addr = (void __iomem *)base + 0x18;
1272 port->command_addr = (void *)base + 0x1c; 1283 port->command_addr = (void __iomem *)base + 0x1c;
1273 port->status_addr = (void *)base + 0x1c; 1284 port->status_addr = (void __iomem *)base + 0x1c;
1274 1285
1275 port->altstatus_addr = (void *)base + 0x20; 1286 port->altstatus_addr = (void __iomem *)base + 0x20;
1276 port->ctl_addr = (void *)base + 0x20; 1287 port->ctl_addr = (void __iomem *)base + 0x20;
1277} 1288}
1278 1289
1279/* 1290/*
@@ -1314,7 +1325,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
1314 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) 1325 for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
1315 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; 1326 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
1316 1327
1317 ap->bmdma_prd = 0; /* set these so libata doesn't use them */ 1328 ap->bmdma_prd = NULL; /* set these so libata doesn't use them */
1318 ap->bmdma_prd_dma = 0; 1329 ap->bmdma_prd_dma = 0;
1319 1330
1320 /* 1331 /*
@@ -1511,8 +1522,8 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1511 1522
1512 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], 1523 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
1513 hsdevp->llit_dma[tag], 1524 hsdevp->llit_dma[tag],
1514 (void *__iomem)(&hsdev->sata_dwc_regs->\ 1525 (void __iomem *)&hsdev->sata_dwc_regs->dmadr,
1515 dmadr), qc->dma_dir); 1526 qc->dma_dir);
1516 if (dma_chan < 0) { 1527 if (dma_chan < 0) {
1517 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", 1528 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
1518 __func__, dma_chan); 1529 __func__, dma_chan);
@@ -1585,8 +1596,8 @@ static void sata_dwc_error_handler(struct ata_port *ap)
1585 ata_sff_error_handler(ap); 1596 ata_sff_error_handler(ap);
1586} 1597}
1587 1598
1588int sata_dwc_hardreset(struct ata_link *link, unsigned int *class, 1599static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
1589 unsigned long deadline) 1600 unsigned long deadline)
1590{ 1601{
1591 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); 1602 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
1592 int ret; 1603 int ret;
@@ -1618,7 +1629,7 @@ static struct scsi_host_template sata_dwc_sht = {
1618 * max of 1. This will get fixed in in a future release. 1629 * max of 1. This will get fixed in in a future release.
1619 */ 1630 */
1620 .sg_tablesize = LIBATA_MAX_PRD, 1631 .sg_tablesize = LIBATA_MAX_PRD,
1621 .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */ 1632 /* .can_queue = ATA_MAX_QUEUE, */
1622 .dma_boundary = ATA_DMA_BOUNDARY, 1633 .dma_boundary = ATA_DMA_BOUNDARY,
1623}; 1634};
1624 1635
@@ -1655,7 +1666,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1655 struct sata_dwc_device *hsdev; 1666 struct sata_dwc_device *hsdev;
1656 u32 idr, versionr; 1667 u32 idr, versionr;
1657 char *ver = (char *)&versionr; 1668 char *ver = (char *)&versionr;
1658 u8 *base = NULL; 1669 u8 __iomem *base;
1659 int err = 0; 1670 int err = 0;
1660 int irq; 1671 int irq;
1661 struct ata_host *host; 1672 struct ata_host *host;
@@ -1665,12 +1676,12 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1665 u32 dma_chan; 1676 u32 dma_chan;
1666 1677
1667 /* Allocate DWC SATA device */ 1678 /* Allocate DWC SATA device */
1668 hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); 1679 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
1669 if (hsdev == NULL) { 1680 hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
1670 dev_err(&ofdev->dev, "kmalloc failed for hsdev\n"); 1681 if (!host || !hsdev)
1671 err = -ENOMEM; 1682 return -ENOMEM;
1672 goto error; 1683
1673 } 1684 host->private_data = hsdev;
1674 1685
1675 if (of_property_read_u32(np, "dma-channel", &dma_chan)) { 1686 if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
1676 dev_warn(&ofdev->dev, "no dma-channel property set." 1687 dev_warn(&ofdev->dev, "no dma-channel property set."
@@ -1680,12 +1691,11 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1680 host_pvt.dma_channel = dma_chan; 1691 host_pvt.dma_channel = dma_chan;
1681 1692
1682 /* Ioremap SATA registers */ 1693 /* Ioremap SATA registers */
1683 base = of_iomap(ofdev->dev.of_node, 0); 1694 base = of_iomap(np, 0);
1684 if (!base) { 1695 if (!base) {
1685 dev_err(&ofdev->dev, "ioremap failed for SATA register" 1696 dev_err(&ofdev->dev, "ioremap failed for SATA register"
1686 " address\n"); 1697 " address\n");
1687 err = -ENODEV; 1698 return -ENODEV;
1688 goto error_kmalloc;
1689 } 1699 }
1690 hsdev->reg_base = base; 1700 hsdev->reg_base = base;
1691 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); 1701 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
@@ -1693,16 +1703,6 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1693 /* Synopsys DWC SATA specific Registers */ 1703 /* Synopsys DWC SATA specific Registers */
1694 hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); 1704 hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
1695 1705
1696 /* Allocate and fill host */
1697 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
1698 if (!host) {
1699 dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
1700 err = -ENOMEM;
1701 goto error_iomap;
1702 }
1703
1704 host->private_data = hsdev;
1705
1706 /* Setup port */ 1706 /* Setup port */
1707 host->ports[0]->ioaddr.cmd_addr = base; 1707 host->ports[0]->ioaddr.cmd_addr = base;
1708 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; 1708 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
@@ -1716,7 +1716,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1716 idr, ver[0], ver[1], ver[2]); 1716 idr, ver[0], ver[1], ver[2]);
1717 1717
1718 /* Get SATA DMA interrupt number */ 1718 /* Get SATA DMA interrupt number */
1719 irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); 1719 irq = irq_of_parse_and_map(np, 1);
1720 if (irq == NO_IRQ) { 1720 if (irq == NO_IRQ) {
1721 dev_err(&ofdev->dev, "no SATA DMA irq\n"); 1721 dev_err(&ofdev->dev, "no SATA DMA irq\n");
1722 err = -ENODEV; 1722 err = -ENODEV;
@@ -1724,7 +1724,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1724 } 1724 }
1725 1725
1726 /* Get physical SATA DMA register base address */ 1726 /* Get physical SATA DMA register base address */
1727 host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1); 1727 host_pvt.sata_dma_regs = (void *)of_iomap(np, 1);
1728 if (!(host_pvt.sata_dma_regs)) { 1728 if (!(host_pvt.sata_dma_regs)) {
1729 dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" 1729 dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
1730 " address\n"); 1730 " address\n");
@@ -1744,7 +1744,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1744 sata_dwc_enable_interrupts(hsdev); 1744 sata_dwc_enable_interrupts(hsdev);
1745 1745
1746 /* Get SATA interrupt number */ 1746 /* Get SATA interrupt number */
1747 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); 1747 irq = irq_of_parse_and_map(np, 0);
1748 if (irq == NO_IRQ) { 1748 if (irq == NO_IRQ) {
1749 dev_err(&ofdev->dev, "no SATA DMA irq\n"); 1749 dev_err(&ofdev->dev, "no SATA DMA irq\n");
1750 err = -ENODEV; 1750 err = -ENODEV;
@@ -1770,9 +1770,6 @@ error_dma_iomap:
1770 iounmap((void __iomem *)host_pvt.sata_dma_regs); 1770 iounmap((void __iomem *)host_pvt.sata_dma_regs);
1771error_iomap: 1771error_iomap:
1772 iounmap(base); 1772 iounmap(base);
1773error_kmalloc:
1774 kfree(hsdev);
1775error:
1776 return err; 1773 return err;
1777} 1774}
1778 1775
@@ -1783,15 +1780,12 @@ static int sata_dwc_remove(struct platform_device *ofdev)
1783 struct sata_dwc_device *hsdev = host->private_data; 1780 struct sata_dwc_device *hsdev = host->private_data;
1784 1781
1785 ata_host_detach(host); 1782 ata_host_detach(host);
1786 dev_set_drvdata(dev, NULL);
1787 1783
1788 /* Free SATA DMA resources */ 1784 /* Free SATA DMA resources */
1789 dma_dwc_exit(hsdev); 1785 dma_dwc_exit(hsdev);
1790 1786
1791 iounmap((void __iomem *)host_pvt.sata_dma_regs); 1787 iounmap((void __iomem *)host_pvt.sata_dma_regs);
1792 iounmap(hsdev->reg_base); 1788 iounmap(hsdev->reg_base);
1793 kfree(hsdev);
1794 kfree(host);
1795 dev_dbg(&ofdev->dev, "done\n"); 1789 dev_dbg(&ofdev->dev, "done\n");
1796 return 0; 1790 return 0;
1797} 1791}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index f9a0e34eb111..f8c33e3772b8 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4185,8 +4185,7 @@ err:
4185 clk_disable_unprepare(hpriv->port_clks[port]); 4185 clk_disable_unprepare(hpriv->port_clks[port]);
4186 clk_put(hpriv->port_clks[port]); 4186 clk_put(hpriv->port_clks[port]);
4187 } 4187 }
4188 if (hpriv->port_phys[port]) 4188 phy_power_off(hpriv->port_phys[port]);
4189 phy_power_off(hpriv->port_phys[port]);
4190 } 4189 }
4191 4190
4192 return rc; 4191 return rc;
@@ -4216,8 +4215,7 @@ static int mv_platform_remove(struct platform_device *pdev)
4216 clk_disable_unprepare(hpriv->port_clks[port]); 4215 clk_disable_unprepare(hpriv->port_clks[port]);
4217 clk_put(hpriv->port_clks[port]); 4216 clk_put(hpriv->port_clks[port]);
4218 } 4217 }
4219 if (hpriv->port_phys[port]) 4218 phy_power_off(hpriv->port_phys[port]);
4220 phy_power_off(hpriv->port_phys[port]);
4221 } 4219 }
4222 return 0; 4220 return 0;
4223} 4221}
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index cb0d2e644af5..d49a5193b7de 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -2,8 +2,8 @@
2 * Renesas R-Car SATA driver 2 * Renesas R-Car SATA driver
3 * 3 *
4 * Author: Vladimir Barinov <source@cogentembedded.com> 4 * Author: Vladimir Barinov <source@cogentembedded.com>
5 * Copyright (C) 2013 Cogent Embedded, Inc. 5 * Copyright (C) 2013-2015 Cogent Embedded, Inc.
6 * Copyright (C) 2013 Renesas Solutions Corp. 6 * Copyright (C) 2013-2015 Renesas Solutions Corp.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -992,9 +992,30 @@ static int sata_rcar_resume(struct device *dev)
992 return 0; 992 return 0;
993} 993}
994 994
995static int sata_rcar_restore(struct device *dev)
996{
997 struct ata_host *host = dev_get_drvdata(dev);
998 struct sata_rcar_priv *priv = host->private_data;
999
1000 clk_prepare_enable(priv->clk);
1001
1002 sata_rcar_setup_port(host);
1003
1004 /* initialize host controller */
1005 sata_rcar_init_controller(host);
1006
1007 ata_host_resume(host);
1008
1009 return 0;
1010}
1011
995static const struct dev_pm_ops sata_rcar_pm_ops = { 1012static const struct dev_pm_ops sata_rcar_pm_ops = {
996 .suspend = sata_rcar_suspend, 1013 .suspend = sata_rcar_suspend,
997 .resume = sata_rcar_resume, 1014 .resume = sata_rcar_resume,
1015 .freeze = sata_rcar_suspend,
1016 .thaw = sata_rcar_resume,
1017 .poweroff = sata_rcar_suspend,
1018 .restore = sata_rcar_restore,
998}; 1019};
999#endif 1020#endif
1000 1021
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 58470c395301..c3293f0a8573 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -855,7 +855,6 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
855 855
856 fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL); 856 fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
857 if (!fw_priv) { 857 if (!fw_priv) {
858 dev_err(device, "%s: kmalloc failed\n", __func__);
859 fw_priv = ERR_PTR(-ENOMEM); 858 fw_priv = ERR_PTR(-ENOMEM);
860 goto exit; 859 goto exit;
861 } 860 }
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index d626576a4f75..7fdd0172605a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -81,10 +81,8 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
81 return -EINVAL; 81 return -EINVAL;
82 82
83 ce = kzalloc(sizeof(*ce), GFP_KERNEL); 83 ce = kzalloc(sizeof(*ce), GFP_KERNEL);
84 if (!ce) { 84 if (!ce)
85 dev_err(dev, "Not enough memory for clock entry.\n");
86 return -ENOMEM; 85 return -ENOMEM;
87 }
88 86
89 if (con_id) { 87 if (con_id) {
90 ce->con_id = kstrdup(con_id, GFP_KERNEL); 88 ce->con_id = kstrdup(con_id, GFP_KERNEL);
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index b0f138806bbc..f32b802b98f4 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -19,8 +19,8 @@
19 * @dev: Device to handle. 19 * @dev: Device to handle.
20 * 20 *
21 * If power.subsys_data is NULL, point it to a new object, otherwise increment 21 * If power.subsys_data is NULL, point it to a new object, otherwise increment
22 * its reference counter. Return 1 if a new object has been created, otherwise 22 * its reference counter. Return 0 if new object has been created or refcount
23 * return 0 or error code. 23 * increased, otherwise negative error code.
24 */ 24 */
25int dev_pm_get_subsys_data(struct device *dev) 25int dev_pm_get_subsys_data(struct device *dev)
26{ 26{
@@ -56,13 +56,11 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
56 * @dev: Device to handle. 56 * @dev: Device to handle.
57 * 57 *
58 * If the reference counter of power.subsys_data is zero after dropping the 58 * If the reference counter of power.subsys_data is zero after dropping the
59 * reference, power.subsys_data is removed. Return 1 if that happens or 0 59 * reference, power.subsys_data is removed.
60 * otherwise.
61 */ 60 */
62int dev_pm_put_subsys_data(struct device *dev) 61void dev_pm_put_subsys_data(struct device *dev)
63{ 62{
64 struct pm_subsys_data *psd; 63 struct pm_subsys_data *psd;
65 int ret = 1;
66 64
67 spin_lock_irq(&dev->power.lock); 65 spin_lock_irq(&dev->power.lock);
68 66
@@ -70,18 +68,14 @@ int dev_pm_put_subsys_data(struct device *dev)
70 if (!psd) 68 if (!psd)
71 goto out; 69 goto out;
72 70
73 if (--psd->refcount == 0) { 71 if (--psd->refcount == 0)
74 dev->power.subsys_data = NULL; 72 dev->power.subsys_data = NULL;
75 } else { 73 else
76 psd = NULL; 74 psd = NULL;
77 ret = 0;
78 }
79 75
80 out: 76 out:
81 spin_unlock_irq(&dev->power.lock); 77 spin_unlock_irq(&dev->power.lock);
82 kfree(psd); 78 kfree(psd);
83
84 return ret;
85} 79}
86EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); 80EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
87 81
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0d8780c04a5e..ba4abbe4693c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -344,14 +344,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
344 struct device *dev; 344 struct device *dev;
345 345
346 gpd_data = container_of(nb, struct generic_pm_domain_data, nb); 346 gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
347
348 mutex_lock(&gpd_data->lock);
349 dev = gpd_data->base.dev; 347 dev = gpd_data->base.dev;
350 if (!dev) {
351 mutex_unlock(&gpd_data->lock);
352 return NOTIFY_DONE;
353 }
354 mutex_unlock(&gpd_data->lock);
355 348
356 for (;;) { 349 for (;;) {
357 struct generic_pm_domain *genpd; 350 struct generic_pm_domain *genpd;
@@ -1384,25 +1377,66 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
1384 1377
1385#endif /* CONFIG_PM_SLEEP */ 1378#endif /* CONFIG_PM_SLEEP */
1386 1379
1387static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev) 1380static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1381 struct generic_pm_domain *genpd,
1382 struct gpd_timing_data *td)
1388{ 1383{
1389 struct generic_pm_domain_data *gpd_data; 1384 struct generic_pm_domain_data *gpd_data;
1385 int ret;
1386
1387 ret = dev_pm_get_subsys_data(dev);
1388 if (ret)
1389 return ERR_PTR(ret);
1390 1390
1391 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); 1391 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1392 if (!gpd_data) 1392 if (!gpd_data) {
1393 return NULL; 1393 ret = -ENOMEM;
1394 goto err_put;
1395 }
1396
1397 if (td)
1398 gpd_data->td = *td;
1394 1399
1395 mutex_init(&gpd_data->lock); 1400 gpd_data->base.dev = dev;
1401 gpd_data->need_restore = -1;
1402 gpd_data->td.constraint_changed = true;
1403 gpd_data->td.effective_constraint_ns = -1;
1396 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; 1404 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1397 dev_pm_qos_add_notifier(dev, &gpd_data->nb); 1405
1406 spin_lock_irq(&dev->power.lock);
1407
1408 if (dev->power.subsys_data->domain_data) {
1409 ret = -EINVAL;
1410 goto err_free;
1411 }
1412
1413 dev->power.subsys_data->domain_data = &gpd_data->base;
1414 dev->pm_domain = &genpd->domain;
1415
1416 spin_unlock_irq(&dev->power.lock);
1417
1398 return gpd_data; 1418 return gpd_data;
1419
1420 err_free:
1421 spin_unlock_irq(&dev->power.lock);
1422 kfree(gpd_data);
1423 err_put:
1424 dev_pm_put_subsys_data(dev);
1425 return ERR_PTR(ret);
1399} 1426}
1400 1427
1401static void __pm_genpd_free_dev_data(struct device *dev, 1428static void genpd_free_dev_data(struct device *dev,
1402 struct generic_pm_domain_data *gpd_data) 1429 struct generic_pm_domain_data *gpd_data)
1403{ 1430{
1404 dev_pm_qos_remove_notifier(dev, &gpd_data->nb); 1431 spin_lock_irq(&dev->power.lock);
1432
1433 dev->pm_domain = NULL;
1434 dev->power.subsys_data->domain_data = NULL;
1435
1436 spin_unlock_irq(&dev->power.lock);
1437
1405 kfree(gpd_data); 1438 kfree(gpd_data);
1439 dev_pm_put_subsys_data(dev);
1406} 1440}
1407 1441
1408/** 1442/**
@@ -1414,8 +1448,7 @@ static void __pm_genpd_free_dev_data(struct device *dev,
1414int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, 1448int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1415 struct gpd_timing_data *td) 1449 struct gpd_timing_data *td)
1416{ 1450{
1417 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; 1451 struct generic_pm_domain_data *gpd_data;
1418 struct pm_domain_data *pdd;
1419 int ret = 0; 1452 int ret = 0;
1420 1453
1421 dev_dbg(dev, "%s()\n", __func__); 1454 dev_dbg(dev, "%s()\n", __func__);
@@ -1423,9 +1456,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1423 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) 1456 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1424 return -EINVAL; 1457 return -EINVAL;
1425 1458
1426 gpd_data_new = __pm_genpd_alloc_dev_data(dev); 1459 gpd_data = genpd_alloc_dev_data(dev, genpd, td);
1427 if (!gpd_data_new) 1460 if (IS_ERR(gpd_data))
1428 return -ENOMEM; 1461 return PTR_ERR(gpd_data);
1429 1462
1430 genpd_acquire_lock(genpd); 1463 genpd_acquire_lock(genpd);
1431 1464
@@ -1434,50 +1467,22 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1434 goto out; 1467 goto out;
1435 } 1468 }
1436 1469
1437 list_for_each_entry(pdd, &genpd->dev_list, list_node) 1470 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1438 if (pdd->dev == dev) {
1439 ret = -EINVAL;
1440 goto out;
1441 }
1442
1443 ret = dev_pm_get_subsys_data(dev);
1444 if (ret) 1471 if (ret)
1445 goto out; 1472 goto out;
1446 1473
1447 genpd->device_count++; 1474 genpd->device_count++;
1448 genpd->max_off_time_changed = true; 1475 genpd->max_off_time_changed = true;
1449 1476
1450 spin_lock_irq(&dev->power.lock);
1451
1452 dev->pm_domain = &genpd->domain;
1453 if (dev->power.subsys_data->domain_data) {
1454 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1455 } else {
1456 gpd_data = gpd_data_new;
1457 dev->power.subsys_data->domain_data = &gpd_data->base;
1458 }
1459 gpd_data->refcount++;
1460 if (td)
1461 gpd_data->td = *td;
1462
1463 spin_unlock_irq(&dev->power.lock);
1464
1465 if (genpd->attach_dev)
1466 genpd->attach_dev(genpd, dev);
1467
1468 mutex_lock(&gpd_data->lock);
1469 gpd_data->base.dev = dev;
1470 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1477 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1471 gpd_data->need_restore = -1;
1472 gpd_data->td.constraint_changed = true;
1473 gpd_data->td.effective_constraint_ns = -1;
1474 mutex_unlock(&gpd_data->lock);
1475 1478
1476 out: 1479 out:
1477 genpd_release_lock(genpd); 1480 genpd_release_lock(genpd);
1478 1481
1479 if (gpd_data != gpd_data_new) 1482 if (ret)
1480 __pm_genpd_free_dev_data(dev, gpd_data_new); 1483 genpd_free_dev_data(dev, gpd_data);
1484 else
1485 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1481 1486
1482 return ret; 1487 return ret;
1483} 1488}
@@ -1504,7 +1509,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1504{ 1509{
1505 struct generic_pm_domain_data *gpd_data; 1510 struct generic_pm_domain_data *gpd_data;
1506 struct pm_domain_data *pdd; 1511 struct pm_domain_data *pdd;
1507 bool remove = false;
1508 int ret = 0; 1512 int ret = 0;
1509 1513
1510 dev_dbg(dev, "%s()\n", __func__); 1514 dev_dbg(dev, "%s()\n", __func__);
@@ -1514,6 +1518,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1514 || pd_to_genpd(dev->pm_domain) != genpd) 1518 || pd_to_genpd(dev->pm_domain) != genpd)
1515 return -EINVAL; 1519 return -EINVAL;
1516 1520
1521 /* The above validation also means we have existing domain_data. */
1522 pdd = dev->power.subsys_data->domain_data;
1523 gpd_data = to_gpd_data(pdd);
1524 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1525
1517 genpd_acquire_lock(genpd); 1526 genpd_acquire_lock(genpd);
1518 1527
1519 if (genpd->prepared_count > 0) { 1528 if (genpd->prepared_count > 0) {
@@ -1527,58 +1536,22 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1527 if (genpd->detach_dev) 1536 if (genpd->detach_dev)
1528 genpd->detach_dev(genpd, dev); 1537 genpd->detach_dev(genpd, dev);
1529 1538
1530 spin_lock_irq(&dev->power.lock);
1531
1532 dev->pm_domain = NULL;
1533 pdd = dev->power.subsys_data->domain_data;
1534 list_del_init(&pdd->list_node); 1539 list_del_init(&pdd->list_node);
1535 gpd_data = to_gpd_data(pdd);
1536 if (--gpd_data->refcount == 0) {
1537 dev->power.subsys_data->domain_data = NULL;
1538 remove = true;
1539 }
1540
1541 spin_unlock_irq(&dev->power.lock);
1542
1543 mutex_lock(&gpd_data->lock);
1544 pdd->dev = NULL;
1545 mutex_unlock(&gpd_data->lock);
1546 1540
1547 genpd_release_lock(genpd); 1541 genpd_release_lock(genpd);
1548 1542
1549 dev_pm_put_subsys_data(dev); 1543 genpd_free_dev_data(dev, gpd_data);
1550 if (remove)
1551 __pm_genpd_free_dev_data(dev, gpd_data);
1552 1544
1553 return 0; 1545 return 0;
1554 1546
1555 out: 1547 out:
1556 genpd_release_lock(genpd); 1548 genpd_release_lock(genpd);
1549 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1557 1550
1558 return ret; 1551 return ret;
1559} 1552}
1560 1553
1561/** 1554/**
1562 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1563 * @dev: Device to set/unset the flag for.
1564 * @val: The new value of the device's "need restore" flag.
1565 */
1566void pm_genpd_dev_need_restore(struct device *dev, bool val)
1567{
1568 struct pm_subsys_data *psd;
1569 unsigned long flags;
1570
1571 spin_lock_irqsave(&dev->power.lock, flags);
1572
1573 psd = dev_to_psd(dev);
1574 if (psd && psd->domain_data)
1575 to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
1576
1577 spin_unlock_irqrestore(&dev->power.lock, flags);
1578}
1579EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
1580
1581/**
1582 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1555 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1583 * @genpd: Master PM domain to add the subdomain to. 1556 * @genpd: Master PM domain to add the subdomain to.
1584 * @subdomain: Subdomain to be added. 1557 * @subdomain: Subdomain to be added.
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 106c69359306..677fb2843553 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -117,20 +117,20 @@ do { \
117} while (0) 117} while (0)
118 118
119/** 119/**
120 * find_device_opp() - find device_opp struct using device pointer 120 * _find_device_opp() - find device_opp struct using device pointer
121 * @dev: device pointer used to lookup device OPPs 121 * @dev: device pointer used to lookup device OPPs
122 * 122 *
123 * Search list of device OPPs for one containing matching device. Does a RCU 123 * Search list of device OPPs for one containing matching device. Does a RCU
124 * reader operation to grab the pointer needed. 124 * reader operation to grab the pointer needed.
125 * 125 *
126 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or 126 * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
127 * -EINVAL based on type of error. 127 * -EINVAL based on type of error.
128 * 128 *
129 * Locking: This function must be called under rcu_read_lock(). device_opp 129 * Locking: This function must be called under rcu_read_lock(). device_opp
130 * is a RCU protected pointer. This means that device_opp is valid as long 130 * is a RCU protected pointer. This means that device_opp is valid as long
131 * as we are under RCU lock. 131 * as we are under RCU lock.
132 */ 132 */
133static struct device_opp *find_device_opp(struct device *dev) 133static struct device_opp *_find_device_opp(struct device *dev)
134{ 134{
135 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); 135 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
136 136
@@ -153,7 +153,7 @@ static struct device_opp *find_device_opp(struct device *dev)
153 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp 153 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
154 * @opp: opp for which voltage has to be returned for 154 * @opp: opp for which voltage has to be returned for
155 * 155 *
156 * Return voltage in micro volt corresponding to the opp, else 156 * Return: voltage in micro volt corresponding to the opp, else
157 * return 0 157 * return 0
158 * 158 *
159 * Locking: This function must be called under rcu_read_lock(). opp is a rcu 159 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -169,6 +169,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
169 struct dev_pm_opp *tmp_opp; 169 struct dev_pm_opp *tmp_opp;
170 unsigned long v = 0; 170 unsigned long v = 0;
171 171
172 opp_rcu_lockdep_assert();
173
172 tmp_opp = rcu_dereference(opp); 174 tmp_opp = rcu_dereference(opp);
173 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 175 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
174 pr_err("%s: Invalid parameters\n", __func__); 176 pr_err("%s: Invalid parameters\n", __func__);
@@ -183,7 +185,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
183 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp 185 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
184 * @opp: opp for which frequency has to be returned for 186 * @opp: opp for which frequency has to be returned for
185 * 187 *
186 * Return frequency in hertz corresponding to the opp, else 188 * Return: frequency in hertz corresponding to the opp, else
187 * return 0 189 * return 0
188 * 190 *
189 * Locking: This function must be called under rcu_read_lock(). opp is a rcu 191 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -199,6 +201,8 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
199 struct dev_pm_opp *tmp_opp; 201 struct dev_pm_opp *tmp_opp;
200 unsigned long f = 0; 202 unsigned long f = 0;
201 203
204 opp_rcu_lockdep_assert();
205
202 tmp_opp = rcu_dereference(opp); 206 tmp_opp = rcu_dereference(opp);
203 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) 207 if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
204 pr_err("%s: Invalid parameters\n", __func__); 208 pr_err("%s: Invalid parameters\n", __func__);
@@ -213,7 +217,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
213 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list 217 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
214 * @dev: device for which we do this operation 218 * @dev: device for which we do this operation
215 * 219 *
216 * This function returns the number of available opps if there are any, 220 * Return: This function returns the number of available opps if there are any,
217 * else returns 0 if none or the corresponding error value. 221 * else returns 0 if none or the corresponding error value.
218 * 222 *
219 * Locking: This function takes rcu_read_lock(). 223 * Locking: This function takes rcu_read_lock().
@@ -226,7 +230,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
226 230
227 rcu_read_lock(); 231 rcu_read_lock();
228 232
229 dev_opp = find_device_opp(dev); 233 dev_opp = _find_device_opp(dev);
230 if (IS_ERR(dev_opp)) { 234 if (IS_ERR(dev_opp)) {
231 count = PTR_ERR(dev_opp); 235 count = PTR_ERR(dev_opp);
232 dev_err(dev, "%s: device OPP not found (%d)\n", 236 dev_err(dev, "%s: device OPP not found (%d)\n",
@@ -251,9 +255,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
251 * @freq: frequency to search for 255 * @freq: frequency to search for
252 * @available: true/false - match for available opp 256 * @available: true/false - match for available opp
253 * 257 *
254 * Searches for exact match in the opp list and returns pointer to the matching 258 * Return: Searches for exact match in the opp list and returns pointer to the
255 * opp if found, else returns ERR_PTR in case of error and should be handled 259 * matching opp if found, else returns ERR_PTR in case of error and should
256 * using IS_ERR. Error return values can be: 260 * be handled using IS_ERR. Error return values can be:
257 * EINVAL: for bad pointer 261 * EINVAL: for bad pointer
258 * ERANGE: no match found for search 262 * ERANGE: no match found for search
259 * ENODEV: if device not found in list of registered devices 263 * ENODEV: if device not found in list of registered devices
@@ -280,7 +284,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
280 284
281 opp_rcu_lockdep_assert(); 285 opp_rcu_lockdep_assert();
282 286
283 dev_opp = find_device_opp(dev); 287 dev_opp = _find_device_opp(dev);
284 if (IS_ERR(dev_opp)) { 288 if (IS_ERR(dev_opp)) {
285 int r = PTR_ERR(dev_opp); 289 int r = PTR_ERR(dev_opp);
286 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); 290 dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
@@ -307,7 +311,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
307 * Search for the matching ceil *available* OPP from a starting freq 311 * Search for the matching ceil *available* OPP from a starting freq
308 * for a device. 312 * for a device.
309 * 313 *
310 * Returns matching *opp and refreshes *freq accordingly, else returns 314 * Return: matching *opp and refreshes *freq accordingly, else returns
311 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 315 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
312 * values can be: 316 * values can be:
313 * EINVAL: for bad pointer 317 * EINVAL: for bad pointer
@@ -333,7 +337,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
333 return ERR_PTR(-EINVAL); 337 return ERR_PTR(-EINVAL);
334 } 338 }
335 339
336 dev_opp = find_device_opp(dev); 340 dev_opp = _find_device_opp(dev);
337 if (IS_ERR(dev_opp)) 341 if (IS_ERR(dev_opp))
338 return ERR_CAST(dev_opp); 342 return ERR_CAST(dev_opp);
339 343
@@ -357,7 +361,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
357 * Search for the matching floor *available* OPP from a starting freq 361 * Search for the matching floor *available* OPP from a starting freq
358 * for a device. 362 * for a device.
359 * 363 *
360 * Returns matching *opp and refreshes *freq accordingly, else returns 364 * Return: matching *opp and refreshes *freq accordingly, else returns
361 * ERR_PTR in case of error and should be handled using IS_ERR. Error return 365 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
362 * values can be: 366 * values can be:
363 * EINVAL: for bad pointer 367 * EINVAL: for bad pointer
@@ -383,7 +387,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
383 return ERR_PTR(-EINVAL); 387 return ERR_PTR(-EINVAL);
384 } 388 }
385 389
386 dev_opp = find_device_opp(dev); 390 dev_opp = _find_device_opp(dev);
387 if (IS_ERR(dev_opp)) 391 if (IS_ERR(dev_opp))
388 return ERR_CAST(dev_opp); 392 return ERR_CAST(dev_opp);
389 393
@@ -403,7 +407,16 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
403} 407}
404EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); 408EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
405 409
406static struct device_opp *add_device_opp(struct device *dev) 410/**
411 * _add_device_opp() - Allocate a new device OPP table
412 * @dev: device for which we do this operation
413 *
414 * New device node which uses OPPs - used when multiple devices with OPP tables
415 * are maintained.
416 *
417 * Return: valid device_opp pointer if success, else NULL.
418 */
419static struct device_opp *_add_device_opp(struct device *dev)
407{ 420{
408 struct device_opp *dev_opp; 421 struct device_opp *dev_opp;
409 422
@@ -424,8 +437,35 @@ static struct device_opp *add_device_opp(struct device *dev)
424 return dev_opp; 437 return dev_opp;
425} 438}
426 439
427static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq, 440/**
428 unsigned long u_volt, bool dynamic) 441 * _opp_add_dynamic() - Allocate a dynamic OPP.
442 * @dev: device for which we do this operation
443 * @freq: Frequency in Hz for this OPP
444 * @u_volt: Voltage in uVolts for this OPP
445 * @dynamic: Dynamically added OPPs.
446 *
447 * This function adds an opp definition to the opp list and returns status.
448 * The opp is made available by default and it can be controlled using
449 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
450 *
451 * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
452 * freed by of_free_opp_table.
453 *
454 * Locking: The internal device_opp and opp structures are RCU protected.
455 * Hence this function internally uses RCU updater strategy with mutex locks
456 * to keep the integrity of the internal data structures. Callers should ensure
457 * that this function is *NOT* called under RCU protection or in contexts where
458 * mutex cannot be locked.
459 *
460 * Return:
461 * 0 On success OR
462 * Duplicate OPPs (both freq and volt are same) and opp->available
463 * -EEXIST Freq are same and volt are different OR
464 * Duplicate OPPs (both freq and volt are same) and !opp->available
465 * -ENOMEM Memory allocation failure
466 */
467static int _opp_add_dynamic(struct device *dev, unsigned long freq,
468 long u_volt, bool dynamic)
429{ 469{
430 struct device_opp *dev_opp = NULL; 470 struct device_opp *dev_opp = NULL;
431 struct dev_pm_opp *opp, *new_opp; 471 struct dev_pm_opp *opp, *new_opp;
@@ -434,10 +474,8 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
434 474
435 /* allocate new OPP node */ 475 /* allocate new OPP node */
436 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); 476 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
437 if (!new_opp) { 477 if (!new_opp)
438 dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
439 return -ENOMEM; 478 return -ENOMEM;
440 }
441 479
442 /* Hold our list modification lock here */ 480 /* Hold our list modification lock here */
443 mutex_lock(&dev_opp_list_lock); 481 mutex_lock(&dev_opp_list_lock);
@@ -449,9 +487,9 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
449 new_opp->dynamic = dynamic; 487 new_opp->dynamic = dynamic;
450 488
451 /* Check for existing list for 'dev' */ 489 /* Check for existing list for 'dev' */
452 dev_opp = find_device_opp(dev); 490 dev_opp = _find_device_opp(dev);
453 if (IS_ERR(dev_opp)) { 491 if (IS_ERR(dev_opp)) {
454 dev_opp = add_device_opp(dev); 492 dev_opp = _add_device_opp(dev);
455 if (!dev_opp) { 493 if (!dev_opp) {
456 ret = -ENOMEM; 494 ret = -ENOMEM;
457 goto free_opp; 495 goto free_opp;
@@ -519,34 +557,53 @@ free_opp:
519 * mutex cannot be locked. 557 * mutex cannot be locked.
520 * 558 *
521 * Return: 559 * Return:
522 * 0: On success OR 560 * 0 On success OR
523 * Duplicate OPPs (both freq and volt are same) and opp->available 561 * Duplicate OPPs (both freq and volt are same) and opp->available
524 * -EEXIST: Freq are same and volt are different OR 562 * -EEXIST Freq are same and volt are different OR
525 * Duplicate OPPs (both freq and volt are same) and !opp->available 563 * Duplicate OPPs (both freq and volt are same) and !opp->available
526 * -ENOMEM: Memory allocation failure 564 * -ENOMEM Memory allocation failure
527 */ 565 */
528int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 566int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
529{ 567{
530 return dev_pm_opp_add_dynamic(dev, freq, u_volt, true); 568 return _opp_add_dynamic(dev, freq, u_volt, true);
531} 569}
532EXPORT_SYMBOL_GPL(dev_pm_opp_add); 570EXPORT_SYMBOL_GPL(dev_pm_opp_add);
533 571
534static void kfree_opp_rcu(struct rcu_head *head) 572/**
573 * _kfree_opp_rcu() - Free OPP RCU handler
574 * @head: RCU head
575 */
576static void _kfree_opp_rcu(struct rcu_head *head)
535{ 577{
536 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); 578 struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
537 579
538 kfree_rcu(opp, rcu_head); 580 kfree_rcu(opp, rcu_head);
539} 581}
540 582
541static void kfree_device_rcu(struct rcu_head *head) 583/**
584 * _kfree_device_rcu() - Free device_opp RCU handler
585 * @head: RCU head
586 */
587static void _kfree_device_rcu(struct rcu_head *head)
542{ 588{
543 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); 589 struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
544 590
545 kfree_rcu(device_opp, rcu_head); 591 kfree_rcu(device_opp, rcu_head);
546} 592}
547 593
548static void __dev_pm_opp_remove(struct device_opp *dev_opp, 594/**
549 struct dev_pm_opp *opp) 595 * _opp_remove() - Remove an OPP from a table definition
596 * @dev_opp: points back to the device_opp struct this opp belongs to
597 * @opp: pointer to the OPP to remove
598 *
599 * This function removes an opp definition from the opp list.
600 *
601 * Locking: The internal device_opp and opp structures are RCU protected.
602 * It is assumed that the caller holds required mutex for an RCU updater
603 * strategy.
604 */
605static void _opp_remove(struct device_opp *dev_opp,
606 struct dev_pm_opp *opp)
550{ 607{
551 /* 608 /*
552 * Notify the changes in the availability of the operable 609 * Notify the changes in the availability of the operable
@@ -554,12 +611,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
554 */ 611 */
555 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); 612 srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
556 list_del_rcu(&opp->node); 613 list_del_rcu(&opp->node);
557 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu); 614 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
558 615
559 if (list_empty(&dev_opp->opp_list)) { 616 if (list_empty(&dev_opp->opp_list)) {
560 list_del_rcu(&dev_opp->node); 617 list_del_rcu(&dev_opp->node);
561 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, 618 call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
562 kfree_device_rcu); 619 _kfree_device_rcu);
563 } 620 }
564} 621}
565 622
@@ -569,6 +626,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
569 * @freq: OPP to remove with matching 'freq' 626 * @freq: OPP to remove with matching 'freq'
570 * 627 *
571 * This function removes an opp from the opp list. 628 * This function removes an opp from the opp list.
629 *
630 * Locking: The internal device_opp and opp structures are RCU protected.
631 * Hence this function internally uses RCU updater strategy with mutex locks
632 * to keep the integrity of the internal data structures. Callers should ensure
633 * that this function is *NOT* called under RCU protection or in contexts where
634 * mutex cannot be locked.
572 */ 635 */
573void dev_pm_opp_remove(struct device *dev, unsigned long freq) 636void dev_pm_opp_remove(struct device *dev, unsigned long freq)
574{ 637{
@@ -579,7 +642,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
579 /* Hold our list modification lock here */ 642 /* Hold our list modification lock here */
580 mutex_lock(&dev_opp_list_lock); 643 mutex_lock(&dev_opp_list_lock);
581 644
582 dev_opp = find_device_opp(dev); 645 dev_opp = _find_device_opp(dev);
583 if (IS_ERR(dev_opp)) 646 if (IS_ERR(dev_opp))
584 goto unlock; 647 goto unlock;
585 648
@@ -596,14 +659,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
596 goto unlock; 659 goto unlock;
597 } 660 }
598 661
599 __dev_pm_opp_remove(dev_opp, opp); 662 _opp_remove(dev_opp, opp);
600unlock: 663unlock:
601 mutex_unlock(&dev_opp_list_lock); 664 mutex_unlock(&dev_opp_list_lock);
602} 665}
603EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 666EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
604 667
605/** 668/**
606 * opp_set_availability() - helper to set the availability of an opp 669 * _opp_set_availability() - helper to set the availability of an opp
607 * @dev: device for which we do this operation 670 * @dev: device for which we do this operation
608 * @freq: OPP frequency to modify availability 671 * @freq: OPP frequency to modify availability
609 * @availability_req: availability status requested for this opp 672 * @availability_req: availability status requested for this opp
@@ -611,7 +674,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
611 * Set the availability of an OPP with an RCU operation, opp_{enable,disable} 674 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
612 * share a common logic which is isolated here. 675 * share a common logic which is isolated here.
613 * 676 *
614 * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the 677 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
615 * copy operation, returns 0 if no modifcation was done OR modification was 678 * copy operation, returns 0 if no modifcation was done OR modification was
616 * successful. 679 * successful.
617 * 680 *
@@ -621,8 +684,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
621 * that this function is *NOT* called under RCU protection or in contexts where 684 * that this function is *NOT* called under RCU protection or in contexts where
622 * mutex locking or synchronize_rcu() blocking calls cannot be used. 685 * mutex locking or synchronize_rcu() blocking calls cannot be used.
623 */ 686 */
624static int opp_set_availability(struct device *dev, unsigned long freq, 687static int _opp_set_availability(struct device *dev, unsigned long freq,
625 bool availability_req) 688 bool availability_req)
626{ 689{
627 struct device_opp *dev_opp; 690 struct device_opp *dev_opp;
628 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); 691 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
@@ -630,15 +693,13 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
630 693
631 /* keep the node allocated */ 694 /* keep the node allocated */
632 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); 695 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
633 if (!new_opp) { 696 if (!new_opp)
634 dev_warn(dev, "%s: Unable to create OPP\n", __func__);
635 return -ENOMEM; 697 return -ENOMEM;
636 }
637 698
638 mutex_lock(&dev_opp_list_lock); 699 mutex_lock(&dev_opp_list_lock);
639 700
640 /* Find the device_opp */ 701 /* Find the device_opp */
641 dev_opp = find_device_opp(dev); 702 dev_opp = _find_device_opp(dev);
642 if (IS_ERR(dev_opp)) { 703 if (IS_ERR(dev_opp)) {
643 r = PTR_ERR(dev_opp); 704 r = PTR_ERR(dev_opp);
644 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); 705 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -668,7 +729,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
668 729
669 list_replace_rcu(&opp->node, &new_opp->node); 730 list_replace_rcu(&opp->node, &new_opp->node);
670 mutex_unlock(&dev_opp_list_lock); 731 mutex_unlock(&dev_opp_list_lock);
671 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu); 732 call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
672 733
673 /* Notify the change of the OPP availability */ 734 /* Notify the change of the OPP availability */
674 if (availability_req) 735 if (availability_req)
@@ -700,10 +761,14 @@ unlock:
700 * integrity of the internal data structures. Callers should ensure that 761 * integrity of the internal data structures. Callers should ensure that
701 * this function is *NOT* called under RCU protection or in contexts where 762 * this function is *NOT* called under RCU protection or in contexts where
702 * mutex locking or synchronize_rcu() blocking calls cannot be used. 763 * mutex locking or synchronize_rcu() blocking calls cannot be used.
764 *
765 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
766 * copy operation, returns 0 if no modifcation was done OR modification was
767 * successful.
703 */ 768 */
704int dev_pm_opp_enable(struct device *dev, unsigned long freq) 769int dev_pm_opp_enable(struct device *dev, unsigned long freq)
705{ 770{
706 return opp_set_availability(dev, freq, true); 771 return _opp_set_availability(dev, freq, true);
707} 772}
708EXPORT_SYMBOL_GPL(dev_pm_opp_enable); 773EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
709 774
@@ -722,26 +787,41 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
722 * integrity of the internal data structures. Callers should ensure that 787 * integrity of the internal data structures. Callers should ensure that
723 * this function is *NOT* called under RCU protection or in contexts where 788 * this function is *NOT* called under RCU protection or in contexts where
724 * mutex locking or synchronize_rcu() blocking calls cannot be used. 789 * mutex locking or synchronize_rcu() blocking calls cannot be used.
790 *
791 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
792 * copy operation, returns 0 if no modifcation was done OR modification was
793 * successful.
725 */ 794 */
726int dev_pm_opp_disable(struct device *dev, unsigned long freq) 795int dev_pm_opp_disable(struct device *dev, unsigned long freq)
727{ 796{
728 return opp_set_availability(dev, freq, false); 797 return _opp_set_availability(dev, freq, false);
729} 798}
730EXPORT_SYMBOL_GPL(dev_pm_opp_disable); 799EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
731 800
732/** 801/**
733 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp 802 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
734 * @dev: device pointer used to lookup device OPPs. 803 * @dev: device pointer used to lookup device OPPs.
804 *
805 * Return: pointer to notifier head if found, otherwise -ENODEV or
806 * -EINVAL based on type of error casted as pointer. value must be checked
807 * with IS_ERR to determine valid pointer or error result.
808 *
809 * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
810 * protected pointer. The reason for the same is that the opp pointer which is
811 * returned will remain valid for use with opp_get_{voltage, freq} only while
812 * under the locked area. The pointer returned must be used prior to unlocking
813 * with rcu_read_unlock() to maintain the integrity of the pointer.
735 */ 814 */
736struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) 815struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
737{ 816{
738 struct device_opp *dev_opp = find_device_opp(dev); 817 struct device_opp *dev_opp = _find_device_opp(dev);
739 818
740 if (IS_ERR(dev_opp)) 819 if (IS_ERR(dev_opp))
741 return ERR_CAST(dev_opp); /* matching type */ 820 return ERR_CAST(dev_opp); /* matching type */
742 821
743 return &dev_opp->srcu_head; 822 return &dev_opp->srcu_head;
744} 823}
824EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
745 825
746#ifdef CONFIG_OF 826#ifdef CONFIG_OF
747/** 827/**
@@ -749,6 +829,22 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
749 * @dev: device pointer used to lookup device OPPs. 829 * @dev: device pointer used to lookup device OPPs.
750 * 830 *
751 * Register the initial OPP table with the OPP library for given device. 831 * Register the initial OPP table with the OPP library for given device.
832 *
833 * Locking: The internal device_opp and opp structures are RCU protected.
834 * Hence this function indirectly uses RCU updater strategy with mutex locks
835 * to keep the integrity of the internal data structures. Callers should ensure
836 * that this function is *NOT* called under RCU protection or in contexts where
837 * mutex cannot be locked.
838 *
839 * Return:
840 * 0 On success OR
841 * Duplicate OPPs (both freq and volt are same) and opp->available
842 * -EEXIST Freq are same and volt are different OR
843 * Duplicate OPPs (both freq and volt are same) and !opp->available
844 * -ENOMEM Memory allocation failure
845 * -ENODEV when 'operating-points' property is not found or is invalid data
846 * in device node.
847 * -ENODATA when empty 'operating-points' property is found
752 */ 848 */
753int of_init_opp_table(struct device *dev) 849int of_init_opp_table(struct device *dev)
754{ 850{
@@ -777,7 +873,7 @@ int of_init_opp_table(struct device *dev)
777 unsigned long freq = be32_to_cpup(val++) * 1000; 873 unsigned long freq = be32_to_cpup(val++) * 1000;
778 unsigned long volt = be32_to_cpup(val++); 874 unsigned long volt = be32_to_cpup(val++);
779 875
780 if (dev_pm_opp_add_dynamic(dev, freq, volt, false)) 876 if (_opp_add_dynamic(dev, freq, volt, false))
781 dev_warn(dev, "%s: Failed to add OPP %ld\n", 877 dev_warn(dev, "%s: Failed to add OPP %ld\n",
782 __func__, freq); 878 __func__, freq);
783 nr -= 2; 879 nr -= 2;
@@ -792,6 +888,12 @@ EXPORT_SYMBOL_GPL(of_init_opp_table);
792 * @dev: device pointer used to lookup device OPPs. 888 * @dev: device pointer used to lookup device OPPs.
793 * 889 *
794 * Free OPPs created using static entries present in DT. 890 * Free OPPs created using static entries present in DT.
891 *
892 * Locking: The internal device_opp and opp structures are RCU protected.
893 * Hence this function indirectly uses RCU updater strategy with mutex locks
894 * to keep the integrity of the internal data structures. Callers should ensure
895 * that this function is *NOT* called under RCU protection or in contexts where
896 * mutex cannot be locked.
795 */ 897 */
796void of_free_opp_table(struct device *dev) 898void of_free_opp_table(struct device *dev)
797{ 899{
@@ -799,7 +901,7 @@ void of_free_opp_table(struct device *dev)
799 struct dev_pm_opp *opp, *tmp; 901 struct dev_pm_opp *opp, *tmp;
800 902
801 /* Check for existing list for 'dev' */ 903 /* Check for existing list for 'dev' */
802 dev_opp = find_device_opp(dev); 904 dev_opp = _find_device_opp(dev);
803 if (IS_ERR(dev_opp)) { 905 if (IS_ERR(dev_opp)) {
804 int error = PTR_ERR(dev_opp); 906 int error = PTR_ERR(dev_opp);
805 if (error != -ENODEV) 907 if (error != -ENODEV)
@@ -816,7 +918,7 @@ void of_free_opp_table(struct device *dev)
816 /* Free static OPPs */ 918 /* Free static OPPs */
817 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { 919 list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
818 if (!opp->dynamic) 920 if (!opp->dynamic)
819 __dev_pm_opp_remove(dev_opp, opp); 921 _opp_remove(dev_opp, opp);
820 } 922 }
821 923
822 mutex_unlock(&dev_opp_list_lock); 924 mutex_unlock(&dev_opp_list_lock);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index a8fe4c1a8d07..e56d538d039e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -64,6 +64,8 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
64 struct pm_qos_flags *pqf; 64 struct pm_qos_flags *pqf;
65 s32 val; 65 s32 val;
66 66
67 lockdep_assert_held(&dev->power.lock);
68
67 if (IS_ERR_OR_NULL(qos)) 69 if (IS_ERR_OR_NULL(qos))
68 return PM_QOS_FLAGS_UNDEFINED; 70 return PM_QOS_FLAGS_UNDEFINED;
69 71
@@ -104,6 +106,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
104 */ 106 */
105s32 __dev_pm_qos_read_value(struct device *dev) 107s32 __dev_pm_qos_read_value(struct device *dev)
106{ 108{
109 lockdep_assert_held(&dev->power.lock);
110
107 return IS_ERR_OR_NULL(dev->power.qos) ? 111 return IS_ERR_OR_NULL(dev->power.qos) ?
108 0 : pm_qos_read_value(&dev->power.qos->resume_latency); 112 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
109} 113}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 0da5865df5b1..beb8b27d4621 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -51,9 +51,11 @@ struct regmap_async {
51struct regmap { 51struct regmap {
52 union { 52 union {
53 struct mutex mutex; 53 struct mutex mutex;
54 spinlock_t spinlock; 54 struct {
55 spinlock_t spinlock;
56 unsigned long spinlock_flags;
57 };
55 }; 58 };
56 unsigned long spinlock_flags;
57 regmap_lock lock; 59 regmap_lock lock;
58 regmap_unlock unlock; 60 regmap_unlock unlock;
59 void *lock_arg; /* This is passed to lock/unlock functions */ 61 void *lock_arg; /* This is passed to lock/unlock functions */
@@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
233 235
234void regmap_async_complete_cb(struct regmap_async *async, int ret); 236void regmap_async_complete_cb(struct regmap_async *async, int ret);
235 237
238enum regmap_endian regmap_get_val_endian(struct device *dev,
239 const struct regmap_bus *bus,
240 const struct regmap_config *config);
241
236extern struct regcache_ops regcache_rbtree_ops; 242extern struct regcache_ops regcache_rbtree_ops;
237extern struct regcache_ops regcache_lzo_ops; 243extern struct regcache_ops regcache_lzo_ops;
238extern struct regcache_ops regcache_flat_ops; 244extern struct regcache_ops regcache_flat_ops;
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index e4c45d2299c1..8d304e2a943d 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg,
74} 74}
75 75
76static const struct regmap_bus ac97_regmap_bus = { 76static const struct regmap_bus ac97_regmap_bus = {
77 .reg_write = regmap_ac97_reg_write, 77 .reg_write = regmap_ac97_reg_write,
78 .reg_read = regmap_ac97_reg_read, 78 .reg_read = regmap_ac97_reg_read,
79}; 79};
80 80
81/** 81/**
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 053150a7f9f2..4b76e33110a2 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -14,6 +14,7 @@
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16
17#include "internal.h"
17 18
18static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, 19static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
19 unsigned int *val) 20 unsigned int *val)
@@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = {
87 .reg_read = regmap_smbus_word_reg_read, 88 .reg_read = regmap_smbus_word_reg_read,
88}; 89};
89 90
91static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
92 unsigned int *val)
93{
94 struct device *dev = context;
95 struct i2c_client *i2c = to_i2c_client(dev);
96 int ret;
97
98 if (reg > 0xff)
99 return -EINVAL;
100
101 ret = i2c_smbus_read_word_swapped(i2c, reg);
102 if (ret < 0)
103 return ret;
104
105 *val = ret;
106
107 return 0;
108}
109
110static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
111 unsigned int val)
112{
113 struct device *dev = context;
114 struct i2c_client *i2c = to_i2c_client(dev);
115
116 if (val > 0xffff || reg > 0xff)
117 return -EINVAL;
118
119 return i2c_smbus_write_word_swapped(i2c, reg, val);
120}
121
122static struct regmap_bus regmap_smbus_word_swapped = {
123 .reg_write = regmap_smbus_word_write_swapped,
124 .reg_read = regmap_smbus_word_read_swapped,
125};
126
90static int regmap_i2c_write(void *context, const void *data, size_t count) 127static int regmap_i2c_write(void *context, const void *data, size_t count)
91{ 128{
92 struct device *dev = context; 129 struct device *dev = context;
@@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
180 else if (config->val_bits == 16 && config->reg_bits == 8 && 217 else if (config->val_bits == 16 && config->reg_bits == 8 &&
181 i2c_check_functionality(i2c->adapter, 218 i2c_check_functionality(i2c->adapter,
182 I2C_FUNC_SMBUS_WORD_DATA)) 219 I2C_FUNC_SMBUS_WORD_DATA))
183 return &regmap_smbus_word; 220 switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
221 case REGMAP_ENDIAN_LITTLE:
222 return &regmap_smbus_word;
223 case REGMAP_ENDIAN_BIG:
224 return &regmap_smbus_word_swapped;
225 default: /* everything else is not supported */
226 break;
227 }
184 else if (config->val_bits == 8 && config->reg_bits == 8 && 228 else if (config->val_bits == 8 && config->reg_bits == 8 &&
185 i2c_check_functionality(i2c->adapter, 229 i2c_check_functionality(i2c->adapter,
186 I2C_FUNC_SMBUS_BYTE_DATA)) 230 I2C_FUNC_SMBUS_BYTE_DATA))
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d2f8a818d200..f99b098ddabf 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
473 return REGMAP_ENDIAN_BIG; 473 return REGMAP_ENDIAN_BIG;
474} 474}
475 475
476static enum regmap_endian regmap_get_val_endian(struct device *dev, 476enum regmap_endian regmap_get_val_endian(struct device *dev,
477 const struct regmap_bus *bus, 477 const struct regmap_bus *bus,
478 const struct regmap_config *config) 478 const struct regmap_config *config)
479{ 479{
480 struct device_node *np; 480 struct device_node *np;
481 enum regmap_endian endian; 481 enum regmap_endian endian;
@@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev,
513 /* Use this if no other value was found */ 513 /* Use this if no other value was found */
514 return REGMAP_ENDIAN_BIG; 514 return REGMAP_ENDIAN_BIG;
515} 515}
516EXPORT_SYMBOL_GPL(regmap_get_val_endian);
516 517
517/** 518/**
518 * regmap_init(): Initialise register map 519 * regmap_init(): Initialise register map
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 63fc7f06a014..2a04d341e598 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -47,6 +47,7 @@
47#include <asm/xen/hypervisor.h> 47#include <asm/xen/hypervisor.h>
48#include <asm/xen/hypercall.h> 48#include <asm/xen/hypercall.h>
49#include <xen/balloon.h> 49#include <xen/balloon.h>
50#include <xen/grant_table.h>
50#include "common.h" 51#include "common.h"
51 52
52/* 53/*
@@ -100,7 +101,7 @@ module_param(log_stats, int, 0644);
100 101
101#define BLKBACK_INVALID_HANDLE (~0) 102#define BLKBACK_INVALID_HANDLE (~0)
102 103
103/* Number of free pages to remove on each call to free_xenballooned_pages */ 104/* Number of free pages to remove on each call to gnttab_free_pages */
104#define NUM_BATCH_FREE_PAGES 10 105#define NUM_BATCH_FREE_PAGES 10
105 106
106static inline int get_free_page(struct xen_blkif *blkif, struct page **page) 107static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
@@ -111,7 +112,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
111 if (list_empty(&blkif->free_pages)) { 112 if (list_empty(&blkif->free_pages)) {
112 BUG_ON(blkif->free_pages_num != 0); 113 BUG_ON(blkif->free_pages_num != 0);
113 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); 114 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
114 return alloc_xenballooned_pages(1, page, false); 115 return gnttab_alloc_pages(1, page);
115 } 116 }
116 BUG_ON(blkif->free_pages_num == 0); 117 BUG_ON(blkif->free_pages_num == 0);
117 page[0] = list_first_entry(&blkif->free_pages, struct page, lru); 118 page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
@@ -151,14 +152,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
151 blkif->free_pages_num--; 152 blkif->free_pages_num--;
152 if (++num_pages == NUM_BATCH_FREE_PAGES) { 153 if (++num_pages == NUM_BATCH_FREE_PAGES) {
153 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); 154 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
154 free_xenballooned_pages(num_pages, page); 155 gnttab_free_pages(num_pages, page);
155 spin_lock_irqsave(&blkif->free_pages_lock, flags); 156 spin_lock_irqsave(&blkif->free_pages_lock, flags);
156 num_pages = 0; 157 num_pages = 0;
157 } 158 }
158 } 159 }
159 spin_unlock_irqrestore(&blkif->free_pages_lock, flags); 160 spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
160 if (num_pages != 0) 161 if (num_pages != 0)
161 free_xenballooned_pages(num_pages, page); 162 gnttab_free_pages(num_pages, page);
162} 163}
163 164
164#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) 165#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
@@ -262,6 +263,17 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
262 atomic_dec(&blkif->persistent_gnt_in_use); 263 atomic_dec(&blkif->persistent_gnt_in_use);
263} 264}
264 265
266static void free_persistent_gnts_unmap_callback(int result,
267 struct gntab_unmap_queue_data *data)
268{
269 struct completion *c = data->data;
270
271 /* BUG_ON used to reproduce existing behaviour,
272 but is this the best way to deal with this? */
273 BUG_ON(result);
274 complete(c);
275}
276
265static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, 277static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
266 unsigned int num) 278 unsigned int num)
267{ 279{
@@ -269,8 +281,17 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
269 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 281 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
270 struct persistent_gnt *persistent_gnt; 282 struct persistent_gnt *persistent_gnt;
271 struct rb_node *n; 283 struct rb_node *n;
272 int ret = 0;
273 int segs_to_unmap = 0; 284 int segs_to_unmap = 0;
285 struct gntab_unmap_queue_data unmap_data;
286 struct completion unmap_completion;
287
288 init_completion(&unmap_completion);
289
290 unmap_data.data = &unmap_completion;
291 unmap_data.done = &free_persistent_gnts_unmap_callback;
292 unmap_data.pages = pages;
293 unmap_data.unmap_ops = unmap;
294 unmap_data.kunmap_ops = NULL;
274 295
275 foreach_grant_safe(persistent_gnt, n, root, node) { 296 foreach_grant_safe(persistent_gnt, n, root, node) {
276 BUG_ON(persistent_gnt->handle == 297 BUG_ON(persistent_gnt->handle ==
@@ -285,9 +306,11 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
285 306
286 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || 307 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
287 !rb_next(&persistent_gnt->node)) { 308 !rb_next(&persistent_gnt->node)) {
288 ret = gnttab_unmap_refs(unmap, NULL, pages, 309
289 segs_to_unmap); 310 unmap_data.count = segs_to_unmap;
290 BUG_ON(ret); 311 gnttab_unmap_refs_async(&unmap_data);
312 wait_for_completion(&unmap_completion);
313
291 put_free_pages(blkif, pages, segs_to_unmap); 314 put_free_pages(blkif, pages, segs_to_unmap);
292 segs_to_unmap = 0; 315 segs_to_unmap = 0;
293 } 316 }
@@ -653,18 +676,14 @@ void xen_blkbk_free_caches(struct xen_blkif *blkif)
653 shrink_free_pagepool(blkif, 0 /* All */); 676 shrink_free_pagepool(blkif, 0 /* All */);
654} 677}
655 678
656/* 679static unsigned int xen_blkbk_unmap_prepare(
657 * Unmap the grant references, and also remove the M2P over-rides 680 struct xen_blkif *blkif,
658 * used in the 'pending_req'. 681 struct grant_page **pages,
659 */ 682 unsigned int num,
660static void xen_blkbk_unmap(struct xen_blkif *blkif, 683 struct gnttab_unmap_grant_ref *unmap_ops,
661 struct grant_page *pages[], 684 struct page **unmap_pages)
662 int num)
663{ 685{
664 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
665 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
666 unsigned int i, invcount = 0; 686 unsigned int i, invcount = 0;
667 int ret;
668 687
669 for (i = 0; i < num; i++) { 688 for (i = 0; i < num; i++) {
670 if (pages[i]->persistent_gnt != NULL) { 689 if (pages[i]->persistent_gnt != NULL) {
@@ -674,21 +693,95 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
674 if (pages[i]->handle == BLKBACK_INVALID_HANDLE) 693 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
675 continue; 694 continue;
676 unmap_pages[invcount] = pages[i]->page; 695 unmap_pages[invcount] = pages[i]->page;
677 gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page), 696 gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
678 GNTMAP_host_map, pages[i]->handle); 697 GNTMAP_host_map, pages[i]->handle);
679 pages[i]->handle = BLKBACK_INVALID_HANDLE; 698 pages[i]->handle = BLKBACK_INVALID_HANDLE;
680 if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 699 invcount++;
681 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, 700 }
682 invcount); 701
702 return invcount;
703}
704
705static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
706{
707 struct pending_req* pending_req = (struct pending_req*) (data->data);
708 struct xen_blkif *blkif = pending_req->blkif;
709
710 /* BUG_ON used to reproduce existing behaviour,
711 but is this the best way to deal with this? */
712 BUG_ON(result);
713
714 put_free_pages(blkif, data->pages, data->count);
715 make_response(blkif, pending_req->id,
716 pending_req->operation, pending_req->status);
717 free_req(blkif, pending_req);
718 /*
719 * Make sure the request is freed before releasing blkif,
720 * or there could be a race between free_req and the
721 * cleanup done in xen_blkif_free during shutdown.
722 *
723 * NB: The fact that we might try to wake up pending_free_wq
724 * before drain_complete (in case there's a drain going on)
725 * it's not a problem with our current implementation
726 * because we can assure there's no thread waiting on
727 * pending_free_wq if there's a drain going on, but it has
728 * to be taken into account if the current model is changed.
729 */
730 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
731 complete(&blkif->drain_complete);
732 }
733 xen_blkif_put(blkif);
734}
735
736static void xen_blkbk_unmap_and_respond(struct pending_req *req)
737{
738 struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
739 struct xen_blkif *blkif = req->blkif;
740 struct grant_page **pages = req->segments;
741 unsigned int invcount;
742
743 invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
744 req->unmap, req->unmap_pages);
745
746 work->data = req;
747 work->done = xen_blkbk_unmap_and_respond_callback;
748 work->unmap_ops = req->unmap;
749 work->kunmap_ops = NULL;
750 work->pages = req->unmap_pages;
751 work->count = invcount;
752
753 gnttab_unmap_refs_async(&req->gnttab_unmap_data);
754}
755
756
757/*
758 * Unmap the grant references.
759 *
760 * This could accumulate ops up to the batch size to reduce the number
761 * of hypercalls, but since this is only used in error paths there's
762 * no real need.
763 */
764static void xen_blkbk_unmap(struct xen_blkif *blkif,
765 struct grant_page *pages[],
766 int num)
767{
768 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
769 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
770 unsigned int invcount = 0;
771 int ret;
772
773 while (num) {
774 unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
775
776 invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
777 unmap, unmap_pages);
778 if (invcount) {
779 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
683 BUG_ON(ret); 780 BUG_ON(ret);
684 put_free_pages(blkif, unmap_pages, invcount); 781 put_free_pages(blkif, unmap_pages, invcount);
685 invcount = 0;
686 } 782 }
687 } 783 pages += batch;
688 if (invcount) { 784 num -= batch;
689 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
690 BUG_ON(ret);
691 put_free_pages(blkif, unmap_pages, invcount);
692 } 785 }
693} 786}
694 787
@@ -982,32 +1075,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
982 * the grant references associated with 'request' and provide 1075 * the grant references associated with 'request' and provide
983 * the proper response on the ring. 1076 * the proper response on the ring.
984 */ 1077 */
985 if (atomic_dec_and_test(&pending_req->pendcnt)) { 1078 if (atomic_dec_and_test(&pending_req->pendcnt))
986 struct xen_blkif *blkif = pending_req->blkif; 1079 xen_blkbk_unmap_and_respond(pending_req);
987
988 xen_blkbk_unmap(blkif,
989 pending_req->segments,
990 pending_req->nr_pages);
991 make_response(blkif, pending_req->id,
992 pending_req->operation, pending_req->status);
993 free_req(blkif, pending_req);
994 /*
995 * Make sure the request is freed before releasing blkif,
996 * or there could be a race between free_req and the
997 * cleanup done in xen_blkif_free during shutdown.
998 *
999 * NB: The fact that we might try to wake up pending_free_wq
1000 * before drain_complete (in case there's a drain going on)
1001 * it's not a problem with our current implementation
1002 * because we can assure there's no thread waiting on
1003 * pending_free_wq if there's a drain going on, but it has
1004 * to be taken into account if the current model is changed.
1005 */
1006 if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
1007 complete(&blkif->drain_complete);
1008 }
1009 xen_blkif_put(blkif);
1010 }
1011} 1080}
1012 1081
1013/* 1082/*
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index f65b807e3236..cc90a840e616 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -350,6 +350,9 @@ struct pending_req {
350 struct grant_page *indirect_pages[MAX_INDIRECT_PAGES]; 350 struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
351 struct seg_buf seg[MAX_INDIRECT_SEGMENTS]; 351 struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
352 struct bio *biolist[MAX_INDIRECT_SEGMENTS]; 352 struct bio *biolist[MAX_INDIRECT_SEGMENTS];
353 struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
354 struct page *unmap_pages[MAX_INDIRECT_SEGMENTS];
355 struct gntab_unmap_queue_data gnttab_unmap_data;
353}; 356};
354 357
355 358
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d5d4cd82b9f7..5c0baa9ffc64 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -976,8 +976,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
976 status = acpi_resource_to_address64(res, &addr); 976 status = acpi_resource_to_address64(res, &addr);
977 977
978 if (ACPI_SUCCESS(status)) { 978 if (ACPI_SUCCESS(status)) {
979 hdp->hd_phys_address = addr.minimum; 979 hdp->hd_phys_address = addr.address.minimum;
980 hdp->hd_address = ioremap(addr.minimum, addr.address_length); 980 hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
981 981
982 if (hpet_is_known(hdp)) { 982 if (hpet_is_known(hdp)) {
983 iounmap(hdp->hd_address); 983 iounmap(hdp->hd_address);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 04645c09fe5e..9cd6968e2f92 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
569 __u32 c = f->pool[2], d = f->pool[3]; 569 __u32 c = f->pool[2], d = f->pool[3];
570 570
571 a += b; c += d; 571 a += b; c += d;
572 b = rol32(a, 6); d = rol32(c, 27); 572 b = rol32(b, 6); d = rol32(d, 27);
573 d ^= a; b ^= c; 573 d ^= a; b ^= c;
574 574
575 a += b; c += d; 575 a += b; c += d;
576 b = rol32(a, 16); d = rol32(c, 14); 576 b = rol32(b, 16); d = rol32(d, 14);
577 d ^= a; b ^= c; 577 d ^= a; b ^= c;
578 578
579 a += b; c += d; 579 a += b; c += d;
580 b = rol32(a, 6); d = rol32(c, 27); 580 b = rol32(b, 6); d = rol32(d, 27);
581 d ^= a; b ^= c; 581 d ^= a; b ^= c;
582 582
583 a += b; c += d; 583 a += b; c += d;
584 b = rol32(a, 16); d = rol32(c, 14); 584 b = rol32(b, 16); d = rol32(d, 14);
585 d ^= a; b ^= c; 585 d ^= a; b ^= c;
586 586
587 f->pool[0] = a; f->pool[1] = b; 587 f->pool[0] = a; f->pool[1] = b;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3f44f292d066..91f86131bb7a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -13,6 +13,7 @@ config COMMON_CLK
13 bool 13 bool
14 select HAVE_CLK_PREPARE 14 select HAVE_CLK_PREPARE
15 select CLKDEV_LOOKUP 15 select CLKDEV_LOOKUP
16 select SRCU
16 ---help--- 17 ---help---
17 The common clock framework is a single definition of struct 18 The common clock framework is a single definition of struct
18 clk, useful across many platforms, as well as an 19 clk, useful across many platforms, as well as an
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 29b2ef5a68b9..a171fef2c2b6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
2 2
3config CPU_FREQ 3config CPU_FREQ
4 bool "CPU Frequency scaling" 4 bool "CPU Frequency scaling"
5 select SRCU
5 help 6 help
6 CPU Frequency scaling allows you to change the clock speed of 7 CPU Frequency scaling allows you to change the clock speed of
7 CPUs on the fly. This is a nice method to save power, because 8 CPUs on the fly. This is a nice method to save power, because
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 89ae88f91895..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -57,6 +57,16 @@ config X86_ACPI_CPUFREQ_CPB
57 By enabling this option the acpi_cpufreq driver provides the old 57 By enabling this option the acpi_cpufreq driver provides the old
58 entry in addition to the new boost ones, for compatibility reasons. 58 entry in addition to the new boost ones, for compatibility reasons.
59 59
60config X86_SFI_CPUFREQ
61 tristate "SFI Performance-States driver"
62 depends on X86_INTEL_MID && SFI
63 help
64 This adds a CPUFreq driver for some Silvermont based Intel Atom
65 architectures like Z34xx and Z35xx which enumerate processor
66 performance states through SFI.
67
68 If in doubt, say N.
69
60config ELAN_CPUFREQ 70config ELAN_CPUFREQ
61 tristate "AMD Elan SC400 and SC410" 71 tristate "AMD Elan SC400 and SC410"
62 depends on MELAN 72 depends on MELAN
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index b3ca7b0b2c33..8b4220ac888b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
41obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o 41obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
42obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o 42obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
43obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o 43obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
44obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
44 45
45################################################################################## 46##################################################################################
46# ARM SoC drivers 47# ARM SoC drivers
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index fde97d6e31d6..bab67db54b7e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -320,8 +320,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
320{ 320{
321 struct private_data *priv = policy->driver_data; 321 struct private_data *priv = policy->driver_data;
322 322
323 if (priv->cdev) 323 cpufreq_cooling_unregister(priv->cdev);
324 cpufreq_cooling_unregister(priv->cdev);
325 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 324 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
326 of_free_opp_table(priv->cpu_dev); 325 of_free_opp_table(priv->cpu_dev);
327 clk_put(policy->clk); 326 clk_put(policy->clk);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 46bed4f81cde..28e59a48b35f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -27,9 +27,21 @@
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/suspend.h> 29#include <linux/suspend.h>
30#include <linux/syscore_ops.h>
30#include <linux/tick.h> 31#include <linux/tick.h>
31#include <trace/events/power.h> 32#include <trace/events/power.h>
32 33
34/* Macros to iterate over lists */
35/* Iterate over online CPUs policies */
36static LIST_HEAD(cpufreq_policy_list);
37#define for_each_policy(__policy) \
38 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
39
40/* Iterate over governors */
41static LIST_HEAD(cpufreq_governor_list);
42#define for_each_governor(__governor) \
43 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
44
33/** 45/**
34 * The "cpufreq driver" - the arch- or hardware-dependent low 46 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock 47 * level driver of CPUFreq support, and its spinlock. This lock
@@ -40,7 +52,6 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback); 52static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41static DEFINE_RWLOCK(cpufreq_driver_lock); 53static DEFINE_RWLOCK(cpufreq_driver_lock);
42DEFINE_MUTEX(cpufreq_governor_lock); 54DEFINE_MUTEX(cpufreq_governor_lock);
43static LIST_HEAD(cpufreq_policy_list);
44 55
45/* This one keeps track of the previously set governor of a removed CPU */ 56/* This one keeps track of the previously set governor of a removed CPU */
46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); 57static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
@@ -62,7 +73,7 @@ static DECLARE_RWSEM(cpufreq_rwsem);
62/* internal prototypes */ 73/* internal prototypes */
63static int __cpufreq_governor(struct cpufreq_policy *policy, 74static int __cpufreq_governor(struct cpufreq_policy *policy,
64 unsigned int event); 75 unsigned int event);
65static unsigned int __cpufreq_get(unsigned int cpu); 76static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
66static void handle_update(struct work_struct *work); 77static void handle_update(struct work_struct *work);
67 78
68/** 79/**
@@ -93,7 +104,6 @@ void disable_cpufreq(void)
93{ 104{
94 off = 1; 105 off = 1;
95} 106}
96static LIST_HEAD(cpufreq_governor_list);
97static DEFINE_MUTEX(cpufreq_governor_mutex); 107static DEFINE_MUTEX(cpufreq_governor_mutex);
98 108
99bool have_governor_per_policy(void) 109bool have_governor_per_policy(void)
@@ -202,7 +212,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
202 struct cpufreq_policy *policy = NULL; 212 struct cpufreq_policy *policy = NULL;
203 unsigned long flags; 213 unsigned long flags;
204 214
205 if (cpufreq_disabled() || (cpu >= nr_cpu_ids)) 215 if (cpu >= nr_cpu_ids)
206 return NULL; 216 return NULL;
207 217
208 if (!down_read_trylock(&cpufreq_rwsem)) 218 if (!down_read_trylock(&cpufreq_rwsem))
@@ -229,9 +239,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
229 239
230void cpufreq_cpu_put(struct cpufreq_policy *policy) 240void cpufreq_cpu_put(struct cpufreq_policy *policy)
231{ 241{
232 if (cpufreq_disabled())
233 return;
234
235 kobject_put(&policy->kobj); 242 kobject_put(&policy->kobj);
236 up_read(&cpufreq_rwsem); 243 up_read(&cpufreq_rwsem);
237} 244}
@@ -249,12 +256,12 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
249 * systems as each CPU might be scaled differently. So, use the arch 256 * systems as each CPU might be scaled differently. So, use the arch
250 * per-CPU loops_per_jiffy value wherever possible. 257 * per-CPU loops_per_jiffy value wherever possible.
251 */ 258 */
252#ifndef CONFIG_SMP
253static unsigned long l_p_j_ref;
254static unsigned int l_p_j_ref_freq;
255
256static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) 259static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
257{ 260{
261#ifndef CONFIG_SMP
262 static unsigned long l_p_j_ref;
263 static unsigned int l_p_j_ref_freq;
264
258 if (ci->flags & CPUFREQ_CONST_LOOPS) 265 if (ci->flags & CPUFREQ_CONST_LOOPS)
259 return; 266 return;
260 267
@@ -270,13 +277,8 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
270 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", 277 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
271 loops_per_jiffy, ci->new); 278 loops_per_jiffy, ci->new);
272 } 279 }
273}
274#else
275static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
276{
277 return;
278}
279#endif 280#endif
281}
280 282
281static void __cpufreq_notify_transition(struct cpufreq_policy *policy, 283static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
282 struct cpufreq_freqs *freqs, unsigned int state) 284 struct cpufreq_freqs *freqs, unsigned int state)
@@ -432,11 +434,11 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
432} 434}
433define_one_global_rw(boost); 435define_one_global_rw(boost);
434 436
435static struct cpufreq_governor *__find_governor(const char *str_governor) 437static struct cpufreq_governor *find_governor(const char *str_governor)
436{ 438{
437 struct cpufreq_governor *t; 439 struct cpufreq_governor *t;
438 440
439 list_for_each_entry(t, &cpufreq_governor_list, governor_list) 441 for_each_governor(t)
440 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) 442 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
441 return t; 443 return t;
442 444
@@ -463,12 +465,12 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
463 *policy = CPUFREQ_POLICY_POWERSAVE; 465 *policy = CPUFREQ_POLICY_POWERSAVE;
464 err = 0; 466 err = 0;
465 } 467 }
466 } else if (has_target()) { 468 } else {
467 struct cpufreq_governor *t; 469 struct cpufreq_governor *t;
468 470
469 mutex_lock(&cpufreq_governor_mutex); 471 mutex_lock(&cpufreq_governor_mutex);
470 472
471 t = __find_governor(str_governor); 473 t = find_governor(str_governor);
472 474
473 if (t == NULL) { 475 if (t == NULL) {
474 int ret; 476 int ret;
@@ -478,7 +480,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
478 mutex_lock(&cpufreq_governor_mutex); 480 mutex_lock(&cpufreq_governor_mutex);
479 481
480 if (ret == 0) 482 if (ret == 0)
481 t = __find_governor(str_governor); 483 t = find_governor(str_governor);
482 } 484 }
483 485
484 if (t != NULL) { 486 if (t != NULL) {
@@ -513,8 +515,7 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
513show_one(scaling_min_freq, min); 515show_one(scaling_min_freq, min);
514show_one(scaling_max_freq, max); 516show_one(scaling_max_freq, max);
515 517
516static ssize_t show_scaling_cur_freq( 518static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
517 struct cpufreq_policy *policy, char *buf)
518{ 519{
519 ssize_t ret; 520 ssize_t ret;
520 521
@@ -563,7 +564,7 @@ store_one(scaling_max_freq, max);
563static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, 564static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
564 char *buf) 565 char *buf)
565{ 566{
566 unsigned int cur_freq = __cpufreq_get(policy->cpu); 567 unsigned int cur_freq = __cpufreq_get(policy);
567 if (!cur_freq) 568 if (!cur_freq)
568 return sprintf(buf, "<unknown>"); 569 return sprintf(buf, "<unknown>");
569 return sprintf(buf, "%u\n", cur_freq); 570 return sprintf(buf, "%u\n", cur_freq);
@@ -639,7 +640,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
639 goto out; 640 goto out;
640 } 641 }
641 642
642 list_for_each_entry(t, &cpufreq_governor_list, governor_list) { 643 for_each_governor(t) {
643 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) 644 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
644 - (CPUFREQ_NAME_LEN + 2))) 645 - (CPUFREQ_NAME_LEN + 2)))
645 goto out; 646 goto out;
@@ -902,7 +903,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
902 903
903 /* set up files for this cpu device */ 904 /* set up files for this cpu device */
904 drv_attr = cpufreq_driver->attr; 905 drv_attr = cpufreq_driver->attr;
905 while ((drv_attr) && (*drv_attr)) { 906 while (drv_attr && *drv_attr) {
906 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); 907 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
907 if (ret) 908 if (ret)
908 return ret; 909 return ret;
@@ -936,7 +937,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
936 memcpy(&new_policy, policy, sizeof(*policy)); 937 memcpy(&new_policy, policy, sizeof(*policy));
937 938
938 /* Update governor of new_policy to the governor used before hotplug */ 939 /* Update governor of new_policy to the governor used before hotplug */
939 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); 940 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
940 if (gov) 941 if (gov)
941 pr_debug("Restoring governor %s for cpu %d\n", 942 pr_debug("Restoring governor %s for cpu %d\n",
942 policy->governor->name, policy->cpu); 943 policy->governor->name, policy->cpu);
@@ -958,7 +959,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
958 } 959 }
959} 960}
960 961
961#ifdef CONFIG_HOTPLUG_CPU
962static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, 962static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
963 unsigned int cpu, struct device *dev) 963 unsigned int cpu, struct device *dev)
964{ 964{
@@ -996,7 +996,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
996 996
997 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 997 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
998} 998}
999#endif
1000 999
1001static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) 1000static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1002{ 1001{
@@ -1033,6 +1032,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
1033 init_rwsem(&policy->rwsem); 1032 init_rwsem(&policy->rwsem);
1034 spin_lock_init(&policy->transition_lock); 1033 spin_lock_init(&policy->transition_lock);
1035 init_waitqueue_head(&policy->transition_wait); 1034 init_waitqueue_head(&policy->transition_wait);
1035 init_completion(&policy->kobj_unregister);
1036 INIT_WORK(&policy->update, handle_update);
1036 1037
1037 return policy; 1038 return policy;
1038 1039
@@ -1091,15 +1092,9 @@ static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1091 } 1092 }
1092 1093
1093 down_write(&policy->rwsem); 1094 down_write(&policy->rwsem);
1094
1095 policy->last_cpu = policy->cpu;
1096 policy->cpu = cpu; 1095 policy->cpu = cpu;
1097
1098 up_write(&policy->rwsem); 1096 up_write(&policy->rwsem);
1099 1097
1100 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1101 CPUFREQ_UPDATE_POLICY_CPU, policy);
1102
1103 return 0; 1098 return 0;
1104} 1099}
1105 1100
@@ -1110,41 +1105,32 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1110 struct cpufreq_policy *policy; 1105 struct cpufreq_policy *policy;
1111 unsigned long flags; 1106 unsigned long flags;
1112 bool recover_policy = cpufreq_suspended; 1107 bool recover_policy = cpufreq_suspended;
1113#ifdef CONFIG_HOTPLUG_CPU
1114 struct cpufreq_policy *tpolicy;
1115#endif
1116 1108
1117 if (cpu_is_offline(cpu)) 1109 if (cpu_is_offline(cpu))
1118 return 0; 1110 return 0;
1119 1111
1120 pr_debug("adding CPU %u\n", cpu); 1112 pr_debug("adding CPU %u\n", cpu);
1121 1113
1122#ifdef CONFIG_SMP
1123 /* check whether a different CPU already registered this 1114 /* check whether a different CPU already registered this
1124 * CPU because it is in the same boat. */ 1115 * CPU because it is in the same boat. */
1125 policy = cpufreq_cpu_get(cpu); 1116 policy = cpufreq_cpu_get_raw(cpu);
1126 if (unlikely(policy)) { 1117 if (unlikely(policy))
1127 cpufreq_cpu_put(policy);
1128 return 0; 1118 return 0;
1129 }
1130#endif
1131 1119
1132 if (!down_read_trylock(&cpufreq_rwsem)) 1120 if (!down_read_trylock(&cpufreq_rwsem))
1133 return 0; 1121 return 0;
1134 1122
1135#ifdef CONFIG_HOTPLUG_CPU
1136 /* Check if this cpu was hot-unplugged earlier and has siblings */ 1123 /* Check if this cpu was hot-unplugged earlier and has siblings */
1137 read_lock_irqsave(&cpufreq_driver_lock, flags); 1124 read_lock_irqsave(&cpufreq_driver_lock, flags);
1138 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { 1125 for_each_policy(policy) {
1139 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { 1126 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
1140 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1127 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1141 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); 1128 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1142 up_read(&cpufreq_rwsem); 1129 up_read(&cpufreq_rwsem);
1143 return ret; 1130 return ret;
1144 } 1131 }
1145 } 1132 }
1146 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1133 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1147#endif
1148 1134
1149 /* 1135 /*
1150 * Restore the saved policy when doing light-weight init and fall back 1136 * Restore the saved policy when doing light-weight init and fall back
@@ -1171,9 +1157,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1171 1157
1172 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1158 cpumask_copy(policy->cpus, cpumask_of(cpu));
1173 1159
1174 init_completion(&policy->kobj_unregister);
1175 INIT_WORK(&policy->update, handle_update);
1176
1177 /* call driver. From then on the cpufreq must be able 1160 /* call driver. From then on the cpufreq must be able
1178 * to accept all calls to ->verify and ->setpolicy for this CPU 1161 * to accept all calls to ->verify and ->setpolicy for this CPU
1179 */ 1162 */
@@ -1371,11 +1354,10 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1371 pr_err("%s: Failed to stop governor\n", __func__); 1354 pr_err("%s: Failed to stop governor\n", __func__);
1372 return ret; 1355 return ret;
1373 } 1356 }
1374 }
1375 1357
1376 if (!cpufreq_driver->setpolicy)
1377 strncpy(per_cpu(cpufreq_cpu_governor, cpu), 1358 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1378 policy->governor->name, CPUFREQ_NAME_LEN); 1359 policy->governor->name, CPUFREQ_NAME_LEN);
1360 }
1379 1361
1380 down_read(&policy->rwsem); 1362 down_read(&policy->rwsem);
1381 cpus = cpumask_weight(policy->cpus); 1363 cpus = cpumask_weight(policy->cpus);
@@ -1416,9 +1398,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1416 unsigned long flags; 1398 unsigned long flags;
1417 struct cpufreq_policy *policy; 1399 struct cpufreq_policy *policy;
1418 1400
1419 read_lock_irqsave(&cpufreq_driver_lock, flags); 1401 write_lock_irqsave(&cpufreq_driver_lock, flags);
1420 policy = per_cpu(cpufreq_cpu_data, cpu); 1402 policy = per_cpu(cpufreq_cpu_data, cpu);
1421 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1403 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1404 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1422 1405
1423 if (!policy) { 1406 if (!policy) {
1424 pr_debug("%s: No cpu_data found\n", __func__); 1407 pr_debug("%s: No cpu_data found\n", __func__);
@@ -1473,7 +1456,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1473 } 1456 }
1474 } 1457 }
1475 1458
1476 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1477 return 0; 1459 return 0;
1478} 1460}
1479 1461
@@ -1510,30 +1492,23 @@ static void handle_update(struct work_struct *work)
1510/** 1492/**
1511 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're 1493 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1512 * in deep trouble. 1494 * in deep trouble.
1513 * @cpu: cpu number 1495 * @policy: policy managing CPUs
1514 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1515 * @new_freq: CPU frequency the CPU actually runs at 1496 * @new_freq: CPU frequency the CPU actually runs at
1516 * 1497 *
1517 * We adjust to current frequency first, and need to clean up later. 1498 * We adjust to current frequency first, and need to clean up later.
1518 * So either call to cpufreq_update_policy() or schedule handle_update()). 1499 * So either call to cpufreq_update_policy() or schedule handle_update()).
1519 */ 1500 */
1520static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, 1501static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1521 unsigned int new_freq) 1502 unsigned int new_freq)
1522{ 1503{
1523 struct cpufreq_policy *policy;
1524 struct cpufreq_freqs freqs; 1504 struct cpufreq_freqs freqs;
1525 unsigned long flags;
1526 1505
1527 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", 1506 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1528 old_freq, new_freq); 1507 policy->cur, new_freq);
1529 1508
1530 freqs.old = old_freq; 1509 freqs.old = policy->cur;
1531 freqs.new = new_freq; 1510 freqs.new = new_freq;
1532 1511
1533 read_lock_irqsave(&cpufreq_driver_lock, flags);
1534 policy = per_cpu(cpufreq_cpu_data, cpu);
1535 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1536
1537 cpufreq_freq_transition_begin(policy, &freqs); 1512 cpufreq_freq_transition_begin(policy, &freqs);
1538 cpufreq_freq_transition_end(policy, &freqs, 0); 1513 cpufreq_freq_transition_end(policy, &freqs, 0);
1539} 1514}
@@ -1583,22 +1558,21 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu)
1583} 1558}
1584EXPORT_SYMBOL(cpufreq_quick_get_max); 1559EXPORT_SYMBOL(cpufreq_quick_get_max);
1585 1560
1586static unsigned int __cpufreq_get(unsigned int cpu) 1561static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1587{ 1562{
1588 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1589 unsigned int ret_freq = 0; 1563 unsigned int ret_freq = 0;
1590 1564
1591 if (!cpufreq_driver->get) 1565 if (!cpufreq_driver->get)
1592 return ret_freq; 1566 return ret_freq;
1593 1567
1594 ret_freq = cpufreq_driver->get(cpu); 1568 ret_freq = cpufreq_driver->get(policy->cpu);
1595 1569
1596 if (ret_freq && policy->cur && 1570 if (ret_freq && policy->cur &&
1597 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1571 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1598 /* verify no discrepancy between actual and 1572 /* verify no discrepancy between actual and
1599 saved value exists */ 1573 saved value exists */
1600 if (unlikely(ret_freq != policy->cur)) { 1574 if (unlikely(ret_freq != policy->cur)) {
1601 cpufreq_out_of_sync(cpu, policy->cur, ret_freq); 1575 cpufreq_out_of_sync(policy, ret_freq);
1602 schedule_work(&policy->update); 1576 schedule_work(&policy->update);
1603 } 1577 }
1604 } 1578 }
@@ -1619,7 +1593,7 @@ unsigned int cpufreq_get(unsigned int cpu)
1619 1593
1620 if (policy) { 1594 if (policy) {
1621 down_read(&policy->rwsem); 1595 down_read(&policy->rwsem);
1622 ret_freq = __cpufreq_get(cpu); 1596 ret_freq = __cpufreq_get(policy);
1623 up_read(&policy->rwsem); 1597 up_read(&policy->rwsem);
1624 1598
1625 cpufreq_cpu_put(policy); 1599 cpufreq_cpu_put(policy);
@@ -1682,7 +1656,7 @@ void cpufreq_suspend(void)
1682 1656
1683 pr_debug("%s: Suspending Governors\n", __func__); 1657 pr_debug("%s: Suspending Governors\n", __func__);
1684 1658
1685 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { 1659 for_each_policy(policy) {
1686 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) 1660 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1687 pr_err("%s: Failed to stop governor for policy: %p\n", 1661 pr_err("%s: Failed to stop governor for policy: %p\n",
1688 __func__, policy); 1662 __func__, policy);
@@ -1716,7 +1690,7 @@ void cpufreq_resume(void)
1716 1690
1717 pr_debug("%s: Resuming Governors\n", __func__); 1691 pr_debug("%s: Resuming Governors\n", __func__);
1718 1692
1719 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { 1693 for_each_policy(policy) {
1720 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) 1694 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1721 pr_err("%s: Failed to resume driver: %p\n", __func__, 1695 pr_err("%s: Failed to resume driver: %p\n", __func__,
1722 policy); 1696 policy);
@@ -2006,10 +1980,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
2006} 1980}
2007EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1981EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2008 1982
2009/*
2010 * when "event" is CPUFREQ_GOV_LIMITS
2011 */
2012
2013static int __cpufreq_governor(struct cpufreq_policy *policy, 1983static int __cpufreq_governor(struct cpufreq_policy *policy,
2014 unsigned int event) 1984 unsigned int event)
2015{ 1985{
@@ -2107,7 +2077,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
2107 2077
2108 governor->initialized = 0; 2078 governor->initialized = 0;
2109 err = -EBUSY; 2079 err = -EBUSY;
2110 if (__find_governor(governor->name) == NULL) { 2080 if (!find_governor(governor->name)) {
2111 err = 0; 2081 err = 0;
2112 list_add(&governor->governor_list, &cpufreq_governor_list); 2082 list_add(&governor->governor_list, &cpufreq_governor_list);
2113 } 2083 }
@@ -2307,8 +2277,7 @@ int cpufreq_update_policy(unsigned int cpu)
2307 policy->cur = new_policy.cur; 2277 policy->cur = new_policy.cur;
2308 } else { 2278 } else {
2309 if (policy->cur != new_policy.cur && has_target()) 2279 if (policy->cur != new_policy.cur && has_target())
2310 cpufreq_out_of_sync(cpu, policy->cur, 2280 cpufreq_out_of_sync(policy, new_policy.cur);
2311 new_policy.cur);
2312 } 2281 }
2313 } 2282 }
2314 2283
@@ -2364,7 +2333,7 @@ static int cpufreq_boost_set_sw(int state)
2364 struct cpufreq_policy *policy; 2333 struct cpufreq_policy *policy;
2365 int ret = -EINVAL; 2334 int ret = -EINVAL;
2366 2335
2367 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { 2336 for_each_policy(policy) {
2368 freq_table = cpufreq_frequency_get_table(policy->cpu); 2337 freq_table = cpufreq_frequency_get_table(policy->cpu);
2369 if (freq_table) { 2338 if (freq_table) {
2370 ret = cpufreq_frequency_table_cpuinfo(policy, 2339 ret = cpufreq_frequency_table_cpuinfo(policy,
@@ -2454,9 +2423,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2454 2423
2455 pr_debug("trying to register driver %s\n", driver_data->name); 2424 pr_debug("trying to register driver %s\n", driver_data->name);
2456 2425
2457 if (driver_data->setpolicy)
2458 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2459
2460 write_lock_irqsave(&cpufreq_driver_lock, flags); 2426 write_lock_irqsave(&cpufreq_driver_lock, flags);
2461 if (cpufreq_driver) { 2427 if (cpufreq_driver) {
2462 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2428 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2465,6 +2431,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2465 cpufreq_driver = driver_data; 2431 cpufreq_driver = driver_data;
2466 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2432 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2467 2433
2434 if (driver_data->setpolicy)
2435 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2436
2468 if (cpufreq_boost_supported()) { 2437 if (cpufreq_boost_supported()) {
2469 /* 2438 /*
2470 * Check if driver provides function to enable boost - 2439 * Check if driver provides function to enable boost -
@@ -2485,23 +2454,12 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2485 if (ret) 2454 if (ret)
2486 goto err_boost_unreg; 2455 goto err_boost_unreg;
2487 2456
2488 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { 2457 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2489 int i; 2458 list_empty(&cpufreq_policy_list)) {
2490 ret = -ENODEV;
2491
2492 /* check for at least one working CPU */
2493 for (i = 0; i < nr_cpu_ids; i++)
2494 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2495 ret = 0;
2496 break;
2497 }
2498
2499 /* if all ->init() calls failed, unregister */ 2459 /* if all ->init() calls failed, unregister */
2500 if (ret) { 2460 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2501 pr_debug("no CPU initialized for driver %s\n", 2461 driver_data->name);
2502 driver_data->name); 2462 goto err_if_unreg;
2503 goto err_if_unreg;
2504 }
2505 } 2463 }
2506 2464
2507 register_hotcpu_notifier(&cpufreq_cpu_notifier); 2465 register_hotcpu_notifier(&cpufreq_cpu_notifier);
@@ -2556,6 +2514,14 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2556} 2514}
2557EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 2515EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2558 2516
2517/*
2518 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2519 * or mutexes when secondary CPUs are halted.
2520 */
2521static struct syscore_ops cpufreq_syscore_ops = {
2522 .shutdown = cpufreq_suspend,
2523};
2524
2559static int __init cpufreq_core_init(void) 2525static int __init cpufreq_core_init(void)
2560{ 2526{
2561 if (cpufreq_disabled()) 2527 if (cpufreq_disabled())
@@ -2564,6 +2530,8 @@ static int __init cpufreq_core_init(void)
2564 cpufreq_global_kobject = kobject_create(); 2530 cpufreq_global_kobject = kobject_create();
2565 BUG_ON(!cpufreq_global_kobject); 2531 BUG_ON(!cpufreq_global_kobject);
2566 2532
2533 register_syscore_ops(&cpufreq_syscore_ops);
2534
2567 return 0; 2535 return 0;
2568} 2536}
2569core_initcall(cpufreq_core_init); 2537core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 0cd9b4dcef99..5e370a30a964 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -18,7 +18,6 @@
18static spinlock_t cpufreq_stats_lock; 18static spinlock_t cpufreq_stats_lock;
19 19
20struct cpufreq_stats { 20struct cpufreq_stats {
21 unsigned int cpu;
22 unsigned int total_trans; 21 unsigned int total_trans;
23 unsigned long long last_time; 22 unsigned long long last_time;
24 unsigned int max_state; 23 unsigned int max_state;
@@ -31,50 +30,33 @@ struct cpufreq_stats {
31#endif 30#endif
32}; 31};
33 32
34static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table); 33static int cpufreq_stats_update(struct cpufreq_stats *stats)
35
36struct cpufreq_stats_attribute {
37 struct attribute attr;
38 ssize_t(*show) (struct cpufreq_stats *, char *);
39};
40
41static int cpufreq_stats_update(unsigned int cpu)
42{ 34{
43 struct cpufreq_stats *stat; 35 unsigned long long cur_time = get_jiffies_64();
44 unsigned long long cur_time;
45 36
46 cur_time = get_jiffies_64();
47 spin_lock(&cpufreq_stats_lock); 37 spin_lock(&cpufreq_stats_lock);
48 stat = per_cpu(cpufreq_stats_table, cpu); 38 stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
49 if (stat->time_in_state) 39 stats->last_time = cur_time;
50 stat->time_in_state[stat->last_index] +=
51 cur_time - stat->last_time;
52 stat->last_time = cur_time;
53 spin_unlock(&cpufreq_stats_lock); 40 spin_unlock(&cpufreq_stats_lock);
54 return 0; 41 return 0;
55} 42}
56 43
57static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) 44static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
58{ 45{
59 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 46 return sprintf(buf, "%d\n", policy->stats->total_trans);
60 if (!stat)
61 return 0;
62 return sprintf(buf, "%d\n",
63 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
64} 47}
65 48
66static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) 49static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
67{ 50{
51 struct cpufreq_stats *stats = policy->stats;
68 ssize_t len = 0; 52 ssize_t len = 0;
69 int i; 53 int i;
70 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 54
71 if (!stat) 55 cpufreq_stats_update(stats);
72 return 0; 56 for (i = 0; i < stats->state_num; i++) {
73 cpufreq_stats_update(stat->cpu); 57 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
74 for (i = 0; i < stat->state_num; i++) {
75 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
76 (unsigned long long) 58 (unsigned long long)
77 jiffies_64_to_clock_t(stat->time_in_state[i])); 59 jiffies_64_to_clock_t(stats->time_in_state[i]));
78 } 60 }
79 return len; 61 return len;
80} 62}
@@ -82,38 +64,35 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
82#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 64#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
83static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) 65static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
84{ 66{
67 struct cpufreq_stats *stats = policy->stats;
85 ssize_t len = 0; 68 ssize_t len = 0;
86 int i, j; 69 int i, j;
87 70
88 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
89 if (!stat)
90 return 0;
91 cpufreq_stats_update(stat->cpu);
92 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); 71 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
93 len += snprintf(buf + len, PAGE_SIZE - len, " : "); 72 len += snprintf(buf + len, PAGE_SIZE - len, " : ");
94 for (i = 0; i < stat->state_num; i++) { 73 for (i = 0; i < stats->state_num; i++) {
95 if (len >= PAGE_SIZE) 74 if (len >= PAGE_SIZE)
96 break; 75 break;
97 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", 76 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
98 stat->freq_table[i]); 77 stats->freq_table[i]);
99 } 78 }
100 if (len >= PAGE_SIZE) 79 if (len >= PAGE_SIZE)
101 return PAGE_SIZE; 80 return PAGE_SIZE;
102 81
103 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 82 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
104 83
105 for (i = 0; i < stat->state_num; i++) { 84 for (i = 0; i < stats->state_num; i++) {
106 if (len >= PAGE_SIZE) 85 if (len >= PAGE_SIZE)
107 break; 86 break;
108 87
109 len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ", 88 len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
110 stat->freq_table[i]); 89 stats->freq_table[i]);
111 90
112 for (j = 0; j < stat->state_num; j++) { 91 for (j = 0; j < stats->state_num; j++) {
113 if (len >= PAGE_SIZE) 92 if (len >= PAGE_SIZE)
114 break; 93 break;
115 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", 94 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
116 stat->trans_table[i*stat->max_state+j]); 95 stats->trans_table[i*stats->max_state+j]);
117 } 96 }
118 if (len >= PAGE_SIZE) 97 if (len >= PAGE_SIZE)
119 break; 98 break;
@@ -142,28 +121,29 @@ static struct attribute_group stats_attr_group = {
142 .name = "stats" 121 .name = "stats"
143}; 122};
144 123
145static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) 124static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
146{ 125{
147 int index; 126 int index;
148 for (index = 0; index < stat->max_state; index++) 127 for (index = 0; index < stats->max_state; index++)
149 if (stat->freq_table[index] == freq) 128 if (stats->freq_table[index] == freq)
150 return index; 129 return index;
151 return -1; 130 return -1;
152} 131}
153 132
154static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) 133static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
155{ 134{
156 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 135 struct cpufreq_stats *stats = policy->stats;
157 136
158 if (!stat) 137 /* Already freed */
138 if (!stats)
159 return; 139 return;
160 140
161 pr_debug("%s: Free stat table\n", __func__); 141 pr_debug("%s: Free stats table\n", __func__);
162 142
163 sysfs_remove_group(&policy->kobj, &stats_attr_group); 143 sysfs_remove_group(&policy->kobj, &stats_attr_group);
164 kfree(stat->time_in_state); 144 kfree(stats->time_in_state);
165 kfree(stat); 145 kfree(stats);
166 per_cpu(cpufreq_stats_table, policy->cpu) = NULL; 146 policy->stats = NULL;
167} 147}
168 148
169static void cpufreq_stats_free_table(unsigned int cpu) 149static void cpufreq_stats_free_table(unsigned int cpu)
@@ -174,37 +154,33 @@ static void cpufreq_stats_free_table(unsigned int cpu)
174 if (!policy) 154 if (!policy)
175 return; 155 return;
176 156
177 if (cpufreq_frequency_get_table(policy->cpu)) 157 __cpufreq_stats_free_table(policy);
178 __cpufreq_stats_free_table(policy);
179 158
180 cpufreq_cpu_put(policy); 159 cpufreq_cpu_put(policy);
181} 160}
182 161
183static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) 162static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
184{ 163{
185 unsigned int i, count = 0, ret = 0; 164 unsigned int i = 0, count = 0, ret = -ENOMEM;
186 struct cpufreq_stats *stat; 165 struct cpufreq_stats *stats;
187 unsigned int alloc_size; 166 unsigned int alloc_size;
188 unsigned int cpu = policy->cpu; 167 unsigned int cpu = policy->cpu;
189 struct cpufreq_frequency_table *pos, *table; 168 struct cpufreq_frequency_table *pos, *table;
190 169
170 /* We need cpufreq table for creating stats table */
191 table = cpufreq_frequency_get_table(cpu); 171 table = cpufreq_frequency_get_table(cpu);
192 if (unlikely(!table)) 172 if (unlikely(!table))
193 return 0; 173 return 0;
194 174
195 if (per_cpu(cpufreq_stats_table, cpu)) 175 /* stats already initialized */
196 return -EBUSY; 176 if (policy->stats)
197 stat = kzalloc(sizeof(*stat), GFP_KERNEL); 177 return -EEXIST;
198 if ((stat) == NULL)
199 return -ENOMEM;
200
201 ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
202 if (ret)
203 goto error_out;
204 178
205 stat->cpu = cpu; 179 stats = kzalloc(sizeof(*stats), GFP_KERNEL);
206 per_cpu(cpufreq_stats_table, cpu) = stat; 180 if (!stats)
181 return -ENOMEM;
207 182
183 /* Find total allocation size */
208 cpufreq_for_each_valid_entry(pos, table) 184 cpufreq_for_each_valid_entry(pos, table)
209 count++; 185 count++;
210 186
@@ -213,32 +189,40 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
213#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 189#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
214 alloc_size += count * count * sizeof(int); 190 alloc_size += count * count * sizeof(int);
215#endif 191#endif
216 stat->max_state = count; 192
217 stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); 193 /* Allocate memory for time_in_state/freq_table/trans_table in one go */
218 if (!stat->time_in_state) { 194 stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
219 ret = -ENOMEM; 195 if (!stats->time_in_state)
220 goto error_alloc; 196 goto free_stat;
221 } 197
222 stat->freq_table = (unsigned int *)(stat->time_in_state + count); 198 stats->freq_table = (unsigned int *)(stats->time_in_state + count);
223 199
224#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 200#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
225 stat->trans_table = stat->freq_table + count; 201 stats->trans_table = stats->freq_table + count;
226#endif 202#endif
227 i = 0; 203
204 stats->max_state = count;
205
206 /* Find valid-unique entries */
228 cpufreq_for_each_valid_entry(pos, table) 207 cpufreq_for_each_valid_entry(pos, table)
229 if (freq_table_get_index(stat, pos->frequency) == -1) 208 if (freq_table_get_index(stats, pos->frequency) == -1)
230 stat->freq_table[i++] = pos->frequency; 209 stats->freq_table[i++] = pos->frequency;
231 stat->state_num = i; 210
232 spin_lock(&cpufreq_stats_lock); 211 stats->state_num = i;
233 stat->last_time = get_jiffies_64(); 212 stats->last_time = get_jiffies_64();
234 stat->last_index = freq_table_get_index(stat, policy->cur); 213 stats->last_index = freq_table_get_index(stats, policy->cur);
235 spin_unlock(&cpufreq_stats_lock); 214
236 return 0; 215 policy->stats = stats;
237error_alloc: 216 ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
238 sysfs_remove_group(&policy->kobj, &stats_attr_group); 217 if (!ret)
239error_out: 218 return 0;
240 kfree(stat); 219
241 per_cpu(cpufreq_stats_table, cpu) = NULL; 220 /* We failed, release resources */
221 policy->stats = NULL;
222 kfree(stats->time_in_state);
223free_stat:
224 kfree(stats);
225
242 return ret; 226 return ret;
243} 227}
244 228
@@ -259,30 +243,12 @@ static void cpufreq_stats_create_table(unsigned int cpu)
259 cpufreq_cpu_put(policy); 243 cpufreq_cpu_put(policy);
260} 244}
261 245
262static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
263{
264 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
265 policy->last_cpu);
266
267 pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
268 policy->cpu, policy->last_cpu);
269 per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
270 policy->last_cpu);
271 per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
272 stat->cpu = policy->cpu;
273}
274
275static int cpufreq_stat_notifier_policy(struct notifier_block *nb, 246static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
276 unsigned long val, void *data) 247 unsigned long val, void *data)
277{ 248{
278 int ret = 0; 249 int ret = 0;
279 struct cpufreq_policy *policy = data; 250 struct cpufreq_policy *policy = data;
280 251
281 if (val == CPUFREQ_UPDATE_POLICY_CPU) {
282 cpufreq_stats_update_policy_cpu(policy);
283 return 0;
284 }
285
286 if (val == CPUFREQ_CREATE_POLICY) 252 if (val == CPUFREQ_CREATE_POLICY)
287 ret = __cpufreq_stats_create_table(policy); 253 ret = __cpufreq_stats_create_table(policy);
288 else if (val == CPUFREQ_REMOVE_POLICY) 254 else if (val == CPUFREQ_REMOVE_POLICY)
@@ -295,35 +261,45 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
295 unsigned long val, void *data) 261 unsigned long val, void *data)
296{ 262{
297 struct cpufreq_freqs *freq = data; 263 struct cpufreq_freqs *freq = data;
298 struct cpufreq_stats *stat; 264 struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
265 struct cpufreq_stats *stats;
299 int old_index, new_index; 266 int old_index, new_index;
300 267
301 if (val != CPUFREQ_POSTCHANGE) 268 if (!policy) {
269 pr_err("%s: No policy found\n", __func__);
302 return 0; 270 return 0;
271 }
303 272
304 stat = per_cpu(cpufreq_stats_table, freq->cpu); 273 if (val != CPUFREQ_POSTCHANGE)
305 if (!stat) 274 goto put_policy;
306 return 0;
307 275
308 old_index = stat->last_index; 276 if (!policy->stats) {
309 new_index = freq_table_get_index(stat, freq->new); 277 pr_debug("%s: No stats found\n", __func__);
278 goto put_policy;
279 }
310 280
311 /* We can't do stat->time_in_state[-1]= .. */ 281 stats = policy->stats;
312 if (old_index == -1 || new_index == -1) 282
313 return 0; 283 old_index = stats->last_index;
284 new_index = freq_table_get_index(stats, freq->new);
314 285
315 cpufreq_stats_update(freq->cpu); 286 /* We can't do stats->time_in_state[-1]= .. */
287 if (old_index == -1 || new_index == -1)
288 goto put_policy;
316 289
317 if (old_index == new_index) 290 if (old_index == new_index)
318 return 0; 291 goto put_policy;
319 292
320 spin_lock(&cpufreq_stats_lock); 293 cpufreq_stats_update(stats);
321 stat->last_index = new_index; 294
295 stats->last_index = new_index;
322#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 296#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
323 stat->trans_table[old_index * stat->max_state + new_index]++; 297 stats->trans_table[old_index * stats->max_state + new_index]++;
324#endif 298#endif
325 stat->total_trans++; 299 stats->total_trans++;
326 spin_unlock(&cpufreq_stats_lock); 300
301put_policy:
302 cpufreq_cpu_put(policy);
327 return 0; 303 return 0;
328} 304}
329 305
@@ -374,8 +350,7 @@ static void __exit cpufreq_stats_exit(void)
374} 350}
375 351
376MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); 352MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
377MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats " 353MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
378 "through sysfs filesystem");
379MODULE_LICENSE("GPL"); 354MODULE_LICENSE("GPL");
380 355
381module_init(cpufreq_stats_init); 356module_init(cpufreq_stats_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 742eefba12c2..872c5772c5d3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -148,6 +148,8 @@ struct perf_limits {
148 int32_t min_perf; 148 int32_t min_perf;
149 int max_policy_pct; 149 int max_policy_pct;
150 int max_sysfs_pct; 150 int max_sysfs_pct;
151 int min_policy_pct;
152 int min_sysfs_pct;
151}; 153};
152 154
153static struct perf_limits limits = { 155static struct perf_limits limits = {
@@ -159,6 +161,8 @@ static struct perf_limits limits = {
159 .min_perf = 0, 161 .min_perf = 0,
160 .max_policy_pct = 100, 162 .max_policy_pct = 100,
161 .max_sysfs_pct = 100, 163 .max_sysfs_pct = 100,
164 .min_policy_pct = 0,
165 .min_sysfs_pct = 0,
162}; 166};
163 167
164static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 168static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -338,6 +342,33 @@ static void __init intel_pstate_debug_expose_params(void)
338 return sprintf(buf, "%u\n", limits.object); \ 342 return sprintf(buf, "%u\n", limits.object); \
339 } 343 }
340 344
345static ssize_t show_turbo_pct(struct kobject *kobj,
346 struct attribute *attr, char *buf)
347{
348 struct cpudata *cpu;
349 int total, no_turbo, turbo_pct;
350 uint32_t turbo_fp;
351
352 cpu = all_cpu_data[0];
353
354 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
355 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
356 turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
357 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
358 return sprintf(buf, "%u\n", turbo_pct);
359}
360
361static ssize_t show_num_pstates(struct kobject *kobj,
362 struct attribute *attr, char *buf)
363{
364 struct cpudata *cpu;
365 int total;
366
367 cpu = all_cpu_data[0];
368 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
369 return sprintf(buf, "%u\n", total);
370}
371
341static ssize_t show_no_turbo(struct kobject *kobj, 372static ssize_t show_no_turbo(struct kobject *kobj,
342 struct attribute *attr, char *buf) 373 struct attribute *attr, char *buf)
343{ 374{
@@ -404,7 +435,9 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
404 ret = sscanf(buf, "%u", &input); 435 ret = sscanf(buf, "%u", &input);
405 if (ret != 1) 436 if (ret != 1)
406 return -EINVAL; 437 return -EINVAL;
407 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 438
439 limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
440 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
408 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 441 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
409 442
410 if (hwp_active) 443 if (hwp_active)
@@ -418,11 +451,15 @@ show_one(min_perf_pct, min_perf_pct);
418define_one_global_rw(no_turbo); 451define_one_global_rw(no_turbo);
419define_one_global_rw(max_perf_pct); 452define_one_global_rw(max_perf_pct);
420define_one_global_rw(min_perf_pct); 453define_one_global_rw(min_perf_pct);
454define_one_global_ro(turbo_pct);
455define_one_global_ro(num_pstates);
421 456
422static struct attribute *intel_pstate_attributes[] = { 457static struct attribute *intel_pstate_attributes[] = {
423 &no_turbo.attr, 458 &no_turbo.attr,
424 &max_perf_pct.attr, 459 &max_perf_pct.attr,
425 &min_perf_pct.attr, 460 &min_perf_pct.attr,
461 &turbo_pct.attr,
462 &num_pstates.attr,
426 NULL 463 NULL
427}; 464};
428 465
@@ -825,6 +862,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
825 ICPU(0x46, core_params), 862 ICPU(0x46, core_params),
826 ICPU(0x47, core_params), 863 ICPU(0x47, core_params),
827 ICPU(0x4c, byt_params), 864 ICPU(0x4c, byt_params),
865 ICPU(0x4e, core_params),
828 ICPU(0x4f, core_params), 866 ICPU(0x4f, core_params),
829 ICPU(0x56, core_params), 867 ICPU(0x56, core_params),
830 {} 868 {}
@@ -887,7 +925,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
887 if (!policy->cpuinfo.max_freq) 925 if (!policy->cpuinfo.max_freq)
888 return -ENODEV; 926 return -ENODEV;
889 927
890 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 928 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
929 policy->max >= policy->cpuinfo.max_freq) {
930 limits.min_policy_pct = 100;
891 limits.min_perf_pct = 100; 931 limits.min_perf_pct = 100;
892 limits.min_perf = int_tofp(1); 932 limits.min_perf = int_tofp(1);
893 limits.max_policy_pct = 100; 933 limits.max_policy_pct = 100;
@@ -897,8 +937,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
897 return 0; 937 return 0;
898 } 938 }
899 939
900 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 940 limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
901 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 941 limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
942 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
902 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 943 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
903 944
904 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; 945 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
@@ -978,6 +1019,7 @@ static struct cpufreq_driver intel_pstate_driver = {
978 1019
979static int __initdata no_load; 1020static int __initdata no_load;
980static int __initdata no_hwp; 1021static int __initdata no_hwp;
1022static int __initdata hwp_only;
981static unsigned int force_load; 1023static unsigned int force_load;
982 1024
983static int intel_pstate_msrs_not_valid(void) 1025static int intel_pstate_msrs_not_valid(void)
@@ -1175,6 +1217,9 @@ static int __init intel_pstate_init(void)
1175 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp) 1217 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
1176 intel_pstate_hwp_enable(); 1218 intel_pstate_hwp_enable();
1177 1219
1220 if (!hwp_active && hwp_only)
1221 goto out;
1222
1178 rc = cpufreq_register_driver(&intel_pstate_driver); 1223 rc = cpufreq_register_driver(&intel_pstate_driver);
1179 if (rc) 1224 if (rc)
1180 goto out; 1225 goto out;
@@ -1209,6 +1254,8 @@ static int __init intel_pstate_setup(char *str)
1209 no_hwp = 1; 1254 no_hwp = 1;
1210 if (!strcmp(str, "force")) 1255 if (!strcmp(str, "force"))
1211 force_load = 1; 1256 force_load = 1;
1257 if (!strcmp(str, "hwp_only"))
1258 hwp_only = 1;
1212 return 0; 1259 return 0;
1213} 1260}
1214early_param("intel_pstate", intel_pstate_setup); 1261early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
index 25fbd6a1374f..f0913eee2f50 100644
--- a/drivers/cpufreq/ls1x-cpufreq.c
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -210,7 +210,6 @@ out:
210static struct platform_driver ls1x_cpufreq_platdrv = { 210static struct platform_driver ls1x_cpufreq_platdrv = {
211 .driver = { 211 .driver = {
212 .name = "ls1x-cpufreq", 212 .name = "ls1x-cpufreq",
213 .owner = THIS_MODULE,
214 }, 213 },
215 .probe = ls1x_cpufreq_probe, 214 .probe = ls1x_cpufreq_probe,
216 .remove = ls1x_cpufreq_remove, 215 .remove = ls1x_cpufreq_remove,
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
new file mode 100644
index 000000000000..ffa3389e535b
--- /dev/null
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -0,0 +1,136 @@
1/*
2 * SFI Performance States Driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
14 * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
15 */
16
17#include <linux/cpufreq.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/sfi.h>
22#include <linux/slab.h>
23#include <linux/smp.h>
24
25#include <asm/msr.h>
26
27struct cpufreq_frequency_table *freq_table;
28static struct sfi_freq_table_entry *sfi_cpufreq_array;
29static int num_freq_table_entries;
30
31static int sfi_parse_freq(struct sfi_table_header *table)
32{
33 struct sfi_table_simple *sb;
34 struct sfi_freq_table_entry *pentry;
35 int totallen;
36
37 sb = (struct sfi_table_simple *)table;
38 num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb,
39 struct sfi_freq_table_entry);
40 if (num_freq_table_entries <= 1) {
41 pr_err("No p-states discovered\n");
42 return -ENODEV;
43 }
44
45 pentry = (struct sfi_freq_table_entry *)sb->pentry;
46 totallen = num_freq_table_entries * sizeof(*pentry);
47
48 sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
49 if (!sfi_cpufreq_array)
50 return -ENOMEM;
51
52 memcpy(sfi_cpufreq_array, pentry, totallen);
53
54 return 0;
55}
56
57static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
58{
59 unsigned int next_perf_state = 0; /* Index into perf table */
60 u32 lo, hi;
61
62 next_perf_state = policy->freq_table[index].driver_data;
63
64 rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
65 lo = (lo & ~INTEL_PERF_CTL_MASK) |
66 ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val &
67 INTEL_PERF_CTL_MASK);
68 wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
69
70 return 0;
71}
72
73static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
74{
75 policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
76 policy->cpuinfo.transition_latency = 100000; /* 100us */
77
78 return cpufreq_table_validate_and_show(policy, freq_table);
79}
80
81static struct cpufreq_driver sfi_cpufreq_driver = {
82 .flags = CPUFREQ_CONST_LOOPS,
83 .verify = cpufreq_generic_frequency_table_verify,
84 .target_index = sfi_cpufreq_target,
85 .init = sfi_cpufreq_cpu_init,
86 .name = "sfi-cpufreq",
87 .attr = cpufreq_generic_attr,
88};
89
90static int __init sfi_cpufreq_init(void)
91{
92 int ret, i;
93
94 /* parse the freq table from SFI */
95 ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
96 if (ret)
97 return ret;
98
99 freq_table = kzalloc(sizeof(*freq_table) *
100 (num_freq_table_entries + 1), GFP_KERNEL);
101 if (!freq_table) {
102 ret = -ENOMEM;
103 goto err_free_array;
104 }
105
106 for (i = 0; i < num_freq_table_entries; i++) {
107 freq_table[i].driver_data = i;
108 freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
109 }
110 freq_table[i].frequency = CPUFREQ_TABLE_END;
111
112 ret = cpufreq_register_driver(&sfi_cpufreq_driver);
113 if (ret)
114 goto err_free_tbl;
115
116 return ret;
117
118err_free_tbl:
119 kfree(freq_table);
120err_free_array:
121 kfree(sfi_cpufreq_array);
122 return ret;
123}
124late_initcall(sfi_cpufreq_init);
125
126static void __exit sfi_cpufreq_exit(void)
127{
128 cpufreq_unregister_driver(&sfi_cpufreq_driver);
129 kfree(freq_table);
130 kfree(sfi_cpufreq_array);
131}
132module_exit(sfi_cpufreq_exit);
133
134MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
135MODULE_DESCRIPTION("SFI Performance-States Driver");
136MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 7047821a7f8a..4ab7a2156672 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
400 400
401 pr_debug("previous speed is %u\n", prev_speed); 401 pr_debug("previous speed is %u\n", prev_speed);
402 402
403 preempt_disable();
403 local_irq_save(flags); 404 local_irq_save(flags);
404 405
405 /* switch to low state */ 406 /* switch to low state */
@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
464 465
465out: 466out:
466 local_irq_restore(flags); 467 local_irq_restore(flags);
468 preempt_enable();
469
467 return ret; 470 return ret;
468} 471}
469EXPORT_SYMBOL_GPL(speedstep_get_freqs); 472EXPORT_SYMBOL_GPL(speedstep_get_freqs);
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 5fc96d5d656b..819229e824fb 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
156 return; 156 return;
157 157
158 /* Disable IRQs */ 158 /* Disable IRQs */
159 preempt_disable();
159 local_irq_save(flags); 160 local_irq_save(flags);
160 161
161 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); 162 command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
166 167
167 do { 168 do {
168 if (retry) { 169 if (retry) {
170 /*
171 * We need to enable interrupts, otherwise the blockage
172 * won't resolve.
173 *
174 * We disable preemption so that other processes don't
175 * run. If other processes were running, they could
176 * submit more DMA requests, making the blockage worse.
177 */
169 pr_debug("retry %u, previous result %u, waiting...\n", 178 pr_debug("retry %u, previous result %u, waiting...\n",
170 retry, result); 179 retry, result);
180 local_irq_enable();
171 mdelay(retry * 50); 181 mdelay(retry * 50);
182 local_irq_disable();
172 } 183 }
173 retry++; 184 retry++;
174 __asm__ __volatile__( 185 __asm__ __volatile__(
@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
185 196
186 /* enable IRQs */ 197 /* enable IRQs */
187 local_irq_restore(flags); 198 local_irq_restore(flags);
199 preempt_enable();
188 200
189 if (new_state == state) 201 if (new_state == state)
190 pr_debug("change to %u MHz succeeded after %u tries " 202 pr_debug("change to %u MHz succeeded after %u tries "
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index e3e225fe6b45..40c34faffe59 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -182,6 +182,10 @@ static int __init bl_idle_init(void)
182 */ 182 */
183 if (!of_match_node(compatible_machine_match, root)) 183 if (!of_match_node(compatible_machine_match, root))
184 return -ENODEV; 184 return -ENODEV;
185
186 if (!mcpm_is_available())
187 return -EUNATCH;
188
185 /* 189 /*
186 * For now the differentiation between little and big cores 190 * For now the differentiation between little and big cores
187 * is based on the part number. A7 cores are considered little 191 * is based on the part number. A7 cores are considered little
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index faf4e70c42e0..64281bb2f650 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,5 +1,6 @@
1menuconfig PM_DEVFREQ 1menuconfig PM_DEVFREQ
2 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" 2 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
3 select SRCU
3 help 4 help
4 A device may have a list of frequencies and voltages available. 5 A device may have a list of frequencies and voltages available.
5 devfreq, a generic DVFS framework can be registered for a device 6 devfreq, a generic DVFS framework can be registered for a device
@@ -87,4 +88,16 @@ config ARM_EXYNOS5_BUS_DEVFREQ
87 It reads PPMU counters of memory controllers and adjusts the 88 It reads PPMU counters of memory controllers and adjusts the
88 operating frequencies and voltages with OPP support. 89 operating frequencies and voltages with OPP support.
89 90
91config ARM_TEGRA_DEVFREQ
92 tristate "Tegra DEVFREQ Driver"
93 depends on ARCH_TEGRA_124_SOC
94 select DEVFREQ_GOV_SIMPLE_ONDEMAND
95 select PM_OPP
96 help
97 This adds the DEVFREQ driver for the Tegra family of SoCs.
98 It reads ACTMON counters of memory controllers and adjusts the
99 operating frequencies and voltages with OPP support.
100
101source "drivers/devfreq/event/Kconfig"
102
90endif # PM_DEVFREQ 103endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 16138c9e0d58..5134f9ee983d 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -1,4 +1,5 @@
1obj-$(CONFIG_PM_DEVFREQ) += devfreq.o 1obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
2obj-$(CONFIG_PM_DEVFREQ_EVENT) += devfreq-event.o
2obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o 3obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
3obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o 4obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
4obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o 5obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
@@ -7,3 +8,7 @@ obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
7# DEVFREQ Drivers 8# DEVFREQ Drivers
8obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/ 9obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
9obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/ 10obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/
11obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
12
13# DEVFREQ Event Drivers
14obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
new file mode 100644
index 000000000000..f304a0289eda
--- /dev/null
+++ b/drivers/devfreq/devfreq-event.c
@@ -0,0 +1,494 @@
1/*
2 * devfreq-event: a framework to provide raw data and events of devfreq devices
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This driver is based on drivers/devfreq/devfreq.c.
12 */
13
14#include <linux/devfreq-event.h>
15#include <linux/kernel.h>
16#include <linux/err.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/list.h>
21#include <linux/of.h>
22
23static struct class *devfreq_event_class;
24
25/* The list of all devfreq event list */
26static LIST_HEAD(devfreq_event_list);
27static DEFINE_MUTEX(devfreq_event_list_lock);
28
29#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
30
31/**
32 * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
33 * the enable_count of devfreq-event dev.
34 * @edev : the devfreq-event device
35 *
36 * Note that this function increase the enable_count and enable the
37 * devfreq-event device. The devfreq-event device should be enabled before
38 * using it by devfreq device.
39 */
40int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
41{
42 int ret = 0;
43
44 if (!edev || !edev->desc)
45 return -EINVAL;
46
47 mutex_lock(&edev->lock);
48 if (edev->desc->ops && edev->desc->ops->enable
49 && edev->enable_count == 0) {
50 ret = edev->desc->ops->enable(edev);
51 if (ret < 0)
52 goto err;
53 }
54 edev->enable_count++;
55err:
56 mutex_unlock(&edev->lock);
57
58 return ret;
59}
60EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
61
62/**
63 * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
64 * the enable_count of the devfreq-event dev.
65 * @edev : the devfreq-event device
66 *
67 * Note that this function decrease the enable_count and disable the
68 * devfreq-event device. After the devfreq-event device is disabled,
69 * devfreq device can't use the devfreq-event device for get/set/reset
70 * operations.
71 */
72int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
73{
74 int ret = 0;
75
76 if (!edev || !edev->desc)
77 return -EINVAL;
78
79 mutex_lock(&edev->lock);
80 if (edev->enable_count <= 0) {
81 dev_warn(&edev->dev, "unbalanced enable_count\n");
82 ret = -EIO;
83 goto err;
84 }
85
86 if (edev->desc->ops && edev->desc->ops->disable
87 && edev->enable_count == 1) {
88 ret = edev->desc->ops->disable(edev);
89 if (ret < 0)
90 goto err;
91 }
92 edev->enable_count--;
93err:
94 mutex_unlock(&edev->lock);
95
96 return ret;
97}
98EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
99
100/**
101 * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
102 * not.
103 * @edev : the devfreq-event device
104 *
105 * Note that this function check whether devfreq-event dev is enabled or not.
106 * If return true, the devfreq-event dev is enabeld. If return false, the
107 * devfreq-event dev is disabled.
108 */
109bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
110{
111 bool enabled = false;
112
113 if (!edev || !edev->desc)
114 return enabled;
115
116 mutex_lock(&edev->lock);
117
118 if (edev->enable_count > 0)
119 enabled = true;
120
121 mutex_unlock(&edev->lock);
122
123 return enabled;
124}
125EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
126
127/**
128 * devfreq_event_set_event() - Set event to devfreq-event dev to start.
129 * @edev : the devfreq-event device
130 *
131 * Note that this function set the event to the devfreq-event device to start
132 * for getting the event data which could be various event type.
133 */
134int devfreq_event_set_event(struct devfreq_event_dev *edev)
135{
136 int ret;
137
138 if (!edev || !edev->desc)
139 return -EINVAL;
140
141 if (!edev->desc->ops || !edev->desc->ops->set_event)
142 return -EINVAL;
143
144 if (!devfreq_event_is_enabled(edev))
145 return -EPERM;
146
147 mutex_lock(&edev->lock);
148 ret = edev->desc->ops->set_event(edev);
149 mutex_unlock(&edev->lock);
150
151 return ret;
152}
153EXPORT_SYMBOL_GPL(devfreq_event_set_event);
154
155/**
156 * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
157 * @edev : the devfreq-event device
158 * @edata : the calculated data of devfreq-event device
159 *
160 * Note that this function get the calculated event data from devfreq-event dev
161 * after stoping the progress of whole sequence of devfreq-event dev.
162 */
163int devfreq_event_get_event(struct devfreq_event_dev *edev,
164 struct devfreq_event_data *edata)
165{
166 int ret;
167
168 if (!edev || !edev->desc)
169 return -EINVAL;
170
171 if (!edev->desc->ops || !edev->desc->ops->get_event)
172 return -EINVAL;
173
174 if (!devfreq_event_is_enabled(edev))
175 return -EINVAL;
176
177 edata->total_count = edata->load_count = 0;
178
179 mutex_lock(&edev->lock);
180 ret = edev->desc->ops->get_event(edev, edata);
181 if (ret < 0)
182 edata->total_count = edata->load_count = 0;
183 mutex_unlock(&edev->lock);
184
185 return ret;
186}
187EXPORT_SYMBOL_GPL(devfreq_event_get_event);
188
189/**
190 * devfreq_event_reset_event() - Reset all opeations of devfreq-event dev.
191 * @edev : the devfreq-event device
192 *
193 * Note that this function stop all operations of devfreq-event dev and reset
194 * the current event data to make the devfreq-event device into initial state.
195 */
196int devfreq_event_reset_event(struct devfreq_event_dev *edev)
197{
198 int ret = 0;
199
200 if (!edev || !edev->desc)
201 return -EINVAL;
202
203 if (!devfreq_event_is_enabled(edev))
204 return -EPERM;
205
206 mutex_lock(&edev->lock);
207 if (edev->desc->ops && edev->desc->ops->reset)
208 ret = edev->desc->ops->reset(edev);
209 mutex_unlock(&edev->lock);
210
211 return ret;
212}
213EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
214
215/**
216 * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
217 * devicetree.
218 * @dev : the pointer to the given device
219 * @index : the index into list of devfreq-event device
220 *
221 * Note that this function return the pointer of devfreq-event device.
222 */
223struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
224 int index)
225{
226 struct device_node *node;
227 struct devfreq_event_dev *edev;
228
229 if (!dev->of_node) {
230 dev_err(dev, "device does not have a device node entry\n");
231 return ERR_PTR(-EINVAL);
232 }
233
234 node = of_parse_phandle(dev->of_node, "devfreq-events", index);
235 if (!node) {
236 dev_err(dev, "failed to get phandle in %s node\n",
237 dev->of_node->full_name);
238 return ERR_PTR(-ENODEV);
239 }
240
241 mutex_lock(&devfreq_event_list_lock);
242 list_for_each_entry(edev, &devfreq_event_list, node) {
243 if (!strcmp(edev->desc->name, node->name))
244 goto out;
245 }
246 edev = NULL;
247out:
248 mutex_unlock(&devfreq_event_list_lock);
249
250 if (!edev) {
251 dev_err(dev, "unable to get devfreq-event device : %s\n",
252 node->name);
253 of_node_put(node);
254 return ERR_PTR(-ENODEV);
255 }
256
257 of_node_put(node);
258
259 return edev;
260}
261EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
262
263/**
264 * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
265 * @dev : the pointer to the given device
266 *
267 * Note that this function return the count of devfreq-event devices.
268 */
269int devfreq_event_get_edev_count(struct device *dev)
270{
271 int count;
272
273 if (!dev->of_node) {
274 dev_err(dev, "device does not have a device node entry\n");
275 return -EINVAL;
276 }
277
278 count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
279 sizeof(u32));
280 if (count < 0 ) {
281 dev_err(dev,
282 "failed to get the count of devfreq-event in %s node\n",
283 dev->of_node->full_name);
284 return count;
285 }
286
287 return count;
288}
289EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
290
291static void devfreq_event_release_edev(struct device *dev)
292{
293 struct devfreq_event_dev *edev = to_devfreq_event(dev);
294
295 kfree(edev);
296}
297
298/**
299 * devfreq_event_add_edev() - Add new devfreq-event device.
300 * @dev : the device owning the devfreq-event device being created
301 * @desc : the devfreq-event device's decriptor which include essential
302 * data for devfreq-event device.
303 *
304 * Note that this function add new devfreq-event device to devfreq-event class
305 * list and register the device of the devfreq-event device.
306 */
307struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
308 struct devfreq_event_desc *desc)
309{
310 struct devfreq_event_dev *edev;
311 static atomic_t event_no = ATOMIC_INIT(0);
312 int ret;
313
314 if (!dev || !desc)
315 return ERR_PTR(-EINVAL);
316
317 if (!desc->name || !desc->ops)
318 return ERR_PTR(-EINVAL);
319
320 if (!desc->ops->set_event || !desc->ops->get_event)
321 return ERR_PTR(-EINVAL);
322
323 edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
324 if (!edev)
325 return ERR_PTR(-ENOMEM);
326
327 mutex_init(&edev->lock);
328 edev->desc = desc;
329 edev->enable_count = 0;
330 edev->dev.parent = dev;
331 edev->dev.class = devfreq_event_class;
332 edev->dev.release = devfreq_event_release_edev;
333
334 dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
335 ret = device_register(&edev->dev);
336 if (ret < 0) {
337 put_device(&edev->dev);
338 return ERR_PTR(ret);
339 }
340 dev_set_drvdata(&edev->dev, edev);
341
342 INIT_LIST_HEAD(&edev->node);
343
344 mutex_lock(&devfreq_event_list_lock);
345 list_add(&edev->node, &devfreq_event_list);
346 mutex_unlock(&devfreq_event_list_lock);
347
348 return edev;
349}
350EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
351
352/**
353 * devfreq_event_remove_edev() - Remove the devfreq-event device registered.
354 * @dev : the devfreq-event device
355 *
356 * Note that this function remove the registered devfreq-event device.
357 */
358int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
359{
360 if (!edev)
361 return -EINVAL;
362
363 WARN_ON(edev->enable_count);
364
365 mutex_lock(&devfreq_event_list_lock);
366 list_del(&edev->node);
367 mutex_unlock(&devfreq_event_list_lock);
368
369 device_unregister(&edev->dev);
370
371 return 0;
372}
373EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
374
375static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
376{
377 struct devfreq_event_dev **r = res;
378
379 if (WARN_ON(!r || !*r))
380 return 0;
381
382 return *r == data;
383}
384
385static void devm_devfreq_event_release(struct device *dev, void *res)
386{
387 devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
388}
389
390/**
391 * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
392 * @dev : the device owning the devfreq-event device being created
393 * @desc : the devfreq-event device's decriptor which include essential
394 * data for devfreq-event device.
395 *
396 * Note that this function manages automatically the memory of devfreq-event
397 * device using device resource management and simplify the free operation
398 * for memory of devfreq-event device.
399 */
400struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
401 struct devfreq_event_desc *desc)
402{
403 struct devfreq_event_dev **ptr, *edev;
404
405 ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr), GFP_KERNEL);
406 if (!ptr)
407 return ERR_PTR(-ENOMEM);
408
409 edev = devfreq_event_add_edev(dev, desc);
410 if (IS_ERR(edev)) {
411 devres_free(ptr);
412 return ERR_PTR(-ENOMEM);
413 }
414
415 *ptr = edev;
416 devres_add(dev, ptr);
417
418 return edev;
419}
420EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
421
422/**
423 * devm_devfreq_event_remove_edev()- Resource-managed devfreq_event_remove_edev()
424 * @dev : the device owning the devfreq-event device being created
425 * @edev : the devfreq-event device
426 *
427 * Note that this function manages automatically the memory of devfreq-event
428 * device using device resource management.
429 */
430void devm_devfreq_event_remove_edev(struct device *dev,
431 struct devfreq_event_dev *edev)
432{
433 WARN_ON(devres_release(dev, devm_devfreq_event_release,
434 devm_devfreq_event_match, edev));
435}
436EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
437
438/*
439 * Device attributes for devfreq-event class.
440 */
441static ssize_t name_show(struct device *dev, struct device_attribute *attr,
442 char *buf)
443{
444 struct devfreq_event_dev *edev = to_devfreq_event(dev);
445
446 if (!edev || !edev->desc)
447 return -EINVAL;
448
449 return sprintf(buf, "%s\n", edev->desc->name);
450}
451static DEVICE_ATTR_RO(name);
452
453static ssize_t enable_count_show(struct device *dev,
454 struct device_attribute *attr, char *buf)
455{
456 struct devfreq_event_dev *edev = to_devfreq_event(dev);
457
458 if (!edev || !edev->desc)
459 return -EINVAL;
460
461 return sprintf(buf, "%d\n", edev->enable_count);
462}
463static DEVICE_ATTR_RO(enable_count);
464
465static struct attribute *devfreq_event_attrs[] = {
466 &dev_attr_name.attr,
467 &dev_attr_enable_count.attr,
468 NULL,
469};
470ATTRIBUTE_GROUPS(devfreq_event);
471
472static int __init devfreq_event_init(void)
473{
474 devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
475 if (IS_ERR(devfreq_event_class)) {
476 pr_err("%s: couldn't create class\n", __FILE__);
477 return PTR_ERR(devfreq_event_class);
478 }
479
480 devfreq_event_class->dev_groups = devfreq_event_groups;
481
482 return 0;
483}
484subsys_initcall(devfreq_event_init);
485
486static void __exit devfreq_event_exit(void)
487{
488 class_destroy(devfreq_event_class);
489}
490module_exit(devfreq_event_exit);
491
492MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
493MODULE_DESCRIPTION("DEVFREQ-Event class support");
494MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
new file mode 100644
index 000000000000..a11720affc31
--- /dev/null
+++ b/drivers/devfreq/event/Kconfig
@@ -0,0 +1,25 @@
1menuconfig PM_DEVFREQ_EVENT
2 bool "DEVFREQ-Event device Support"
3 help
4 The devfreq-event device provide the raw data and events which
5 indicate the current state of devfreq-event device. The provided
6 data from devfreq-event device is used to monitor the state of
7 device and determine the suitable size of resource to reduce the
8 wasted resource.
9
10 The devfreq-event device can support the various type of events
11 (e.g., raw data, utilization, latency, bandwidth). The events
12 may be used by devfreq governor and other subsystem.
13
14if PM_DEVFREQ_EVENT
15
16config DEVFREQ_EVENT_EXYNOS_PPMU
17 bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
18 depends on ARCH_EXYNOS
19 select PM_OPP
20 help
21 This add the devfreq-event driver for Exynos SoC. It provides PPMU
22 (Platform Performance Monitoring Unit) counters to estimate the
23 utilization of each module.
24
25endif # PM_DEVFREQ_EVENT
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
new file mode 100644
index 000000000000..be146ead79cf
--- /dev/null
+++ b/drivers/devfreq/event/Makefile
@@ -0,0 +1,2 @@
1# Exynos DEVFREQ Event Drivers
2obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
new file mode 100644
index 000000000000..ad8347385f53
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -0,0 +1,374 @@
1/*
2 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author : Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
12 */
13
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/of_address.h>
20#include <linux/platform_device.h>
21#include <linux/suspend.h>
22#include <linux/devfreq-event.h>
23
24#include "exynos-ppmu.h"
25
26struct exynos_ppmu_data {
27 void __iomem *base;
28 struct clk *clk;
29};
30
31struct exynos_ppmu {
32 struct devfreq_event_dev **edev;
33 struct devfreq_event_desc *desc;
34 unsigned int num_events;
35
36 struct device *dev;
37 struct mutex lock;
38
39 struct exynos_ppmu_data ppmu;
40};
41
42#define PPMU_EVENT(name) \
43 { "ppmu-event0-"#name, PPMU_PMNCNT0 }, \
44 { "ppmu-event1-"#name, PPMU_PMNCNT1 }, \
45 { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \
46 { "ppmu-event3-"#name, PPMU_PMNCNT3 }
47
48struct __exynos_ppmu_events {
49 char *name;
50 int id;
51} ppmu_events[] = {
52 /* For Exynos3250, Exynos4 and Exynos5260 */
53 PPMU_EVENT(g3d),
54 PPMU_EVENT(fsys),
55
56 /* For Exynos4 SoCs and Exynos3250 */
57 PPMU_EVENT(dmc0),
58 PPMU_EVENT(dmc1),
59 PPMU_EVENT(cpu),
60 PPMU_EVENT(rightbus),
61 PPMU_EVENT(leftbus),
62 PPMU_EVENT(lcd0),
63 PPMU_EVENT(camif),
64
65 /* Only for Exynos3250 and Exynos5260 */
66 PPMU_EVENT(mfc),
67
68 /* Only for Exynos4 SoCs */
69 PPMU_EVENT(mfc-left),
70 PPMU_EVENT(mfc-right),
71
72 /* Only for Exynos5260 SoCs */
73 PPMU_EVENT(drex0-s0),
74 PPMU_EVENT(drex0-s1),
75 PPMU_EVENT(drex1-s0),
76 PPMU_EVENT(drex1-s1),
77 PPMU_EVENT(eagle),
78 PPMU_EVENT(kfc),
79 PPMU_EVENT(isp),
80 PPMU_EVENT(fimc),
81 PPMU_EVENT(gscl),
82 PPMU_EVENT(mscl),
83 PPMU_EVENT(fimd0x),
84 PPMU_EVENT(fimd1x),
85 { /* sentinel */ },
86};
87
88static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
89{
90 int i;
91
92 for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
93 if (!strcmp(edev->desc->name, ppmu_events[i].name))
94 return ppmu_events[i].id;
95
96 return -EINVAL;
97}
98
99static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
100{
101 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
102 u32 pmnc;
103
104 /* Disable all counters */
105 __raw_writel(PPMU_CCNT_MASK |
106 PPMU_PMCNT0_MASK |
107 PPMU_PMCNT1_MASK |
108 PPMU_PMCNT2_MASK |
109 PPMU_PMCNT3_MASK,
110 info->ppmu.base + PPMU_CNTENC);
111
112 /* Disable PPMU */
113 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
114 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
115 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
116
117 return 0;
118}
119
120static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
121{
122 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
123 int id = exynos_ppmu_find_ppmu_id(edev);
124 u32 pmnc, cntens;
125
126 if (id < 0)
127 return id;
128
129 /* Enable specific counter */
130 cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
131 cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
132 __raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
133
134 /* Set the event of Read/Write data count */
135 __raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
136 info->ppmu.base + PPMU_BEVTxSEL(id));
137
138 /* Reset cycle counter/performance counter and enable PPMU */
139 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
140 pmnc &= ~(PPMU_PMNC_ENABLE_MASK
141 | PPMU_PMNC_COUNTER_RESET_MASK
142 | PPMU_PMNC_CC_RESET_MASK);
143 pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
144 pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
145 pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
146 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
147
148 return 0;
149}
150
151static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
152 struct devfreq_event_data *edata)
153{
154 struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
155 int id = exynos_ppmu_find_ppmu_id(edev);
156 u32 pmnc, cntenc;
157
158 if (id < 0)
159 return -EINVAL;
160
161 /* Disable PPMU */
162 pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
163 pmnc &= ~PPMU_PMNC_ENABLE_MASK;
164 __raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
165
166 /* Read cycle count */
167 edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
168
169 /* Read performance count */
170 switch (id) {
171 case PPMU_PMNCNT0:
172 case PPMU_PMNCNT1:
173 case PPMU_PMNCNT2:
174 edata->load_count
175 = __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
176 break;
177 case PPMU_PMNCNT3:
178 edata->load_count =
179 ((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
180 | __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
181 break;
182 default:
183 return -EINVAL;
184 }
185
186 /* Disable specific counter */
187 cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
188 cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
189 __raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
190
191 dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
192 edata->load_count, edata->total_count);
193
194 return 0;
195}
196
197static struct devfreq_event_ops exynos_ppmu_ops = {
198 .disable = exynos_ppmu_disable,
199 .set_event = exynos_ppmu_set_event,
200 .get_event = exynos_ppmu_get_event,
201};
202
203static int of_get_devfreq_events(struct device_node *np,
204 struct exynos_ppmu *info)
205{
206 struct devfreq_event_desc *desc;
207 struct device *dev = info->dev;
208 struct device_node *events_np, *node;
209 int i, j, count;
210
211 events_np = of_get_child_by_name(np, "events");
212 if (!events_np) {
213 dev_err(dev,
214 "failed to get child node of devfreq-event devices\n");
215 return -EINVAL;
216 }
217
218 count = of_get_child_count(events_np);
219 desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
220 if (!desc)
221 return -ENOMEM;
222 info->num_events = count;
223
224 j = 0;
225 for_each_child_of_node(events_np, node) {
226 for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
227 if (!ppmu_events[i].name)
228 continue;
229
230 if (!of_node_cmp(node->name, ppmu_events[i].name))
231 break;
232 }
233
234 if (i == ARRAY_SIZE(ppmu_events)) {
235 dev_warn(dev,
236 "don't know how to configure events : %s\n",
237 node->name);
238 continue;
239 }
240
241 desc[j].ops = &exynos_ppmu_ops;
242 desc[j].driver_data = info;
243
244 of_property_read_string(node, "event-name", &desc[j].name);
245
246 j++;
247
248 of_node_put(node);
249 }
250 info->desc = desc;
251
252 of_node_put(events_np);
253
254 return 0;
255}
256
257static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
258{
259 struct device *dev = info->dev;
260 struct device_node *np = dev->of_node;
261 int ret = 0;
262
263 if (!np) {
264 dev_err(dev, "failed to find devicetree node\n");
265 return -EINVAL;
266 }
267
268 /* Maps the memory mapped IO to control PPMU register */
269 info->ppmu.base = of_iomap(np, 0);
270 if (IS_ERR_OR_NULL(info->ppmu.base)) {
271 dev_err(dev, "failed to map memory region\n");
272 return -ENOMEM;
273 }
274
275 info->ppmu.clk = devm_clk_get(dev, "ppmu");
276 if (IS_ERR(info->ppmu.clk)) {
277 info->ppmu.clk = NULL;
278 dev_warn(dev, "cannot get PPMU clock\n");
279 }
280
281 ret = of_get_devfreq_events(np, info);
282 if (ret < 0) {
283 dev_err(dev, "failed to parse exynos ppmu dt node\n");
284 goto err;
285 }
286
287 return 0;
288
289err:
290 iounmap(info->ppmu.base);
291
292 return ret;
293}
294
295static int exynos_ppmu_probe(struct platform_device *pdev)
296{
297 struct exynos_ppmu *info;
298 struct devfreq_event_dev **edev;
299 struct devfreq_event_desc *desc;
300 int i, ret = 0, size;
301
302 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
303 if (!info)
304 return -ENOMEM;
305
306 mutex_init(&info->lock);
307 info->dev = &pdev->dev;
308
309 /* Parse dt data to get resource */
310 ret = exynos_ppmu_parse_dt(info);
311 if (ret < 0) {
312 dev_err(&pdev->dev,
313 "failed to parse devicetree for resource\n");
314 return ret;
315 }
316 desc = info->desc;
317
318 size = sizeof(struct devfreq_event_dev *) * info->num_events;
319 info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
320 if (!info->edev) {
321 dev_err(&pdev->dev,
322 "failed to allocate memory devfreq-event devices\n");
323 return -ENOMEM;
324 }
325 edev = info->edev;
326 platform_set_drvdata(pdev, info);
327
328 for (i = 0; i < info->num_events; i++) {
329 edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
330 if (IS_ERR(edev[i])) {
331 ret = PTR_ERR(edev[i]);
332 dev_err(&pdev->dev,
333 "failed to add devfreq-event device\n");
334 goto err;
335 }
336 }
337
338 clk_prepare_enable(info->ppmu.clk);
339
340 return 0;
341err:
342 iounmap(info->ppmu.base);
343
344 return ret;
345}
346
347static int exynos_ppmu_remove(struct platform_device *pdev)
348{
349 struct exynos_ppmu *info = platform_get_drvdata(pdev);
350
351 clk_disable_unprepare(info->ppmu.clk);
352 iounmap(info->ppmu.base);
353
354 return 0;
355}
356
357static struct of_device_id exynos_ppmu_id_match[] = {
358 { .compatible = "samsung,exynos-ppmu", },
359 { /* sentinel */ },
360};
361
362static struct platform_driver exynos_ppmu_driver = {
363 .probe = exynos_ppmu_probe,
364 .remove = exynos_ppmu_remove,
365 .driver = {
366 .name = "exynos-ppmu",
367 .of_match_table = exynos_ppmu_id_match,
368 },
369};
370module_platform_driver(exynos_ppmu_driver);
371
372MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
373MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
374MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
new file mode 100644
index 000000000000..4e831d48c138
--- /dev/null
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -0,0 +1,93 @@
1/*
2 * exynos_ppmu.h - EXYNOS PPMU header file
3 *
4 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
5 * Author : Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __EXYNOS_PPMU_H__
13#define __EXYNOS_PPMU_H__
14
15enum ppmu_state {
16 PPMU_DISABLE = 0,
17 PPMU_ENABLE,
18};
19
20enum ppmu_counter {
21 PPMU_PMNCNT0 = 0,
22 PPMU_PMNCNT1,
23 PPMU_PMNCNT2,
24 PPMU_PMNCNT3,
25
26 PPMU_PMNCNT_MAX,
27};
28
29enum ppmu_event_type {
30 PPMU_RO_BUSY_CYCLE_CNT = 0x0,
31 PPMU_WO_BUSY_CYCLE_CNT = 0x1,
32 PPMU_RW_BUSY_CYCLE_CNT = 0x2,
33 PPMU_RO_REQUEST_CNT = 0x3,
34 PPMU_WO_REQUEST_CNT = 0x4,
35 PPMU_RO_DATA_CNT = 0x5,
36 PPMU_WO_DATA_CNT = 0x6,
37 PPMU_RO_LATENCY = 0x12,
38 PPMU_WO_LATENCY = 0x16,
39};
40
41enum ppmu_reg {
42 /* PPC control register */
43 PPMU_PMNC = 0x00,
44 PPMU_CNTENS = 0x10,
45 PPMU_CNTENC = 0x20,
46 PPMU_INTENS = 0x30,
47 PPMU_INTENC = 0x40,
48 PPMU_FLAG = 0x50,
49
50 /* Cycle Counter and Performance Event Counter Register */
51 PPMU_CCNT = 0x100,
52 PPMU_PMCNT0 = 0x110,
53 PPMU_PMCNT1 = 0x120,
54 PPMU_PMCNT2 = 0x130,
55 PPMU_PMCNT3_HIGH = 0x140,
56 PPMU_PMCNT3_LOW = 0x150,
57
58 /* Bus Event Generator */
59 PPMU_BEVT0SEL = 0x1000,
60 PPMU_BEVT1SEL = 0x1100,
61 PPMU_BEVT2SEL = 0x1200,
62 PPMU_BEVT3SEL = 0x1300,
63 PPMU_COUNTER_RESET = 0x1810,
64 PPMU_READ_OVERFLOW_CNT = 0x1810,
65 PPMU_READ_UNDERFLOW_CNT = 0x1814,
66 PPMU_WRITE_OVERFLOW_CNT = 0x1850,
67 PPMU_WRITE_UNDERFLOW_CNT = 0x1854,
68 PPMU_READ_PENDING_CNT = 0x1880,
69 PPMU_WRITE_PENDING_CNT = 0x1884
70};
71
72/* PMNC register */
73#define PPMU_PMNC_CC_RESET_SHIFT 2
74#define PPMU_PMNC_COUNTER_RESET_SHIFT 1
75#define PPMU_PMNC_ENABLE_SHIFT 0
76#define PPMU_PMNC_START_MODE_MASK BIT(16)
77#define PPMU_PMNC_CC_DIVIDER_MASK BIT(3)
78#define PPMU_PMNC_CC_RESET_MASK BIT(2)
79#define PPMU_PMNC_COUNTER_RESET_MASK BIT(1)
80#define PPMU_PMNC_ENABLE_MASK BIT(0)
81
82/* CNTENS/CNTENC/INTENS/INTENC/FLAG register */
83#define PPMU_CCNT_MASK BIT(31)
84#define PPMU_PMCNT3_MASK BIT(3)
85#define PPMU_PMCNT2_MASK BIT(2)
86#define PPMU_PMCNT1_MASK BIT(1)
87#define PPMU_PMCNT0_MASK BIT(0)
88
89/* PPMU_PMNCTx/PPMU_BETxSEL registers */
90#define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * x))
91#define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * x))
92
93#endif /* __EXYNOS_PPMU_H__ */
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
new file mode 100644
index 000000000000..34790961af5a
--- /dev/null
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -0,0 +1,718 @@
1/*
2 * A devfreq driver for NVIDIA Tegra SoCs
3 *
4 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
5 * Copyright (C) 2014 Google, Inc
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 */
20
21#include <linux/clk.h>
22#include <linux/cpufreq.h>
23#include <linux/devfreq.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/module.h>
27#include <linux/platform_device.h>
28#include <linux/pm_opp.h>
29#include <linux/reset.h>
30
31#include "governor.h"
32
33#define ACTMON_GLB_STATUS 0x0
34#define ACTMON_GLB_PERIOD_CTRL 0x4
35
36#define ACTMON_DEV_CTRL 0x0
37#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
38#define ACTMON_DEV_CTRL_ENB_PERIODIC BIT(18)
39#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN BIT(20)
40#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN BIT(21)
41#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT 23
42#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT 26
43#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN BIT(29)
44#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
45#define ACTMON_DEV_CTRL_ENB BIT(31)
46
47#define ACTMON_DEV_UPPER_WMARK 0x4
48#define ACTMON_DEV_LOWER_WMARK 0x8
49#define ACTMON_DEV_INIT_AVG 0xc
50#define ACTMON_DEV_AVG_UPPER_WMARK 0x10
51#define ACTMON_DEV_AVG_LOWER_WMARK 0x14
52#define ACTMON_DEV_COUNT_WEIGHT 0x18
53#define ACTMON_DEV_AVG_COUNT 0x20
54#define ACTMON_DEV_INTR_STATUS 0x24
55
56#define ACTMON_INTR_STATUS_CLEAR 0xffffffff
57
58#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER BIT(31)
59#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER BIT(30)
60
61#define ACTMON_ABOVE_WMARK_WINDOW 1
62#define ACTMON_BELOW_WMARK_WINDOW 3
63#define ACTMON_BOOST_FREQ_STEP 16000
64
65/* activity counter is incremented every 256 memory transactions, and each
66 * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
67 * 4 * 256 = 1024.
68 */
69#define ACTMON_COUNT_WEIGHT 0x400
70
71/*
72 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
73 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
74 */
75#define ACTMON_AVERAGE_WINDOW_LOG2 6
76#define ACTMON_SAMPLING_PERIOD 12 /* ms */
77#define ACTMON_DEFAULT_AVG_BAND 6 /* 1/10 of % */
78
79#define KHZ 1000
80
81/* Assume that the bus is saturated if the utilization is 25% */
82#define BUS_SATURATION_RATIO 25
83
84/**
85 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
86 * device
87 *
88 * Coefficients and thresholds are in %
89 */
90struct tegra_devfreq_device_config {
91 u32 offset;
92 u32 irq_mask;
93
94 unsigned int boost_up_coeff;
95 unsigned int boost_down_coeff;
96 unsigned int boost_up_threshold;
97 unsigned int boost_down_threshold;
98 u32 avg_dependency_threshold;
99};
100
101enum tegra_actmon_device {
102 MCALL = 0,
103 MCCPU,
104};
105
106static struct tegra_devfreq_device_config actmon_device_configs[] = {
107 {
108 /* MCALL */
109 .offset = 0x1c0,
110 .irq_mask = 1 << 26,
111 .boost_up_coeff = 200,
112 .boost_down_coeff = 50,
113 .boost_up_threshold = 60,
114 .boost_down_threshold = 40,
115 },
116 {
117 /* MCCPU */
118 .offset = 0x200,
119 .irq_mask = 1 << 25,
120 .boost_up_coeff = 800,
121 .boost_down_coeff = 90,
122 .boost_up_threshold = 27,
123 .boost_down_threshold = 10,
124 .avg_dependency_threshold = 50000,
125 },
126};
127
128/**
129 * struct tegra_devfreq_device - state specific to an ACTMON device
130 *
131 * Frequencies are in kHz.
132 */
133struct tegra_devfreq_device {
134 const struct tegra_devfreq_device_config *config;
135
136 void __iomem *regs;
137 u32 avg_band_freq;
138 u32 avg_count;
139
140 unsigned long target_freq;
141 unsigned long boost_freq;
142};
143
144struct tegra_devfreq {
145 struct devfreq *devfreq;
146
147 struct platform_device *pdev;
148 struct reset_control *reset;
149 struct clk *clock;
150 void __iomem *regs;
151
152 spinlock_t lock;
153
154 struct clk *emc_clock;
155 unsigned long max_freq;
156 unsigned long cur_freq;
157 struct notifier_block rate_change_nb;
158
159 struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
160};
161
162struct tegra_actmon_emc_ratio {
163 unsigned long cpu_freq;
164 unsigned long emc_freq;
165};
166
167static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
168 { 1400000, ULONG_MAX },
169 { 1200000, 750000 },
170 { 1100000, 600000 },
171 { 1000000, 500000 },
172 { 800000, 375000 },
173 { 500000, 200000 },
174 { 250000, 100000 },
175};
176
177static unsigned long do_percent(unsigned long val, unsigned int pct)
178{
179 return val * pct / 100;
180}
181
182static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
183{
184 u32 avg = dev->avg_count;
185 u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;
186
187 writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
188 avg = max(avg, band);
189 writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
190}
191
192static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
193 struct tegra_devfreq_device *dev)
194{
195 u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
196
197 writel(do_percent(val, dev->config->boost_up_threshold),
198 dev->regs + ACTMON_DEV_UPPER_WMARK);
199
200 writel(do_percent(val, dev->config->boost_down_threshold),
201 dev->regs + ACTMON_DEV_LOWER_WMARK);
202}
203
204static void actmon_write_barrier(struct tegra_devfreq *tegra)
205{
206 /* ensure the update has reached the ACTMON */
207 wmb();
208 readl(tegra->regs + ACTMON_GLB_STATUS);
209}
210
211static irqreturn_t actmon_isr(int irq, void *data)
212{
213 struct tegra_devfreq *tegra = data;
214 struct tegra_devfreq_device *dev = NULL;
215 unsigned long flags;
216 u32 val;
217 unsigned int i;
218
219 val = readl(tegra->regs + ACTMON_GLB_STATUS);
220
221 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
222 if (val & tegra->devices[i].config->irq_mask) {
223 dev = tegra->devices + i;
224 break;
225 }
226 }
227
228 if (!dev)
229 return IRQ_NONE;
230
231 spin_lock_irqsave(&tegra->lock, flags);
232
233 dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
234 tegra_devfreq_update_avg_wmark(dev);
235
236 val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
237 if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
238 val = readl(dev->regs + ACTMON_DEV_CTRL) |
239 ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
240 ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
241
242 /*
243 * new_boost = min(old_boost * up_coef + step, max_freq)
244 */
245 dev->boost_freq = do_percent(dev->boost_freq,
246 dev->config->boost_up_coeff);
247 dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
248 if (dev->boost_freq >= tegra->max_freq) {
249 dev->boost_freq = tegra->max_freq;
250 val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
251 }
252 writel(val, dev->regs + ACTMON_DEV_CTRL);
253 } else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
254 val = readl(dev->regs + ACTMON_DEV_CTRL) |
255 ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
256 ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
257
258 /*
259 * new_boost = old_boost * down_coef
260 * or 0 if (old_boost * down_coef < step / 2)
261 */
262 dev->boost_freq = do_percent(dev->boost_freq,
263 dev->config->boost_down_coeff);
264 if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
265 dev->boost_freq = 0;
266 val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
267 }
268 writel(val, dev->regs + ACTMON_DEV_CTRL);
269 }
270
271 if (dev->config->avg_dependency_threshold) {
272 val = readl(dev->regs + ACTMON_DEV_CTRL);
273 if (dev->avg_count >= dev->config->avg_dependency_threshold)
274 val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
275 else if (dev->boost_freq == 0)
276 val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
277 writel(val, dev->regs + ACTMON_DEV_CTRL);
278 }
279
280 writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
281
282 actmon_write_barrier(tegra);
283
284 spin_unlock_irqrestore(&tegra->lock, flags);
285
286 return IRQ_WAKE_THREAD;
287}
288
289static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
290 unsigned long cpu_freq)
291{
292 unsigned int i;
293 struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
294
295 for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
296 if (cpu_freq >= ratio->cpu_freq) {
297 if (ratio->emc_freq >= tegra->max_freq)
298 return tegra->max_freq;
299 else
300 return ratio->emc_freq;
301 }
302 }
303
304 return 0;
305}
306
307static void actmon_update_target(struct tegra_devfreq *tegra,
308 struct tegra_devfreq_device *dev)
309{
310 unsigned long cpu_freq = 0;
311 unsigned long static_cpu_emc_freq = 0;
312 unsigned int avg_sustain_coef;
313 unsigned long flags;
314
315 if (dev->config->avg_dependency_threshold) {
316 cpu_freq = cpufreq_get(0);
317 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
318 }
319
320 spin_lock_irqsave(&tegra->lock, flags);
321
322 dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
323 avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
324 dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
325 dev->target_freq += dev->boost_freq;
326
327 if (dev->avg_count >= dev->config->avg_dependency_threshold)
328 dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
329
330 spin_unlock_irqrestore(&tegra->lock, flags);
331}
332
333static irqreturn_t actmon_thread_isr(int irq, void *data)
334{
335 struct tegra_devfreq *tegra = data;
336
337 mutex_lock(&tegra->devfreq->lock);
338 update_devfreq(tegra->devfreq);
339 mutex_unlock(&tegra->devfreq->lock);
340
341 return IRQ_HANDLED;
342}
343
344static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
345 unsigned long action, void *ptr)
346{
347 struct clk_notifier_data *data = ptr;
348 struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
349 rate_change_nb);
350 unsigned int i;
351 unsigned long flags;
352
353 spin_lock_irqsave(&tegra->lock, flags);
354
355 switch (action) {
356 case POST_RATE_CHANGE:
357 tegra->cur_freq = data->new_rate / KHZ;
358
359 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
360 tegra_devfreq_update_wmark(tegra, tegra->devices + i);
361
362 actmon_write_barrier(tegra);
363 break;
364 case PRE_RATE_CHANGE:
365 /* fall through */
366 case ABORT_RATE_CHANGE:
367 break;
368 };
369
370 spin_unlock_irqrestore(&tegra->lock, flags);
371
372 return NOTIFY_OK;
373}
374
375static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
376 struct tegra_devfreq_device *dev)
377{
378 u32 val;
379
380 dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
381 dev->target_freq = tegra->cur_freq;
382
383 dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
384 writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);
385
386 tegra_devfreq_update_avg_wmark(dev);
387 tegra_devfreq_update_wmark(tegra, dev);
388
389 writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
390 writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
391
392 val = 0;
393 val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
394 ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
395 ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
396 val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
397 << ACTMON_DEV_CTRL_K_VAL_SHIFT;
398 val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
399 << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
400 val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
401 << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
402 val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
403 ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
404
405 writel(val, dev->regs + ACTMON_DEV_CTRL);
406
407 actmon_write_barrier(tegra);
408
409 val = readl(dev->regs + ACTMON_DEV_CTRL);
410 val |= ACTMON_DEV_CTRL_ENB;
411 writel(val, dev->regs + ACTMON_DEV_CTRL);
412
413 actmon_write_barrier(tegra);
414}
415
416static int tegra_devfreq_suspend(struct device *dev)
417{
418 struct platform_device *pdev;
419 struct tegra_devfreq *tegra;
420 struct tegra_devfreq_device *actmon_dev;
421 unsigned int i;
422 u32 val;
423
424 pdev = container_of(dev, struct platform_device, dev);
425 tegra = platform_get_drvdata(pdev);
426
427 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
428 actmon_dev = &tegra->devices[i];
429
430 val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
431 val &= ~ACTMON_DEV_CTRL_ENB;
432 writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);
433
434 writel(ACTMON_INTR_STATUS_CLEAR,
435 actmon_dev->regs + ACTMON_DEV_INTR_STATUS);
436
437 actmon_write_barrier(tegra);
438 }
439
440 return 0;
441}
442
443static int tegra_devfreq_resume(struct device *dev)
444{
445 struct platform_device *pdev;
446 struct tegra_devfreq *tegra;
447 struct tegra_devfreq_device *actmon_dev;
448 unsigned int i;
449
450 pdev = container_of(dev, struct platform_device, dev);
451 tegra = platform_get_drvdata(pdev);
452
453 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
454 actmon_dev = &tegra->devices[i];
455
456 tegra_actmon_configure_device(tegra, actmon_dev);
457 }
458
459 return 0;
460}
461
462static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
463 u32 flags)
464{
465 struct platform_device *pdev;
466 struct tegra_devfreq *tegra;
467 struct dev_pm_opp *opp;
468 unsigned long rate = *freq * KHZ;
469
470 pdev = container_of(dev, struct platform_device, dev);
471 tegra = platform_get_drvdata(pdev);
472
473 rcu_read_lock();
474 opp = devfreq_recommended_opp(dev, &rate, flags);
475 if (IS_ERR(opp)) {
476 rcu_read_unlock();
477 dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
478 return PTR_ERR(opp);
479 }
480 rate = dev_pm_opp_get_freq(opp);
481 rcu_read_unlock();
482
483 /* TODO: Once we have per-user clk constraints, set a floor */
484 clk_set_rate(tegra->emc_clock, rate);
485
486 /* TODO: Set voltage as well */
487
488 return 0;
489}
490
491static int tegra_devfreq_get_dev_status(struct device *dev,
492 struct devfreq_dev_status *stat)
493{
494 struct platform_device *pdev;
495 struct tegra_devfreq *tegra;
496 struct tegra_devfreq_device *actmon_dev;
497
498 pdev = container_of(dev, struct platform_device, dev);
499 tegra = platform_get_drvdata(pdev);
500
501 stat->current_frequency = tegra->cur_freq;
502
503 /* To be used by the tegra governor */
504 stat->private_data = tegra;
505
506 /* The below are to be used by the other governors */
507
508 actmon_dev = &tegra->devices[MCALL];
509
510 /* Number of cycles spent on memory access */
511 stat->busy_time = actmon_dev->avg_count;
512
513 /* The bus can be considered to be saturated way before 100% */
514 stat->busy_time *= 100 / BUS_SATURATION_RATIO;
515
516 /* Number of cycles in a sampling period */
517 stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
518
519 return 0;
520}
521
522static int tegra_devfreq_get_target(struct devfreq *devfreq,
523 unsigned long *freq)
524{
525 struct devfreq_dev_status stat;
526 struct tegra_devfreq *tegra;
527 struct tegra_devfreq_device *dev;
528 unsigned long target_freq = 0;
529 unsigned int i;
530 int err;
531
532 err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat);
533 if (err)
534 return err;
535
536 tegra = stat.private_data;
537
538 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
539 dev = &tegra->devices[i];
540
541 actmon_update_target(tegra, dev);
542
543 target_freq = max(target_freq, dev->target_freq);
544 }
545
546 *freq = target_freq;
547
548 return 0;
549}
550
551static int tegra_devfreq_event_handler(struct devfreq *devfreq,
552 unsigned int event, void *data)
553{
554 return 0;
555}
556
557static struct devfreq_governor tegra_devfreq_governor = {
558 .name = "tegra",
559 .get_target_freq = tegra_devfreq_get_target,
560 .event_handler = tegra_devfreq_event_handler,
561};
562
563static struct devfreq_dev_profile tegra_devfreq_profile = {
564 .polling_ms = 0,
565 .target = tegra_devfreq_target,
566 .get_dev_status = tegra_devfreq_get_dev_status,
567};
568
569static int tegra_devfreq_probe(struct platform_device *pdev)
570{
571 struct tegra_devfreq *tegra;
572 struct tegra_devfreq_device *dev;
573 struct resource *res;
574 unsigned long max_freq;
575 unsigned int i;
576 int irq;
577 int err;
578
579 tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
580 if (!tegra)
581 return -ENOMEM;
582
583 spin_lock_init(&tegra->lock);
584
585 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
586 if (!res) {
587 dev_err(&pdev->dev, "Failed to get regs resource\n");
588 return -ENODEV;
589 }
590
591 tegra->regs = devm_ioremap_resource(&pdev->dev, res);
592 if (IS_ERR(tegra->regs)) {
593 dev_err(&pdev->dev, "Failed to get IO memory\n");
594 return PTR_ERR(tegra->regs);
595 }
596
597 tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
598 if (IS_ERR(tegra->reset)) {
599 dev_err(&pdev->dev, "Failed to get reset\n");
600 return PTR_ERR(tegra->reset);
601 }
602
603 tegra->clock = devm_clk_get(&pdev->dev, "actmon");
604 if (IS_ERR(tegra->clock)) {
605 dev_err(&pdev->dev, "Failed to get actmon clock\n");
606 return PTR_ERR(tegra->clock);
607 }
608
609 tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
610 if (IS_ERR(tegra->emc_clock)) {
611 dev_err(&pdev->dev, "Failed to get emc clock\n");
612 return PTR_ERR(tegra->emc_clock);
613 }
614
615 err = of_init_opp_table(&pdev->dev);
616 if (err) {
617 dev_err(&pdev->dev, "Failed to init operating point table\n");
618 return err;
619 }
620
621 tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
622 err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
623 if (err) {
624 dev_err(&pdev->dev,
625 "Failed to register rate change notifier\n");
626 return err;
627 }
628
629 reset_control_assert(tegra->reset);
630
631 err = clk_prepare_enable(tegra->clock);
632 if (err) {
633 reset_control_deassert(tegra->reset);
634 return err;
635 }
636
637 reset_control_deassert(tegra->reset);
638
639 max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
640 tegra->max_freq = max_freq / KHZ;
641
642 clk_set_rate(tegra->emc_clock, max_freq);
643
644 tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
645
646 writel(ACTMON_SAMPLING_PERIOD - 1,
647 tegra->regs + ACTMON_GLB_PERIOD_CTRL);
648
649 for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
650 dev = tegra->devices + i;
651 dev->config = actmon_device_configs + i;
652 dev->regs = tegra->regs + dev->config->offset;
653
654 tegra_actmon_configure_device(tegra, tegra->devices + i);
655 }
656
657 err = devfreq_add_governor(&tegra_devfreq_governor);
658 if (err) {
659 dev_err(&pdev->dev, "Failed to add governor\n");
660 return err;
661 }
662
663 tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
664 tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
665 &tegra_devfreq_profile,
666 "tegra",
667 NULL);
668
669 irq = platform_get_irq(pdev, 0);
670 err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
671 actmon_thread_isr, IRQF_SHARED,
672 "tegra-devfreq", tegra);
673 if (err) {
674 dev_err(&pdev->dev, "Interrupt request failed\n");
675 return err;
676 }
677
678 platform_set_drvdata(pdev, tegra);
679
680 return 0;
681}
682
683static int tegra_devfreq_remove(struct platform_device *pdev)
684{
685 struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
686
687 clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
688
689 clk_disable_unprepare(tegra->clock);
690
691 return 0;
692}
693
694static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
695 tegra_devfreq_suspend,
696 tegra_devfreq_resume);
697
698static struct of_device_id tegra_devfreq_of_match[] = {
699 { .compatible = "nvidia,tegra124-actmon" },
700 { },
701};
702
703static struct platform_driver tegra_devfreq_driver = {
704 .probe = tegra_devfreq_probe,
705 .remove = tegra_devfreq_remove,
706 .driver = {
707 .name = "tegra-devfreq",
708 .owner = THIS_MODULE,
709 .of_match_table = tegra_devfreq_of_match,
710 .pm = &tegra_devfreq_pm_ops,
711 },
712};
713module_platform_driver(tegra_devfreq_driver);
714
715MODULE_LICENSE("GPL");
716MODULE_DESCRIPTION("Tegra devfreq driver");
717MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
718MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index de361a156b34..5a635646e05c 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
43{ 43{
44 const struct acpi_csrt_shared_info *si; 44 const struct acpi_csrt_shared_info *si;
45 struct list_head resource_list; 45 struct list_head resource_list;
46 struct resource_list_entry *rentry; 46 struct resource_entry *rentry;
47 resource_size_t mem = 0, irq = 0; 47 resource_size_t mem = 0, irq = 0;
48 int ret; 48 int ret;
49 49
@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
56 return 0; 56 return 0;
57 57
58 list_for_each_entry(rentry, &resource_list, node) { 58 list_for_each_entry(rentry, &resource_list, node) {
59 if (resource_type(&rentry->res) == IORESOURCE_MEM) 59 if (resource_type(rentry->res) == IORESOURCE_MEM)
60 mem = rentry->res.start; 60 mem = rentry->res->start;
61 else if (resource_type(&rentry->res) == IORESOURCE_IRQ) 61 else if (resource_type(rentry->res) == IORESOURCE_IRQ)
62 irq = rentry->res.start; 62 irq = rentry->res->start;
63 } 63 }
64 64
65 acpi_dev_free_resource_list(&resource_list); 65 acpi_dev_free_resource_list(&resource_list);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 49c265255a07..cb59619df23f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -385,4 +385,11 @@ config EDAC_ALTERA_MC
385 preloader must initialize the SDRAM before loading 385 preloader must initialize the SDRAM before loading
386 the kernel. 386 the kernel.
387 387
388config EDAC_SYNOPSYS
389 tristate "Synopsys DDR Memory Controller"
390 depends on EDAC_MM_EDAC && ARCH_ZYNQ
391 help
392 Support for error detection and correction on the Synopsys DDR
393 memory controller.
394
388endif # EDAC 395endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index d40c69a04df7..b255f362b1db 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -67,3 +67,4 @@ obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
67obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o 67obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
68 68
69obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o 69obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o
70obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 670d2829c547..c84eecb191ef 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -157,7 +157,7 @@ struct dev_ch_attribute {
157}; 157};
158 158
159#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ 159#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
160 struct dev_ch_attribute dev_attr_legacy_##_name = \ 160 static struct dev_ch_attribute dev_attr_legacy_##_name = \
161 { __ATTR(_name, _mode, _show, _store), (_var) } 161 { __ATTR(_name, _mode, _show, _store), (_var) }
162 162
163#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) 163#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
@@ -850,20 +850,20 @@ static const struct file_operations debug_fake_inject_fops = {
850#endif 850#endif
851 851
852/* default Control file */ 852/* default Control file */
853DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); 853static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
854 854
855/* default Attribute files */ 855/* default Attribute files */
856DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); 856static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
857DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); 857static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
858DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); 858static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
859DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); 859static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
860DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); 860static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
861DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); 861static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
862DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); 862static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
863DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); 863static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
864 864
865/* memory scrubber attribute file */ 865/* memory scrubber attribute file */
866DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL); 866static DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);
867 867
868static struct attribute *mci_attrs[] = { 868static struct attribute *mci_attrs[] = {
869 &dev_attr_reset_counters.attr, 869 &dev_attr_reset_counters.attr,
@@ -989,7 +989,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
989 989
990 err = bus_register(mci->bus); 990 err = bus_register(mci->bus);
991 if (err < 0) 991 if (err < 0)
992 return err; 992 goto fail_free_name;
993 993
994 /* get the /sys/devices/system/edac subsys reference */ 994 /* get the /sys/devices/system/edac subsys reference */
995 mci->dev.type = &mci_attr_type; 995 mci->dev.type = &mci_attr_type;
@@ -1005,9 +1005,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1005 err = device_add(&mci->dev); 1005 err = device_add(&mci->dev);
1006 if (err < 0) { 1006 if (err < 0) {
1007 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); 1007 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
1008 bus_unregister(mci->bus); 1008 goto fail_unregister_bus;
1009 kfree(mci->bus->name);
1010 return err;
1011 } 1009 }
1012 1010
1013 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) { 1011 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
@@ -1015,15 +1013,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1015 dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; 1013 dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
1016 dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; 1014 dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
1017 } 1015 }
1016
1018 if (mci->set_sdram_scrub_rate) { 1017 if (mci->set_sdram_scrub_rate) {
1019 dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; 1018 dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
1020 dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; 1019 dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
1021 } 1020 }
1022 err = device_create_file(&mci->dev, 1021
1023 &dev_attr_sdram_scrub_rate); 1022 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
1024 if (err) { 1023 if (err) {
1025 edac_dbg(1, "failure: create sdram_scrub_rate\n"); 1024 edac_dbg(1, "failure: create sdram_scrub_rate\n");
1026 goto fail2; 1025 goto fail_unregister_dev;
1027 } 1026 }
1028 } 1027 }
1029 /* 1028 /*
@@ -1032,8 +1031,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1032 for (i = 0; i < mci->tot_dimms; i++) { 1031 for (i = 0; i < mci->tot_dimms; i++) {
1033 struct dimm_info *dimm = mci->dimms[i]; 1032 struct dimm_info *dimm = mci->dimms[i];
1034 /* Only expose populated DIMMs */ 1033 /* Only expose populated DIMMs */
1035 if (dimm->nr_pages == 0) 1034 if (!dimm->nr_pages)
1036 continue; 1035 continue;
1036
1037#ifdef CONFIG_EDAC_DEBUG 1037#ifdef CONFIG_EDAC_DEBUG
1038 edac_dbg(1, "creating dimm%d, located at ", i); 1038 edac_dbg(1, "creating dimm%d, located at ", i);
1039 if (edac_debug_level >= 1) { 1039 if (edac_debug_level >= 1) {
@@ -1048,14 +1048,14 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1048 err = edac_create_dimm_object(mci, dimm, i); 1048 err = edac_create_dimm_object(mci, dimm, i);
1049 if (err) { 1049 if (err) {
1050 edac_dbg(1, "failure: create dimm %d obj\n", i); 1050 edac_dbg(1, "failure: create dimm %d obj\n", i);
1051 goto fail; 1051 goto fail_unregister_dimm;
1052 } 1052 }
1053 } 1053 }
1054 1054
1055#ifdef CONFIG_EDAC_LEGACY_SYSFS 1055#ifdef CONFIG_EDAC_LEGACY_SYSFS
1056 err = edac_create_csrow_objects(mci); 1056 err = edac_create_csrow_objects(mci);
1057 if (err < 0) 1057 if (err < 0)
1058 goto fail; 1058 goto fail_unregister_dimm;
1059#endif 1059#endif
1060 1060
1061#ifdef CONFIG_EDAC_DEBUG 1061#ifdef CONFIG_EDAC_DEBUG
@@ -1063,16 +1063,19 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1063#endif 1063#endif
1064 return 0; 1064 return 0;
1065 1065
1066fail: 1066fail_unregister_dimm:
1067 for (i--; i >= 0; i--) { 1067 for (i--; i >= 0; i--) {
1068 struct dimm_info *dimm = mci->dimms[i]; 1068 struct dimm_info *dimm = mci->dimms[i];
1069 if (dimm->nr_pages == 0) 1069 if (!dimm->nr_pages)
1070 continue; 1070 continue;
1071
1071 device_unregister(&dimm->dev); 1072 device_unregister(&dimm->dev);
1072 } 1073 }
1073fail2: 1074fail_unregister_dev:
1074 device_unregister(&mci->dev); 1075 device_unregister(&mci->dev);
1076fail_unregister_bus:
1075 bus_unregister(mci->bus); 1077 bus_unregister(mci->bus);
1078fail_free_name:
1076 kfree(mci->bus->name); 1079 kfree(mci->bus->name);
1077 return err; 1080 return err;
1078} 1081}
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 6247d186177e..e9f8a393915a 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -279,11 +279,6 @@ static inline u32 i5100_recmema_rank(u32 a)
279 return i5100_nrecmema_rank(a); 279 return i5100_nrecmema_rank(a);
280} 280}
281 281
282static inline u32 i5100_recmema_dm_buf_id(u32 a)
283{
284 return i5100_nrecmema_dm_buf_id(a);
285}
286
287static inline u32 i5100_recmemb_cas(u32 a) 282static inline u32 i5100_recmemb_cas(u32 a)
288{ 283{
289 return i5100_nrecmemb_cas(a); 284 return i5100_nrecmemb_cas(a);
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 0bd91a802c67..f7681b553fd5 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -197,7 +197,7 @@ static int inj_bank_get(void *data, u64 *val)
197 197
198DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n"); 198DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
199 199
200struct dfs_node { 200static struct dfs_node {
201 char *name; 201 char *name;
202 struct dentry *d; 202 struct dentry *d;
203 const struct file_operations *fops; 203 const struct file_operations *fops;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index ffb1a9a15ccd..1fa76a588af3 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Freescale MPC85xx Memory Controller kenel module 2 * Freescale MPC85xx Memory Controller kernel module
3 * 3 *
4 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc. 4 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
5 * 5 *
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 8c6256436227..4498baf9ce05 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Freescale MPC85xx Memory Controller kenel module 2 * Freescale MPC85xx Memory Controller kernel module
3 * Author: Dave Jiang <djiang@mvista.com> 3 * Author: Dave Jiang <djiang@mvista.com>
4 * 4 *
5 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under 5 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 6366e880f978..0574e1bbe45c 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -789,7 +789,8 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
789 ctl = (ctl & 0xff00ffff) | 0x10000; 789 ctl = (ctl & 0xff00ffff) | 0x10000;
790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); 790 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
791 791
792 if (edac_mc_add_mc(mci)) { 792 res = edac_mc_add_mc(mci);
793 if (res) {
793 edac_dbg(3, "failed edac_mc_add_mc()\n"); 794 edac_dbg(3, "failed edac_mc_add_mc()\n");
794 goto err; 795 goto err;
795 } 796 }
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
new file mode 100644
index 000000000000..1c9691535e13
--- /dev/null
+++ b/drivers/edac/synopsys_edac.c
@@ -0,0 +1,535 @@
1/*
2 * Synopsys DDR ECC Driver
3 * This driver is based on ppc4xx_edac.c drivers
4 *
5 * Copyright (C) 2012 - 2014 Xilinx, Inc.
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file "COPYING" in the main directory of this archive
19 * for more details
20 */
21
22#include <linux/edac.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25
26#include "edac_core.h"
27
28/* Number of cs_rows needed per memory controller */
29#define SYNPS_EDAC_NR_CSROWS 1
30
31/* Number of channels per memory controller */
32#define SYNPS_EDAC_NR_CHANS 1
33
34/* Granularity of reported error in bytes */
35#define SYNPS_EDAC_ERR_GRAIN 1
36
37#define SYNPS_EDAC_MSG_SIZE 256
38
39#define SYNPS_EDAC_MOD_STRING "synps_edac"
40#define SYNPS_EDAC_MOD_VER "1"
41
42/* Synopsys DDR memory controller registers that are relevant to ECC */
43#define CTRL_OFST 0x0
44#define T_ZQ_OFST 0xA4
45
46/* ECC control register */
47#define ECC_CTRL_OFST 0xC4
48/* ECC log register */
49#define CE_LOG_OFST 0xC8
50/* ECC address register */
51#define CE_ADDR_OFST 0xCC
52/* ECC data[31:0] register */
53#define CE_DATA_31_0_OFST 0xD0
54
55/* Uncorrectable error info registers */
56#define UE_LOG_OFST 0xDC
57#define UE_ADDR_OFST 0xE0
58#define UE_DATA_31_0_OFST 0xE4
59
60#define STAT_OFST 0xF0
61#define SCRUB_OFST 0xF4
62
63/* Control register bit field definitions */
64#define CTRL_BW_MASK 0xC
65#define CTRL_BW_SHIFT 2
66
67#define DDRCTL_WDTH_16 1
68#define DDRCTL_WDTH_32 0
69
70/* ZQ register bit field definitions */
71#define T_ZQ_DDRMODE_MASK 0x2
72
73/* ECC control register bit field definitions */
74#define ECC_CTRL_CLR_CE_ERR 0x2
75#define ECC_CTRL_CLR_UE_ERR 0x1
76
77/* ECC correctable/uncorrectable error log register definitions */
78#define LOG_VALID 0x1
79#define CE_LOG_BITPOS_MASK 0xFE
80#define CE_LOG_BITPOS_SHIFT 1
81
82/* ECC correctable/uncorrectable error address register definitions */
83#define ADDR_COL_MASK 0xFFF
84#define ADDR_ROW_MASK 0xFFFF000
85#define ADDR_ROW_SHIFT 12
86#define ADDR_BANK_MASK 0x70000000
87#define ADDR_BANK_SHIFT 28
88
89/* ECC statistic register definitions */
90#define STAT_UECNT_MASK 0xFF
91#define STAT_CECNT_MASK 0xFF00
92#define STAT_CECNT_SHIFT 8
93
94/* ECC scrub register definitions */
95#define SCRUB_MODE_MASK 0x7
96#define SCRUB_MODE_SECDED 0x4
97
98/**
99 * struct ecc_error_info - ECC error log information
100 * @row: Row number
101 * @col: Column number
102 * @bank: Bank number
103 * @bitpos: Bit position
104 * @data: Data causing the error
105 */
106struct ecc_error_info {
107 u32 row;
108 u32 col;
109 u32 bank;
110 u32 bitpos;
111 u32 data;
112};
113
114/**
115 * struct synps_ecc_status - ECC status information to report
116 * @ce_cnt: Correctable error count
117 * @ue_cnt: Uncorrectable error count
118 * @ceinfo: Correctable error log information
119 * @ueinfo: Uncorrectable error log information
120 */
121struct synps_ecc_status {
122 u32 ce_cnt;
123 u32 ue_cnt;
124 struct ecc_error_info ceinfo;
125 struct ecc_error_info ueinfo;
126};
127
128/**
129 * struct synps_edac_priv - DDR memory controller private instance data
130 * @baseaddr: Base address of the DDR controller
131 * @message: Buffer for framing the event specific info
132 * @stat: ECC status information
133 * @ce_cnt: Correctable Error count
134 * @ue_cnt: Uncorrectable Error count
135 */
136struct synps_edac_priv {
137 void __iomem *baseaddr;
138 char message[SYNPS_EDAC_MSG_SIZE];
139 struct synps_ecc_status stat;
140 u32 ce_cnt;
141 u32 ue_cnt;
142};
143
144/**
145 * synps_edac_geterror_info - Get the current ecc error info
146 * @base: Pointer to the base address of the ddr memory controller
147 * @p: Pointer to the synopsys ecc status structure
148 *
149 * Determines there is any ecc error or not
150 *
151 * Return: one if there is no error otherwise returns zero
152 */
153static int synps_edac_geterror_info(void __iomem *base,
154 struct synps_ecc_status *p)
155{
156 u32 regval, clearval = 0;
157
158 regval = readl(base + STAT_OFST);
159 if (!regval)
160 return 1;
161
162 p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
163 p->ue_cnt = regval & STAT_UECNT_MASK;
164
165 regval = readl(base + CE_LOG_OFST);
166 if (!(p->ce_cnt && (regval & LOG_VALID)))
167 goto ue_err;
168
169 p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
170 regval = readl(base + CE_ADDR_OFST);
171 p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
172 p->ceinfo.col = regval & ADDR_COL_MASK;
173 p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
174 p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
175 edac_dbg(3, "ce bit position: %d data: %d\n", p->ceinfo.bitpos,
176 p->ceinfo.data);
177 clearval = ECC_CTRL_CLR_CE_ERR;
178
179ue_err:
180 regval = readl(base + UE_LOG_OFST);
181 if (!(p->ue_cnt && (regval & LOG_VALID)))
182 goto out;
183
184 regval = readl(base + UE_ADDR_OFST);
185 p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
186 p->ueinfo.col = regval & ADDR_COL_MASK;
187 p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
188 p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
189 clearval |= ECC_CTRL_CLR_UE_ERR;
190
191out:
192 writel(clearval, base + ECC_CTRL_OFST);
193 writel(0x0, base + ECC_CTRL_OFST);
194
195 return 0;
196}
197
198/**
199 * synps_edac_handle_error - Handle controller error types CE and UE
200 * @mci: Pointer to the edac memory controller instance
201 * @p: Pointer to the synopsys ecc status structure
202 *
203 * Handles the controller ECC correctable and un correctable error.
204 */
205static void synps_edac_handle_error(struct mem_ctl_info *mci,
206 struct synps_ecc_status *p)
207{
208 struct synps_edac_priv *priv = mci->pvt_info;
209 struct ecc_error_info *pinf;
210
211 if (p->ce_cnt) {
212 pinf = &p->ceinfo;
213 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
214 "DDR ECC error type :%s Row %d Bank %d Col %d ",
215 "CE", pinf->row, pinf->bank, pinf->col);
216 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
217 p->ce_cnt, 0, 0, 0, 0, 0, -1,
218 priv->message, "");
219 }
220
221 if (p->ue_cnt) {
222 pinf = &p->ueinfo;
223 snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
224 "DDR ECC error type :%s Row %d Bank %d Col %d ",
225 "UE", pinf->row, pinf->bank, pinf->col);
226 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
227 p->ue_cnt, 0, 0, 0, 0, 0, -1,
228 priv->message, "");
229 }
230
231 memset(p, 0, sizeof(*p));
232}
233
234/**
235 * synps_edac_check - Check controller for ECC errors
236 * @mci: Pointer to the edac memory controller instance
237 *
238 * Used to check and post ECC errors. Called by the polling thread
239 */
240static void synps_edac_check(struct mem_ctl_info *mci)
241{
242 struct synps_edac_priv *priv = mci->pvt_info;
243 int status;
244
245 status = synps_edac_geterror_info(priv->baseaddr, &priv->stat);
246 if (status)
247 return;
248
249 priv->ce_cnt += priv->stat.ce_cnt;
250 priv->ue_cnt += priv->stat.ue_cnt;
251 synps_edac_handle_error(mci, &priv->stat);
252
253 edac_dbg(3, "Total error count ce %d ue %d\n",
254 priv->ce_cnt, priv->ue_cnt);
255}
256
257/**
258 * synps_edac_get_dtype - Return the controller memory width
259 * @base: Pointer to the ddr memory controller base address
260 *
261 * Get the EDAC device type width appropriate for the current controller
262 * configuration.
263 *
264 * Return: a device type width enumeration.
265 */
266static enum dev_type synps_edac_get_dtype(const void __iomem *base)
267{
268 enum dev_type dt;
269 u32 width;
270
271 width = readl(base + CTRL_OFST);
272 width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT;
273
274 switch (width) {
275 case DDRCTL_WDTH_16:
276 dt = DEV_X2;
277 break;
278 case DDRCTL_WDTH_32:
279 dt = DEV_X4;
280 break;
281 default:
282 dt = DEV_UNKNOWN;
283 }
284
285 return dt;
286}
287
288/**
289 * synps_edac_get_eccstate - Return the controller ecc enable/disable status
290 * @base: Pointer to the ddr memory controller base address
291 *
292 * Get the ECC enable/disable status for the controller
293 *
294 * Return: a ecc status boolean i.e true/false - enabled/disabled.
295 */
296static bool synps_edac_get_eccstate(void __iomem *base)
297{
298 enum dev_type dt;
299 u32 ecctype;
300 bool state = false;
301
302 dt = synps_edac_get_dtype(base);
303 if (dt == DEV_UNKNOWN)
304 return state;
305
306 ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
307 if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
308 state = true;
309
310 return state;
311}
312
313/**
314 * synps_edac_get_memsize - reads the size of the attached memory device
315 *
316 * Return: the memory size in bytes
317 */
318static u32 synps_edac_get_memsize(void)
319{
320 struct sysinfo inf;
321
322 si_meminfo(&inf);
323
324 return inf.totalram * inf.mem_unit;
325}
326
327/**
328 * synps_edac_get_mtype - Returns controller memory type
329 * @base: pointer to the synopsys ecc status structure
330 *
331 * Get the EDAC memory type appropriate for the current controller
332 * configuration.
333 *
334 * Return: a memory type enumeration.
335 */
336static enum mem_type synps_edac_get_mtype(const void __iomem *base)
337{
338 enum mem_type mt;
339 u32 memtype;
340
341 memtype = readl(base + T_ZQ_OFST);
342
343 if (memtype & T_ZQ_DDRMODE_MASK)
344 mt = MEM_DDR3;
345 else
346 mt = MEM_DDR2;
347
348 return mt;
349}
350
351/**
352 * synps_edac_init_csrows - Initialize the cs row data
353 * @mci: Pointer to the edac memory controller instance
354 *
355 * Initializes the chip select rows associated with the EDAC memory
356 * controller instance
357 *
358 * Return: Unconditionally 0.
359 */
360static int synps_edac_init_csrows(struct mem_ctl_info *mci)
361{
362 struct csrow_info *csi;
363 struct dimm_info *dimm;
364 struct synps_edac_priv *priv = mci->pvt_info;
365 u32 size;
366 int row, j;
367
368 for (row = 0; row < mci->nr_csrows; row++) {
369 csi = mci->csrows[row];
370 size = synps_edac_get_memsize();
371
372 for (j = 0; j < csi->nr_channels; j++) {
373 dimm = csi->channels[j]->dimm;
374 dimm->edac_mode = EDAC_FLAG_SECDED;
375 dimm->mtype = synps_edac_get_mtype(priv->baseaddr);
376 dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
377 dimm->grain = SYNPS_EDAC_ERR_GRAIN;
378 dimm->dtype = synps_edac_get_dtype(priv->baseaddr);
379 }
380 }
381
382 return 0;
383}
384
385/**
386 * synps_edac_mc_init - Initialize driver instance
387 * @mci: Pointer to the edac memory controller instance
388 * @pdev: Pointer to the platform_device struct
389 *
390 * Performs initialization of the EDAC memory controller instance and
391 * related driver-private data associated with the memory controller the
392 * instance is bound to.
393 *
394 * Return: Always zero.
395 */
396static int synps_edac_mc_init(struct mem_ctl_info *mci,
397 struct platform_device *pdev)
398{
399 int status;
400 struct synps_edac_priv *priv;
401
402 mci->pdev = &pdev->dev;
403 priv = mci->pvt_info;
404 platform_set_drvdata(pdev, mci);
405
406 /* Initialize controller capabilities and configuration */
407 mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
408 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
409 mci->scrub_cap = SCRUB_HW_SRC;
410 mci->scrub_mode = SCRUB_NONE;
411
412 mci->edac_cap = EDAC_FLAG_SECDED;
413 mci->ctl_name = "synps_ddr_controller";
414 mci->dev_name = SYNPS_EDAC_MOD_STRING;
415 mci->mod_name = SYNPS_EDAC_MOD_VER;
416 mci->mod_ver = "1";
417
418 edac_op_state = EDAC_OPSTATE_POLL;
419 mci->edac_check = synps_edac_check;
420 mci->ctl_page_to_phys = NULL;
421
422 status = synps_edac_init_csrows(mci);
423
424 return status;
425}
426
427/**
428 * synps_edac_mc_probe - Check controller and bind driver
429 * @pdev: Pointer to the platform_device struct
430 *
431 * Probes a specific controller instance for binding with the driver.
432 *
433 * Return: 0 if the controller instance was successfully bound to the
434 * driver; otherwise, < 0 on error.
435 */
436static int synps_edac_mc_probe(struct platform_device *pdev)
437{
438 struct mem_ctl_info *mci;
439 struct edac_mc_layer layers[2];
440 struct synps_edac_priv *priv;
441 int rc;
442 struct resource *res;
443 void __iomem *baseaddr;
444
445 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
446 baseaddr = devm_ioremap_resource(&pdev->dev, res);
447 if (IS_ERR(baseaddr))
448 return PTR_ERR(baseaddr);
449
450 if (!synps_edac_get_eccstate(baseaddr)) {
451 edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
452 return -ENXIO;
453 }
454
455 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
456 layers[0].size = SYNPS_EDAC_NR_CSROWS;
457 layers[0].is_virt_csrow = true;
458 layers[1].type = EDAC_MC_LAYER_CHANNEL;
459 layers[1].size = SYNPS_EDAC_NR_CHANS;
460 layers[1].is_virt_csrow = false;
461
462 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
463 sizeof(struct synps_edac_priv));
464 if (!mci) {
465 edac_printk(KERN_ERR, EDAC_MC,
466 "Failed memory allocation for mc instance\n");
467 return -ENOMEM;
468 }
469
470 priv = mci->pvt_info;
471 priv->baseaddr = baseaddr;
472 rc = synps_edac_mc_init(mci, pdev);
473 if (rc) {
474 edac_printk(KERN_ERR, EDAC_MC,
475 "Failed to initialize instance\n");
476 goto free_edac_mc;
477 }
478
479 rc = edac_mc_add_mc(mci);
480 if (rc) {
481 edac_printk(KERN_ERR, EDAC_MC,
482 "Failed to register with EDAC core\n");
483 goto free_edac_mc;
484 }
485
486 /*
487 * Start capturing the correctable and uncorrectable errors. A write of
488 * 0 starts the counters.
489 */
490 writel(0x0, baseaddr + ECC_CTRL_OFST);
491 return rc;
492
493free_edac_mc:
494 edac_mc_free(mci);
495
496 return rc;
497}
498
499/**
500 * synps_edac_mc_remove - Unbind driver from controller
501 * @pdev: Pointer to the platform_device struct
502 *
503 * Return: Unconditionally 0
504 */
505static int synps_edac_mc_remove(struct platform_device *pdev)
506{
507 struct mem_ctl_info *mci = platform_get_drvdata(pdev);
508
509 edac_mc_del_mc(&pdev->dev);
510 edac_mc_free(mci);
511
512 return 0;
513}
514
515static struct of_device_id synps_edac_match[] = {
516 { .compatible = "xlnx,zynq-ddrc-a05", },
517 { /* end of table */ }
518};
519
520MODULE_DEVICE_TABLE(of, synps_edac_match);
521
522static struct platform_driver synps_edac_mc_driver = {
523 .driver = {
524 .name = "synopsys-edac",
525 .of_match_table = synps_edac_match,
526 },
527 .probe = synps_edac_mc_probe,
528 .remove = synps_edac_mc_remove,
529};
530
531module_platform_driver(synps_edac_mc_driver);
532
533MODULE_AUTHOR("Xilinx Inc");
534MODULE_DESCRIPTION("Synopsys DDR ECC driver");
535MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index f712d47f30d8..8de4da5c9ab6 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -12,11 +12,11 @@ config EFI_VARS
12 12
13 Note that using this driver in concert with efibootmgr requires 13 Note that using this driver in concert with efibootmgr requires
14 at least test release version 0.5.0-test3 or later, which is 14 at least test release version 0.5.0-test3 or later, which is
15 available from Matt Domsch's website located at: 15 available from:
16 <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz> 16 <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz>
17 17
18 Subsequent efibootmgr releases may be found at: 18 Subsequent efibootmgr releases may be found at:
19 <http://linux.dell.com/efibootmgr> 19 <http://github.com/vathpela/efibootmgr>
20 20
21config EFI_VARS_PSTORE 21config EFI_VARS_PSTORE
22 tristate "Register efivars backend for pstore" 22 tristate "Register efivars backend for pstore"
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9035c1b74d58..fccb464928c3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -115,15 +115,24 @@ EFI_ATTR_SHOW(fw_vendor);
115EFI_ATTR_SHOW(runtime); 115EFI_ATTR_SHOW(runtime);
116EFI_ATTR_SHOW(config_table); 116EFI_ATTR_SHOW(config_table);
117 117
118static ssize_t fw_platform_size_show(struct kobject *kobj,
119 struct kobj_attribute *attr, char *buf)
120{
121 return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
122}
123
118static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor); 124static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
119static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime); 125static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
120static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table); 126static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
127static struct kobj_attribute efi_attr_fw_platform_size =
128 __ATTR_RO(fw_platform_size);
121 129
122static struct attribute *efi_subsys_attrs[] = { 130static struct attribute *efi_subsys_attrs[] = {
123 &efi_attr_systab.attr, 131 &efi_attr_systab.attr,
124 &efi_attr_fw_vendor.attr, 132 &efi_attr_fw_vendor.attr,
125 &efi_attr_runtime.attr, 133 &efi_attr_runtime.attr,
126 &efi_attr_config_table.attr, 134 &efi_attr_config_table.attr,
135 &efi_attr_fw_platform_size.attr,
127 NULL, 136 NULL,
128}; 137};
129 138
@@ -272,15 +281,10 @@ static __init int match_config_table(efi_guid_t *guid,
272 unsigned long table, 281 unsigned long table,
273 efi_config_table_type_t *table_types) 282 efi_config_table_type_t *table_types)
274{ 283{
275 u8 str[EFI_VARIABLE_GUID_LEN + 1];
276 int i; 284 int i;
277 285
278 if (table_types) { 286 if (table_types) {
279 efi_guid_unparse(guid, str);
280
281 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) { 287 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
282 efi_guid_unparse(&table_types[i].guid, str);
283
284 if (!efi_guidcmp(*guid, table_types[i].guid)) { 288 if (!efi_guidcmp(*guid, table_types[i].guid)) {
285 *(table_types[i].ptr) = table; 289 *(table_types[i].ptr) = table;
286 pr_cont(" %s=0x%lx ", 290 pr_cont(" %s=0x%lx ",
@@ -403,8 +407,7 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
403 u64 val; 407 u64 val;
404 int i, len; 408 int i, len;
405 409
406 if (depth != 1 || 410 if (depth != 1 || strcmp(uname, "chosen") != 0)
407 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
408 return 0; 411 return 0;
409 412
410 for (i = 0; i < ARRAY_SIZE(dt_params); i++) { 413 for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index f256ecd8a176..7b2e0496e0c0 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -39,7 +39,7 @@
39 * fix locking per Peter Chubb's findings 39 * fix locking per Peter Chubb's findings
40 * 40 *
41 * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com> 41 * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
42 * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_unparse() 42 * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str()
43 * 43 *
44 * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com> 44 * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
45 * use list_for_each_safe when deleting vars. 45 * use list_for_each_safe when deleting vars.
@@ -128,7 +128,7 @@ efivar_guid_read(struct efivar_entry *entry, char *buf)
128 if (!entry || !buf) 128 if (!entry || !buf)
129 return 0; 129 return 0;
130 130
131 efi_guid_unparse(&var->VendorGuid, str); 131 efi_guid_to_str(&var->VendorGuid, str);
132 str += strlen(str); 132 str += strlen(str);
133 str += sprintf(str, "\n"); 133 str += sprintf(str, "\n");
134 134
@@ -569,7 +569,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
569 private variables from another's. */ 569 private variables from another's. */
570 570
571 *(short_name + strlen(short_name)) = '-'; 571 *(short_name + strlen(short_name)) = '-';
572 efi_guid_unparse(&new_var->var.VendorGuid, 572 efi_guid_to_str(&new_var->var.VendorGuid,
573 short_name + strlen(short_name)); 573 short_name + strlen(short_name));
574 574
575 new_var->kobj.kset = efivars_kset; 575 new_var->kobj.kset = efivars_kset;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b14bc2b9fb4d..8902f52e0998 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -24,3 +24,17 @@ lib-y := efi-stub-helper.o
24lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o 24lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o
25 25
26CFLAGS_fdt.o += -I$(srctree)/scripts/dtc/libfdt/ 26CFLAGS_fdt.o += -I$(srctree)/scripts/dtc/libfdt/
27
28#
29# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
30# code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
31# So let's apply the __init annotations at the section level, by prefixing
32# the section names directly. This will ensure that even all the inline string
33# literals are covered.
34#
35extra-$(CONFIG_ARM64) := $(lib-y)
36lib-$(CONFIG_ARM64) := $(patsubst %.o,%.init.o,$(lib-y))
37
38OBJCOPYFLAGS := --prefix-alloc-sections=.init
39$(obj)/%.init.o: $(obj)/%.o FORCE
40 $(call if_changed,objcopy)
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index eb48a1a1a576..2b3814702dcf 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -17,10 +17,10 @@
17 17
18#include "efistub.h" 18#include "efistub.h"
19 19
20static int __init efi_secureboot_enabled(efi_system_table_t *sys_table_arg) 20static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
21{ 21{
22 static efi_guid_t const var_guid __initconst = EFI_GLOBAL_VARIABLE_GUID; 22 static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
23 static efi_char16_t const var_name[] __initconst = { 23 static efi_char16_t const var_name[] = {
24 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 }; 24 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
25 25
26 efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable; 26 efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
@@ -164,7 +164,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
164 * for both archictectures, with the arch-specific code provided in the 164 * for both archictectures, with the arch-specific code provided in the
165 * handle_kernel_image() function. 165 * handle_kernel_image() function.
166 */ 166 */
167unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table, 167unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
168 unsigned long *image_addr) 168 unsigned long *image_addr)
169{ 169{
170 efi_loaded_image_t *image; 170 efi_loaded_image_t *image;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index a920fec8fe88..d073e3946383 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -66,25 +66,29 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
66 unsigned long key; 66 unsigned long key;
67 u32 desc_version; 67 u32 desc_version;
68 68
69 *map_size = sizeof(*m) * 32; 69 *map_size = 0;
70again: 70 *desc_size = 0;
71 key = 0;
72 status = efi_call_early(get_memory_map, map_size, NULL,
73 &key, desc_size, &desc_version);
74 if (status != EFI_BUFFER_TOO_SMALL)
75 return EFI_LOAD_ERROR;
76
71 /* 77 /*
72 * Add an additional efi_memory_desc_t because we're doing an 78 * Add an additional efi_memory_desc_t because we're doing an
73 * allocation which may be in a new descriptor region. 79 * allocation which may be in a new descriptor region.
74 */ 80 */
75 *map_size += sizeof(*m); 81 *map_size += *desc_size;
76 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 82 status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
77 *map_size, (void **)&m); 83 *map_size, (void **)&m);
78 if (status != EFI_SUCCESS) 84 if (status != EFI_SUCCESS)
79 goto fail; 85 goto fail;
80 86
81 *desc_size = 0;
82 key = 0;
83 status = efi_call_early(get_memory_map, map_size, m, 87 status = efi_call_early(get_memory_map, map_size, m,
84 &key, desc_size, &desc_version); 88 &key, desc_size, &desc_version);
85 if (status == EFI_BUFFER_TOO_SMALL) { 89 if (status == EFI_BUFFER_TOO_SMALL) {
86 efi_call_early(free_pool, m); 90 efi_call_early(free_pool, m);
87 goto again; 91 return EFI_LOAD_ERROR;
88 } 92 }
89 93
90 if (status != EFI_SUCCESS) 94 if (status != EFI_SUCCESS)
@@ -101,7 +105,7 @@ fail:
101} 105}
102 106
103 107
104unsigned long __init get_dram_base(efi_system_table_t *sys_table_arg) 108unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
105{ 109{
106 efi_status_t status; 110 efi_status_t status;
107 unsigned long map_size; 111 unsigned long map_size;
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 018c29a26615..87b8e3b900d2 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -191,7 +191,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj)
191 191
192 return 0; 192 return 0;
193out_add_entry: 193out_add_entry:
194 for (j = i - 1; j > 0; j--) { 194 for (j = i - 1; j >= 0; j--) {
195 entry = *(map_entries + j); 195 entry = *(map_entries + j);
196 kobject_put(&entry->kobj); 196 kobject_put(&entry->kobj);
197 } 197 }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 4d6b26979fbd..bb3725b672cf 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -861,8 +861,8 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
861 break; 861 break;
862 862
863 case ACPI_RESOURCE_TYPE_ADDRESS64: 863 case ACPI_RESOURCE_TYPE_ADDRESS64:
864 hyperv_mmio.start = res->data.address64.minimum; 864 hyperv_mmio.start = res->data.address64.address.minimum;
865 hyperv_mmio.end = res->data.address64.maximum; 865 hyperv_mmio.end = res->data.address64.address.maximum;
866 break; 866 break;
867 } 867 }
868 868
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index a7de26d1ac80..d931cbbed240 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1389,6 +1389,7 @@ config SENSORS_ADS1015
1389config SENSORS_ADS7828 1389config SENSORS_ADS7828
1390 tristate "Texas Instruments ADS7828 and compatibles" 1390 tristate "Texas Instruments ADS7828 and compatibles"
1391 depends on I2C 1391 depends on I2C
1392 select REGMAP_I2C
1392 help 1393 help
1393 If you say yes here you get support for Texas Instruments ADS7828 and 1394 If you say yes here you get support for Texas Instruments ADS7828 and
1394 ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while 1395 ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while
@@ -1430,8 +1431,8 @@ config SENSORS_INA2XX
1430 tristate "Texas Instruments INA219 and compatibles" 1431 tristate "Texas Instruments INA219 and compatibles"
1431 depends on I2C 1432 depends on I2C
1432 help 1433 help
1433 If you say yes here you get support for INA219, INA220, INA226, and 1434 If you say yes here you get support for INA219, INA220, INA226,
1434 INA230 power monitor chips. 1435 INA230, and INA231 power monitor chips.
1435 1436
1436 The INA2xx driver is configured for the default configuration of 1437 The INA2xx driver is configured for the default configuration of
1437 the part as described in the datasheet. 1438 the part as described in the datasheet.
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 13875968c844..6cb89c0ebab6 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev,
221 struct abx500_temp *data = dev_get_drvdata(dev); 221 struct abx500_temp *data = dev_get_drvdata(dev);
222 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 222 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
223 223
224 return sprintf(buf, "%ld\n", data->min[attr->index]); 224 return sprintf(buf, "%lu\n", data->min[attr->index]);
225} 225}
226 226
227static ssize_t show_max(struct device *dev, 227static ssize_t show_max(struct device *dev,
@@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev,
230 struct abx500_temp *data = dev_get_drvdata(dev); 230 struct abx500_temp *data = dev_get_drvdata(dev);
231 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 231 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
232 232
233 return sprintf(buf, "%ld\n", data->max[attr->index]); 233 return sprintf(buf, "%lu\n", data->max[attr->index]);
234} 234}
235 235
236static ssize_t show_max_hyst(struct device *dev, 236static ssize_t show_max_hyst(struct device *dev,
@@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev,
239 struct abx500_temp *data = dev_get_drvdata(dev); 239 struct abx500_temp *data = dev_get_drvdata(dev);
240 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 240 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
241 241
242 return sprintf(buf, "%ld\n", data->max_hyst[attr->index]); 242 return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
243} 243}
244 244
245static ssize_t show_min_alarm(struct device *dev, 245static ssize_t show_min_alarm(struct device *dev,
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index f4f9b219bf16..11955467fc0f 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/hwmon.h> 17#include <linux/hwmon.h>
18#include <linux/hwmon-sysfs.h> 18#include <linux/hwmon-sysfs.h>
19#include <linux/bitops.h>
19 20
20/* 21/*
21 * AD7314 temperature masks 22 * AD7314 temperature masks
@@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
67 switch (spi_get_device_id(chip->spi_dev)->driver_data) { 68 switch (spi_get_device_id(chip->spi_dev)->driver_data) {
68 case ad7314: 69 case ad7314:
69 data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; 70 data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
70 data = (data << 6) >> 6; 71 data = sign_extend32(data, 9);
71 72
72 return sprintf(buf, "%d\n", 250 * data); 73 return sprintf(buf, "%d\n", 250 * data);
73 case adt7301: 74 case adt7301:
@@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
78 * register. 1lsb - 31.25 milli degrees centigrade 79 * register. 1lsb - 31.25 milli degrees centigrade
79 */ 80 */
80 data = ret & ADT7301_TEMP_MASK; 81 data = ret & ADT7301_TEMP_MASK;
81 data = (data << 2) >> 2; 82 data = sign_extend32(data, 13);
82 83
83 return sprintf(buf, "%d\n", 84 return sprintf(buf, "%d\n",
84 DIV_ROUND_CLOSEST(data * 3125, 100)); 85 DIV_ROUND_CLOSEST(data * 3125, 100));
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 0625e50d7a6e..ad2b47e40345 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -27,6 +27,7 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/regulator/consumer.h> 28#include <linux/regulator/consumer.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/bitops.h>
30 31
31/* Addresses to scan 32/* Addresses to scan
32 * The chip also supports addresses 0x35..0x37. Don't scan those addresses 33 * The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev,
189 if (IS_ERR(data)) 190 if (IS_ERR(data))
190 return PTR_ERR(data); 191 return PTR_ERR(data);
191 192
192 temp = (data->temp[index] << 7) >> 7; /* sign extend */ 193 temp = sign_extend32(data->temp[index], 8);
193 return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */ 194 return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
194} 195}
195 196
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index a622d40eec17..bce4e9ff21bf 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -30,14 +30,12 @@
30#include <linux/hwmon-sysfs.h> 30#include <linux/hwmon-sysfs.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/jiffies.h>
34#include <linux/module.h> 33#include <linux/module.h>
35#include <linux/mutex.h>
36#include <linux/platform_data/ads7828.h> 34#include <linux/platform_data/ads7828.h>
35#include <linux/regmap.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38 37
39/* The ADS7828 registers */ 38/* The ADS7828 registers */
40#define ADS7828_NCH 8 /* 8 channels supported */
41#define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ 39#define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */
42#define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */ 40#define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */
43#define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */ 41#define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */
@@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 };
50 48
51/* Client specific data */ 49/* Client specific data */
52struct ads7828_data { 50struct ads7828_data {
53 struct i2c_client *client; 51 struct regmap *regmap;
54 struct mutex update_lock; /* Mutex protecting updates */
55 unsigned long last_updated; /* Last updated time (in jiffies) */
56 u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH samples */
57 bool valid; /* Validity flag */
58 bool diff_input; /* Differential input */
59 bool ext_vref; /* External voltage reference */
60 unsigned int vref_mv; /* voltage reference value */
61 u8 cmd_byte; /* Command byte without channel bits */ 52 u8 cmd_byte; /* Command byte without channel bits */
62 unsigned int lsb_resol; /* Resolution of the ADC sample LSB */ 53 unsigned int lsb_resol; /* Resolution of the ADC sample LSB */
63 s32 (*read_channel)(const struct i2c_client *client, u8 command);
64}; 54};
65 55
66/* Command byte C2,C1,C0 - see datasheet */ 56/* Command byte C2,C1,C0 - see datasheet */
@@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch)
69 return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4); 59 return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
70} 60}
71 61
72/* Update data for the device (all 8 channels) */
73static struct ads7828_data *ads7828_update_device(struct device *dev)
74{
75 struct ads7828_data *data = dev_get_drvdata(dev);
76 struct i2c_client *client = data->client;
77
78 mutex_lock(&data->update_lock);
79
80 if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
81 || !data->valid) {
82 unsigned int ch;
83 dev_dbg(&client->dev, "Starting ads7828 update\n");
84
85 for (ch = 0; ch < ADS7828_NCH; ch++) {
86 u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch);
87 data->adc_input[ch] = data->read_channel(client, cmd);
88 }
89 data->last_updated = jiffies;
90 data->valid = true;
91 }
92
93 mutex_unlock(&data->update_lock);
94
95 return data;
96}
97
98/* sysfs callback function */ 62/* sysfs callback function */
99static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da, 63static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da,
100 char *buf) 64 char *buf)
101{ 65{
102 struct sensor_device_attribute *attr = to_sensor_dev_attr(da); 66 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
103 struct ads7828_data *data = ads7828_update_device(dev); 67 struct ads7828_data *data = dev_get_drvdata(dev);
104 unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] * 68 u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index);
105 data->lsb_resol, 1000); 69 unsigned int regval;
70 int err;
106 71
107 return sprintf(buf, "%d\n", value); 72 err = regmap_read(data->regmap, cmd, &regval);
73 if (err < 0)
74 return err;
75
76 return sprintf(buf, "%d\n",
77 DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000));
108} 78}
109 79
110static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0); 80static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0);
@@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = {
130 100
131ATTRIBUTE_GROUPS(ads7828); 101ATTRIBUTE_GROUPS(ads7828);
132 102
103static const struct regmap_config ads2828_regmap_config = {
104 .reg_bits = 8,
105 .val_bits = 16,
106};
107
108static const struct regmap_config ads2830_regmap_config = {
109 .reg_bits = 8,
110 .val_bits = 8,
111};
112
133static int ads7828_probe(struct i2c_client *client, 113static int ads7828_probe(struct i2c_client *client,
134 const struct i2c_device_id *id) 114 const struct i2c_device_id *id)
135{ 115{
@@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client,
137 struct ads7828_platform_data *pdata = dev_get_platdata(dev); 117 struct ads7828_platform_data *pdata = dev_get_platdata(dev);
138 struct ads7828_data *data; 118 struct ads7828_data *data;
139 struct device *hwmon_dev; 119 struct device *hwmon_dev;
120 unsigned int vref_mv = ADS7828_INT_VREF_MV;
121 bool diff_input = false;
122 bool ext_vref = false;
140 123
141 data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL); 124 data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
142 if (!data) 125 if (!data)
143 return -ENOMEM; 126 return -ENOMEM;
144 127
145 if (pdata) { 128 if (pdata) {
146 data->diff_input = pdata->diff_input; 129 diff_input = pdata->diff_input;
147 data->ext_vref = pdata->ext_vref; 130 ext_vref = pdata->ext_vref;
148 if (data->ext_vref) 131 if (ext_vref && pdata->vref_mv)
149 data->vref_mv = pdata->vref_mv; 132 vref_mv = pdata->vref_mv;
150 } 133 }
151 134
152 /* Bound Vref with min/max values if it was provided */ 135 /* Bound Vref with min/max values */
153 if (data->vref_mv) 136 vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
154 data->vref_mv = clamp_val(data->vref_mv, 137 ADS7828_EXT_VREF_MV_MAX);
155 ADS7828_EXT_VREF_MV_MIN,
156 ADS7828_EXT_VREF_MV_MAX);
157 else
158 data->vref_mv = ADS7828_INT_VREF_MV;
159 138
160 /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */ 139 /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */
161 if (id->driver_data == ads7828) { 140 if (id->driver_data == ads7828) {
162 data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096); 141 data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096);
163 data->read_channel = i2c_smbus_read_word_swapped; 142 data->regmap = devm_regmap_init_i2c(client,
143 &ads2828_regmap_config);
164 } else { 144 } else {
165 data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256); 145 data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256);
166 data->read_channel = i2c_smbus_read_byte_data; 146 data->regmap = devm_regmap_init_i2c(client,
147 &ads2830_regmap_config);
167 } 148 }
168 149
169 data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; 150 data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
170 if (!data->diff_input) 151 if (!diff_input)
171 data->cmd_byte |= ADS7828_CMD_SD_SE; 152 data->cmd_byte |= ADS7828_CMD_SD_SE;
172 153
173 data->client = client;
174 mutex_init(&data->update_lock);
175
176 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 154 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
177 data, 155 data,
178 ads7828_groups); 156 ads7828_groups);
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e01feba909c3..d1542b7d4bc3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -35,6 +35,7 @@
35#include <linux/hwmon-sysfs.h> 35#include <linux/hwmon-sysfs.h>
36#include <linux/jiffies.h> 36#include <linux/jiffies.h>
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/delay.h>
38 39
39#include <linux/platform_data/ina2xx.h> 40#include <linux/platform_data/ina2xx.h>
40 41
@@ -51,7 +52,6 @@
51#define INA226_ALERT_LIMIT 0x07 52#define INA226_ALERT_LIMIT 0x07
52#define INA226_DIE_ID 0xFF 53#define INA226_DIE_ID 0xFF
53 54
54
55/* register count */ 55/* register count */
56#define INA219_REGISTERS 6 56#define INA219_REGISTERS 6
57#define INA226_REGISTERS 8 57#define INA226_REGISTERS 8
@@ -64,6 +64,24 @@
64 64
65/* worst case is 68.10 ms (~14.6Hz, ina219) */ 65/* worst case is 68.10 ms (~14.6Hz, ina219) */
66#define INA2XX_CONVERSION_RATE 15 66#define INA2XX_CONVERSION_RATE 15
67#define INA2XX_MAX_DELAY 69 /* worst case delay in ms */
68
69#define INA2XX_RSHUNT_DEFAULT 10000
70
71/* bit mask for reading the averaging setting in the configuration register */
72#define INA226_AVG_RD_MASK 0x0E00
73
74#define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9)
75#define INA226_SHIFT_AVG(val) ((val) << 9)
76
77/* common attrs, ina226 attrs and NULL */
78#define INA2XX_MAX_ATTRIBUTE_GROUPS 3
79
80/*
81 * Both bus voltage and shunt voltage conversion times for ina226 are set
82 * to 0b0100 on POR, which translates to 2200 microseconds in total.
83 */
84#define INA226_TOTAL_CONV_TIME_DEFAULT 2200
67 85
68enum ina2xx_ids { ina219, ina226 }; 86enum ina2xx_ids { ina219, ina226 };
69 87
@@ -81,11 +99,16 @@ struct ina2xx_data {
81 struct i2c_client *client; 99 struct i2c_client *client;
82 const struct ina2xx_config *config; 100 const struct ina2xx_config *config;
83 101
102 long rshunt;
103 u16 curr_config;
104
84 struct mutex update_lock; 105 struct mutex update_lock;
85 bool valid; 106 bool valid;
86 unsigned long last_updated; 107 unsigned long last_updated;
108 int update_interval; /* in jiffies */
87 109
88 int kind; 110 int kind;
111 const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
89 u16 regs[INA2XX_MAX_REGISTERS]; 112 u16 regs[INA2XX_MAX_REGISTERS];
90}; 113};
91 114
@@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = {
110 }, 133 },
111}; 134};
112 135
113static struct ina2xx_data *ina2xx_update_device(struct device *dev) 136/*
137 * Available averaging rates for ina226. The indices correspond with
138 * the bit values expected by the chip (according to the ina226 datasheet,
139 * table 3 AVG bit settings, found at
140 * http://www.ti.com/lit/ds/symlink/ina226.pdf.
141 */
142static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
143
144static int ina226_avg_bits(int avg)
145{
146 int i;
147
148 /* Get the closest average from the tab. */
149 for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
150 if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
151 break;
152 }
153
154 return i; /* Return 0b0111 for values greater than 1024. */
155}
156
157static int ina226_reg_to_interval(u16 config)
158{
159 int avg = ina226_avg_tab[INA226_READ_AVG(config)];
160
161 /*
162 * Multiply the total conversion time by the number of averages.
163 * Return the result in milliseconds.
164 */
165 return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
166}
167
168static u16 ina226_interval_to_reg(int interval, u16 config)
169{
170 int avg, avg_bits;
171
172 avg = DIV_ROUND_CLOSEST(interval * 1000,
173 INA226_TOTAL_CONV_TIME_DEFAULT);
174 avg_bits = ina226_avg_bits(avg);
175
176 return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
177}
178
179static void ina226_set_update_interval(struct ina2xx_data *data)
180{
181 int ms;
182
183 ms = ina226_reg_to_interval(data->curr_config);
184 data->update_interval = msecs_to_jiffies(ms);
185}
186
187static int ina2xx_calibrate(struct ina2xx_data *data)
188{
189 u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
190 data->rshunt);
191
192 return i2c_smbus_write_word_swapped(data->client,
193 INA2XX_CALIBRATION, val);
194}
195
196/*
197 * Initialize the configuration and calibration registers.
198 */
199static int ina2xx_init(struct ina2xx_data *data)
114{ 200{
115 struct ina2xx_data *data = dev_get_drvdata(dev);
116 struct i2c_client *client = data->client; 201 struct i2c_client *client = data->client;
117 struct ina2xx_data *ret = data; 202 int ret;
118 203
119 mutex_lock(&data->update_lock); 204 /* device configuration */
205 ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
206 data->curr_config);
207 if (ret < 0)
208 return ret;
120 209
121 if (time_after(jiffies, data->last_updated + 210 /*
122 HZ / INA2XX_CONVERSION_RATE) || !data->valid) { 211 * Set current LSB to 1mA, shunt is in uOhms
212 * (equation 13 in datasheet).
213 */
214 return ina2xx_calibrate(data);
215}
123 216
124 int i; 217static int ina2xx_do_update(struct device *dev)
218{
219 struct ina2xx_data *data = dev_get_drvdata(dev);
220 struct i2c_client *client = data->client;
221 int i, rv, retry;
125 222
126 dev_dbg(&client->dev, "Starting ina2xx update\n"); 223 dev_dbg(&client->dev, "Starting ina2xx update\n");
127 224
225 for (retry = 5; retry; retry--) {
128 /* Read all registers */ 226 /* Read all registers */
129 for (i = 0; i < data->config->registers; i++) { 227 for (i = 0; i < data->config->registers; i++) {
130 int rv = i2c_smbus_read_word_swapped(client, i); 228 rv = i2c_smbus_read_word_swapped(client, i);
131 if (rv < 0) { 229 if (rv < 0)
132 ret = ERR_PTR(rv); 230 return rv;
133 goto abort;
134 }
135 data->regs[i] = rv; 231 data->regs[i] = rv;
136 } 232 }
233
234 /*
235 * If the current value in the calibration register is 0, the
236 * power and current registers will also remain at 0. In case
237 * the chip has been reset let's check the calibration
238 * register and reinitialize if needed.
239 */
240 if (data->regs[INA2XX_CALIBRATION] == 0) {
241 dev_warn(dev, "chip not calibrated, reinitializing\n");
242
243 rv = ina2xx_init(data);
244 if (rv < 0)
245 return rv;
246
247 /*
248 * Let's make sure the power and current registers
249 * have been updated before trying again.
250 */
251 msleep(INA2XX_MAX_DELAY);
252 continue;
253 }
254
137 data->last_updated = jiffies; 255 data->last_updated = jiffies;
138 data->valid = 1; 256 data->valid = 1;
257
258 return 0;
139 } 259 }
140abort: 260
261 /*
262 * If we're here then although all write operations succeeded, the
263 * chip still returns 0 in the calibration register. Nothing more we
264 * can do here.
265 */
266 dev_err(dev, "unable to reinitialize the chip\n");
267 return -ENODEV;
268}
269
270static struct ina2xx_data *ina2xx_update_device(struct device *dev)
271{
272 struct ina2xx_data *data = dev_get_drvdata(dev);
273 struct ina2xx_data *ret = data;
274 unsigned long after;
275 int rv;
276
277 mutex_lock(&data->update_lock);
278
279 after = data->last_updated + data->update_interval;
280 if (time_after(jiffies, after) || !data->valid) {
281 rv = ina2xx_do_update(dev);
282 if (rv < 0)
283 ret = ERR_PTR(rv);
284 }
285
141 mutex_unlock(&data->update_lock); 286 mutex_unlock(&data->update_lock);
142 return ret; 287 return ret;
143} 288}
@@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
164 /* signed register, LSB=1mA (selected), in mA */ 309 /* signed register, LSB=1mA (selected), in mA */
165 val = (s16)data->regs[reg]; 310 val = (s16)data->regs[reg];
166 break; 311 break;
312 case INA2XX_CALIBRATION:
313 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
314 data->regs[reg]);
315 break;
167 default: 316 default:
168 /* programmer goofed */ 317 /* programmer goofed */
169 WARN_ON_ONCE(1); 318 WARN_ON_ONCE(1);
@@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev,
187 ina2xx_get_value(data, attr->index)); 336 ina2xx_get_value(data, attr->index));
188} 337}
189 338
339static ssize_t ina2xx_set_shunt(struct device *dev,
340 struct device_attribute *da,
341 const char *buf, size_t count)
342{
343 struct ina2xx_data *data = ina2xx_update_device(dev);
344 unsigned long val;
345 int status;
346
347 if (IS_ERR(data))
348 return PTR_ERR(data);
349
350 status = kstrtoul(buf, 10, &val);
351 if (status < 0)
352 return status;
353
354 if (val == 0 ||
355 /* Values greater than the calibration factor make no sense. */
356 val > data->config->calibration_factor)
357 return -EINVAL;
358
359 mutex_lock(&data->update_lock);
360 data->rshunt = val;
361 status = ina2xx_calibrate(data);
362 mutex_unlock(&data->update_lock);
363 if (status < 0)
364 return status;
365
366 return count;
367}
368
369static ssize_t ina226_set_interval(struct device *dev,
370 struct device_attribute *da,
371 const char *buf, size_t count)
372{
373 struct ina2xx_data *data = dev_get_drvdata(dev);
374 unsigned long val;
375 int status;
376
377 status = kstrtoul(buf, 10, &val);
378 if (status < 0)
379 return status;
380
381 if (val > INT_MAX || val == 0)
382 return -EINVAL;
383
384 mutex_lock(&data->update_lock);
385 data->curr_config = ina226_interval_to_reg(val,
386 data->regs[INA2XX_CONFIG]);
387 status = i2c_smbus_write_word_swapped(data->client,
388 INA2XX_CONFIG,
389 data->curr_config);
390
391 ina226_set_update_interval(data);
392 /* Make sure the next access re-reads all registers. */
393 data->valid = 0;
394 mutex_unlock(&data->update_lock);
395 if (status < 0)
396 return status;
397
398 return count;
399}
400
401static ssize_t ina226_show_interval(struct device *dev,
402 struct device_attribute *da, char *buf)
403{
404 struct ina2xx_data *data = ina2xx_update_device(dev);
405
406 if (IS_ERR(data))
407 return PTR_ERR(data);
408
409 /*
410 * We don't use data->update_interval here as we want to display
411 * the actual interval used by the chip and jiffies_to_msecs()
412 * doesn't seem to be accurate enough.
413 */
414 return snprintf(buf, PAGE_SIZE, "%d\n",
415 ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
416}
417
190/* shunt voltage */ 418/* shunt voltage */
191static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL, 419static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL,
192 INA2XX_SHUNT_VOLTAGE); 420 INA2XX_SHUNT_VOLTAGE);
@@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL,
203static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, 431static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
204 INA2XX_POWER); 432 INA2XX_POWER);
205 433
434/* shunt resistance */
435static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
436 ina2xx_show_value, ina2xx_set_shunt,
437 INA2XX_CALIBRATION);
438
439/* update interval (ina226 only) */
440static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
441 ina226_show_interval, ina226_set_interval, 0);
442
206/* pointers to created device attributes */ 443/* pointers to created device attributes */
207static struct attribute *ina2xx_attrs[] = { 444static struct attribute *ina2xx_attrs[] = {
208 &sensor_dev_attr_in0_input.dev_attr.attr, 445 &sensor_dev_attr_in0_input.dev_attr.attr,
209 &sensor_dev_attr_in1_input.dev_attr.attr, 446 &sensor_dev_attr_in1_input.dev_attr.attr,
210 &sensor_dev_attr_curr1_input.dev_attr.attr, 447 &sensor_dev_attr_curr1_input.dev_attr.attr,
211 &sensor_dev_attr_power1_input.dev_attr.attr, 448 &sensor_dev_attr_power1_input.dev_attr.attr,
449 &sensor_dev_attr_shunt_resistor.dev_attr.attr,
212 NULL, 450 NULL,
213}; 451};
214ATTRIBUTE_GROUPS(ina2xx); 452
453static const struct attribute_group ina2xx_group = {
454 .attrs = ina2xx_attrs,
455};
456
457static struct attribute *ina226_attrs[] = {
458 &sensor_dev_attr_update_interval.dev_attr.attr,
459 NULL,
460};
461
462static const struct attribute_group ina226_group = {
463 .attrs = ina226_attrs,
464};
215 465
216static int ina2xx_probe(struct i2c_client *client, 466static int ina2xx_probe(struct i2c_client *client,
217 const struct i2c_device_id *id) 467 const struct i2c_device_id *id)
@@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client,
221 struct device *dev = &client->dev; 471 struct device *dev = &client->dev;
222 struct ina2xx_data *data; 472 struct ina2xx_data *data;
223 struct device *hwmon_dev; 473 struct device *hwmon_dev;
224 long shunt = 10000; /* default shunt value 10mOhms */
225 u32 val; 474 u32 val;
226 int ret; 475 int ret, group = 0;
227 476
228 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) 477 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
229 return -ENODEV; 478 return -ENODEV;
@@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client,
234 483
235 if (dev_get_platdata(dev)) { 484 if (dev_get_platdata(dev)) {
236 pdata = dev_get_platdata(dev); 485 pdata = dev_get_platdata(dev);
237 shunt = pdata->shunt_uohms; 486 data->rshunt = pdata->shunt_uohms;
238 } else if (!of_property_read_u32(dev->of_node, 487 } else if (!of_property_read_u32(dev->of_node,
239 "shunt-resistor", &val)) { 488 "shunt-resistor", &val)) {
240 shunt = val; 489 data->rshunt = val;
490 } else {
491 data->rshunt = INA2XX_RSHUNT_DEFAULT;
241 } 492 }
242 493
243 if (shunt <= 0)
244 return -ENODEV;
245
246 /* set the device type */ 494 /* set the device type */
247 data->kind = id->driver_data; 495 data->kind = id->driver_data;
248 data->config = &ina2xx_config[data->kind]; 496 data->config = &ina2xx_config[data->kind];
249 497 data->curr_config = data->config->config_default;
250 /* device configuration */ 498 data->client = client;
251 ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
252 data->config->config_default);
253 if (ret < 0) {
254 dev_err(dev,
255 "error writing to the config register: %d", ret);
256 return -ENODEV;
257 }
258 499
259 /* 500 /*
260 * Set current LSB to 1mA, shunt is in uOhms 501 * Ina226 has a variable update_interval. For ina219 we
261 * (equation 13 in datasheet). 502 * use a constant value.
262 */ 503 */
263 ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, 504 if (data->kind == ina226)
264 data->config->calibration_factor / shunt); 505 ina226_set_update_interval(data);
506 else
507 data->update_interval = HZ / INA2XX_CONVERSION_RATE;
508
509 if (data->rshunt <= 0 ||
510 data->rshunt > data->config->calibration_factor)
511 return -ENODEV;
512
513 ret = ina2xx_init(data);
265 if (ret < 0) { 514 if (ret < 0) {
266 dev_err(dev, 515 dev_err(dev, "error configuring the device: %d\n", ret);
267 "error writing to the calibration register: %d", ret);
268 return -ENODEV; 516 return -ENODEV;
269 } 517 }
270 518
271 data->client = client;
272 mutex_init(&data->update_lock); 519 mutex_init(&data->update_lock);
273 520
521 data->groups[group++] = &ina2xx_group;
522 if (data->kind == ina226)
523 data->groups[group++] = &ina226_group;
524
274 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, 525 hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
275 data, ina2xx_groups); 526 data, data->groups);
276 if (IS_ERR(hwmon_dev)) 527 if (IS_ERR(hwmon_dev))
277 return PTR_ERR(hwmon_dev); 528 return PTR_ERR(hwmon_dev);
278 529
279 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", 530 dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
280 id->name, shunt); 531 id->name, data->rshunt);
281 532
282 return 0; 533 return 0;
283} 534}
@@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = {
287 { "ina220", ina219 }, 538 { "ina220", ina219 },
288 { "ina226", ina226 }, 539 { "ina226", ina226 },
289 { "ina230", ina226 }, 540 { "ina230", ina226 },
541 { "ina231", ina226 },
290 { } 542 { }
291}; 543};
292MODULE_DEVICE_TABLE(i2c, ina2xx_id); 544MODULE_DEVICE_TABLE(i2c, ina2xx_id);
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 388f8bcd898e..996bdfd5cf25 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -201,7 +201,7 @@ struct jc42_data {
201#define JC42_TEMP_MIN 0 201#define JC42_TEMP_MIN 0
202#define JC42_TEMP_MAX 125000 202#define JC42_TEMP_MAX 125000
203 203
204static u16 jc42_temp_to_reg(int temp, bool extended) 204static u16 jc42_temp_to_reg(long temp, bool extended)
205{ 205{
206 int ntemp = clamp_val(temp, 206 int ntemp = clamp_val(temp,
207 extended ? JC42_TEMP_MIN_EXTENDED : 207 extended ? JC42_TEMP_MIN_EXTENDED :
@@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended)
213 213
214static int jc42_temp_from_reg(s16 reg) 214static int jc42_temp_from_reg(s16 reg)
215{ 215{
216 reg &= 0x1fff; 216 reg = sign_extend32(reg, 12);
217
218 /* sign extend register */
219 if (reg & 0x1000)
220 reg |= 0xf000;
221 217
222 /* convert from 0.0625 to 0.001 resolution */ 218 /* convert from 0.0625 to 0.001 resolution */
223 return reg * 125 / 2; 219 return reg * 125 / 2;
@@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
308 const char *buf, size_t count) 304 const char *buf, size_t count)
309{ 305{
310 struct jc42_data *data = dev_get_drvdata(dev); 306 struct jc42_data *data = dev_get_drvdata(dev);
311 unsigned long val; 307 long val;
312 int diff, hyst; 308 int diff, hyst;
313 int err; 309 int err;
314 int ret = count; 310 int ret = count;
315 311
316 if (kstrtoul(buf, 10, &val) < 0) 312 if (kstrtol(buf, 10, &val) < 0)
317 return -EINVAL; 313 return -EINVAL;
318 314
315 val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED :
316 JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX);
319 diff = jc42_temp_from_reg(data->temp[t_crit]) - val; 317 diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
318
320 hyst = 0; 319 hyst = 0;
321 if (diff > 0) { 320 if (diff > 0) {
322 if (diff < 2250) 321 if (diff < 2250)
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index ec5678289e4a..55765790907b 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
779 return reg != REG_BANK && reg <= 0x20; 779 return reg != REG_BANK && reg <= 0x20;
780} 780}
781 781
782static struct regmap_config nct7802_regmap_config = { 782static const struct regmap_config nct7802_regmap_config = {
783 .reg_bits = 8, 783 .reg_bits = 8,
784 .val_bits = 8, 784 .val_bits = 8,
785 .cache_type = REGCACHE_RBTREE, 785 .cache_type = REGCACHE_RBTREE,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index ba9f478f64ee..9da2735f1424 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client)
253 return 0; 253 return 0;
254} 254}
255 255
256#ifdef CONFIG_PM 256#ifdef CONFIG_PM_SLEEP
257static int tmp102_suspend(struct device *dev) 257static int tmp102_suspend(struct device *dev)
258{ 258{
259 struct i2c_client *client = to_i2c_client(dev); 259 struct i2c_client *client = to_i2c_client(dev);
@@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev)
279 config &= ~TMP102_CONF_SD; 279 config &= ~TMP102_CONF_SD;
280 return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); 280 return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
281} 281}
282
283static const struct dev_pm_ops tmp102_dev_pm_ops = {
284 .suspend = tmp102_suspend,
285 .resume = tmp102_resume,
286};
287
288#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
289#else
290#define TMP102_DEV_PM_OPS NULL
291#endif /* CONFIG_PM */ 282#endif /* CONFIG_PM */
292 283
284static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
285
293static const struct i2c_device_id tmp102_id[] = { 286static const struct i2c_device_id tmp102_id[] = {
294 { "tmp102", 0 }, 287 { "tmp102", 0 },
295 { } 288 { }
@@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id);
298 291
299static struct i2c_driver tmp102_driver = { 292static struct i2c_driver tmp102_driver = {
300 .driver.name = DRIVER_NAME, 293 .driver.name = DRIVER_NAME,
301 .driver.pm = TMP102_DEV_PM_OPS, 294 .driver.pm = &tmp102_dev_pm_ops,
302 .probe = tmp102_probe, 295 .probe = tmp102_probe,
303 .remove = tmp102_remove, 296 .remove = tmp102_remove,
304 .id_table = tmp102_id, 297 .id_table = tmp102_id,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9cceacb92f9d..1bc0c170f12a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -727,6 +727,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
727 ICPU(0x46, idle_cpu_hsw), 727 ICPU(0x46, idle_cpu_hsw),
728 ICPU(0x4d, idle_cpu_avn), 728 ICPU(0x4d, idle_cpu_avn),
729 ICPU(0x3d, idle_cpu_bdw), 729 ICPU(0x3d, idle_cpu_bdw),
730 ICPU(0x47, idle_cpu_bdw),
730 ICPU(0x4f, idle_cpu_bdw), 731 ICPU(0x4f, idle_cpu_bdw),
731 ICPU(0x56, idle_cpu_bdw), 732 ICPU(0x56, idle_cpu_bdw),
732 {} 733 {}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98024856df07..59de6364a910 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4284,7 +4284,6 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id)
4284} 4284}
4285 4285
4286struct irq_remap_ops amd_iommu_irq_ops = { 4286struct irq_remap_ops amd_iommu_irq_ops = {
4287 .supported = amd_iommu_supported,
4288 .prepare = amd_iommu_prepare, 4287 .prepare = amd_iommu_prepare,
4289 .enable = amd_iommu_enable, 4288 .enable = amd_iommu_enable,
4290 .disable = amd_iommu_disable, 4289 .disable = amd_iommu_disable,
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b0522f15730f..9a20248e7068 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2014,9 +2014,6 @@ static bool detect_ivrs(void)
2014 /* Make sure ACS will be enabled during PCI probe */ 2014 /* Make sure ACS will be enabled during PCI probe */
2015 pci_request_acs(); 2015 pci_request_acs();
2016 2016
2017 if (!disable_irq_remap)
2018 amd_iommu_irq_remap = true;
2019
2020 return true; 2017 return true;
2021} 2018}
2022 2019
@@ -2123,12 +2120,14 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
2123#ifdef CONFIG_IRQ_REMAP 2120#ifdef CONFIG_IRQ_REMAP
2124int __init amd_iommu_prepare(void) 2121int __init amd_iommu_prepare(void)
2125{ 2122{
2126 return iommu_go_to_state(IOMMU_ACPI_FINISHED); 2123 int ret;
2127}
2128 2124
2129int __init amd_iommu_supported(void) 2125 amd_iommu_irq_remap = true;
2130{ 2126
2131 return amd_iommu_irq_remap ? 1 : 0; 2127 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2128 if (ret)
2129 return ret;
2130 return amd_iommu_irq_remap ? 0 : -ENODEV;
2132} 2131}
2133 2132
2134int __init amd_iommu_enable(void) 2133int __init amd_iommu_enable(void)
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 95ed6deae47f..861af9d8338a 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -33,7 +33,6 @@ extern void amd_iommu_init_notifier(void);
33extern void amd_iommu_init_api(void); 33extern void amd_iommu_init_api(void);
34 34
35/* Needed for interrupt remapping */ 35/* Needed for interrupt remapping */
36extern int amd_iommu_supported(void);
37extern int amd_iommu_prepare(void); 36extern int amd_iommu_prepare(void);
38extern int amd_iommu_enable(void); 37extern int amd_iommu_enable(void);
39extern void amd_iommu_disable(void); 38extern void amd_iommu_disable(void);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index a55b207b9425..14de1ab223c8 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -32,8 +32,9 @@ struct hpet_scope {
32}; 32};
33 33
34#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) 34#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
35#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) 35#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)
36 36
37static int __read_mostly eim_mode;
37static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; 38static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
38static struct hpet_scope ir_hpet[MAX_HPET_TBS]; 39static struct hpet_scope ir_hpet[MAX_HPET_TBS];
39 40
@@ -481,11 +482,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
481 if (iommu->ir_table) 482 if (iommu->ir_table)
482 return 0; 483 return 0;
483 484
484 ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC); 485 ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
485 if (!ir_table) 486 if (!ir_table)
486 return -ENOMEM; 487 return -ENOMEM;
487 488
488 pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 489 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
489 INTR_REMAP_PAGE_ORDER); 490 INTR_REMAP_PAGE_ORDER);
490 491
491 if (!pages) { 492 if (!pages) {
@@ -566,13 +567,27 @@ static int __init dmar_x2apic_optout(void)
566 return dmar->flags & DMAR_X2APIC_OPT_OUT; 567 return dmar->flags & DMAR_X2APIC_OPT_OUT;
567} 568}
568 569
569static int __init intel_irq_remapping_supported(void) 570static void __init intel_cleanup_irq_remapping(void)
571{
572 struct dmar_drhd_unit *drhd;
573 struct intel_iommu *iommu;
574
575 for_each_iommu(iommu, drhd) {
576 if (ecap_ir_support(iommu->ecap)) {
577 iommu_disable_irq_remapping(iommu);
578 intel_teardown_irq_remapping(iommu);
579 }
580 }
581
582 if (x2apic_supported())
583 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
584}
585
586static int __init intel_prepare_irq_remapping(void)
570{ 587{
571 struct dmar_drhd_unit *drhd; 588 struct dmar_drhd_unit *drhd;
572 struct intel_iommu *iommu; 589 struct intel_iommu *iommu;
573 590
574 if (disable_irq_remap)
575 return 0;
576 if (irq_remap_broken) { 591 if (irq_remap_broken) {
577 printk(KERN_WARNING 592 printk(KERN_WARNING
578 "This system BIOS has enabled interrupt remapping\n" 593 "This system BIOS has enabled interrupt remapping\n"
@@ -581,38 +596,45 @@ static int __init intel_irq_remapping_supported(void)
581 "interrupt remapping is being disabled. Please\n" 596 "interrupt remapping is being disabled. Please\n"
582 "contact your BIOS vendor for an update\n"); 597 "contact your BIOS vendor for an update\n");
583 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); 598 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
584 disable_irq_remap = 1; 599 return -ENODEV;
585 return 0;
586 } 600 }
587 601
602 if (dmar_table_init() < 0)
603 return -ENODEV;
604
588 if (!dmar_ir_support()) 605 if (!dmar_ir_support())
589 return 0; 606 return -ENODEV;
607
608 if (parse_ioapics_under_ir() != 1) {
609 printk(KERN_INFO "Not enabling interrupt remapping\n");
610 goto error;
611 }
590 612
613 /* First make sure all IOMMUs support IRQ remapping */
591 for_each_iommu(iommu, drhd) 614 for_each_iommu(iommu, drhd)
592 if (!ecap_ir_support(iommu->ecap)) 615 if (!ecap_ir_support(iommu->ecap))
593 return 0; 616 goto error;
594 617
595 return 1; 618 /* Do the allocations early */
619 for_each_iommu(iommu, drhd)
620 if (intel_setup_irq_remapping(iommu))
621 goto error;
622
623 return 0;
624
625error:
626 intel_cleanup_irq_remapping();
627 return -ENODEV;
596} 628}
597 629
598static int __init intel_enable_irq_remapping(void) 630static int __init intel_enable_irq_remapping(void)
599{ 631{
600 struct dmar_drhd_unit *drhd; 632 struct dmar_drhd_unit *drhd;
601 struct intel_iommu *iommu; 633 struct intel_iommu *iommu;
602 bool x2apic_present;
603 int setup = 0; 634 int setup = 0;
604 int eim = 0; 635 int eim = 0;
605 636
606 x2apic_present = x2apic_supported(); 637 if (x2apic_supported()) {
607
608 if (parse_ioapics_under_ir() != 1) {
609 printk(KERN_INFO "Not enable interrupt remapping\n");
610 goto error;
611 }
612
613 if (x2apic_present) {
614 pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
615
616 eim = !dmar_x2apic_optout(); 638 eim = !dmar_x2apic_optout();
617 if (!eim) 639 if (!eim)
618 printk(KERN_WARNING 640 printk(KERN_WARNING
@@ -646,16 +668,15 @@ static int __init intel_enable_irq_remapping(void)
646 /* 668 /*
647 * check for the Interrupt-remapping support 669 * check for the Interrupt-remapping support
648 */ 670 */
649 for_each_iommu(iommu, drhd) { 671 for_each_iommu(iommu, drhd)
650 if (!ecap_ir_support(iommu->ecap))
651 continue;
652
653 if (eim && !ecap_eim_support(iommu->ecap)) { 672 if (eim && !ecap_eim_support(iommu->ecap)) {
654 printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " 673 printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
655 " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); 674 " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
656 goto error; 675 eim = 0;
657 } 676 }
658 } 677 eim_mode = eim;
678 if (eim)
679 pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
659 680
660 /* 681 /*
661 * Enable queued invalidation for all the DRHD's. 682 * Enable queued invalidation for all the DRHD's.
@@ -675,12 +696,6 @@ static int __init intel_enable_irq_remapping(void)
675 * Setup Interrupt-remapping for all the DRHD's now. 696 * Setup Interrupt-remapping for all the DRHD's now.
676 */ 697 */
677 for_each_iommu(iommu, drhd) { 698 for_each_iommu(iommu, drhd) {
678 if (!ecap_ir_support(iommu->ecap))
679 continue;
680
681 if (intel_setup_irq_remapping(iommu))
682 goto error;
683
684 iommu_set_irq_remapping(iommu, eim); 699 iommu_set_irq_remapping(iommu, eim);
685 setup = 1; 700 setup = 1;
686 } 701 }
@@ -702,15 +717,7 @@ static int __init intel_enable_irq_remapping(void)
702 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; 717 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
703 718
704error: 719error:
705 for_each_iommu(iommu, drhd) 720 intel_cleanup_irq_remapping();
706 if (ecap_ir_support(iommu->ecap)) {
707 iommu_disable_irq_remapping(iommu);
708 intel_teardown_irq_remapping(iommu);
709 }
710
711 if (x2apic_present)
712 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
713
714 return -1; 721 return -1;
715} 722}
716 723
@@ -1199,8 +1206,7 @@ static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
1199} 1206}
1200 1207
1201struct irq_remap_ops intel_irq_remap_ops = { 1208struct irq_remap_ops intel_irq_remap_ops = {
1202 .supported = intel_irq_remapping_supported, 1209 .prepare = intel_prepare_irq_remapping,
1203 .prepare = dmar_table_init,
1204 .enable = intel_enable_irq_remapping, 1210 .enable = intel_enable_irq_remapping,
1205 .disable = disable_irq_remapping, 1211 .disable = disable_irq_remapping,
1206 .reenable = reenable_irq_remapping, 1212 .reenable = reenable_irq_remapping,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 89c4846683be..390079ee1350 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -17,12 +17,11 @@
17#include "irq_remapping.h" 17#include "irq_remapping.h"
18 18
19int irq_remapping_enabled; 19int irq_remapping_enabled;
20
21int disable_irq_remap;
22int irq_remap_broken; 20int irq_remap_broken;
23int disable_sourceid_checking; 21int disable_sourceid_checking;
24int no_x2apic_optout; 22int no_x2apic_optout;
25 23
24static int disable_irq_remap;
26static struct irq_remap_ops *remap_ops; 25static struct irq_remap_ops *remap_ops;
27 26
28static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec); 27static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
@@ -194,45 +193,32 @@ static __init int setup_irqremap(char *str)
194} 193}
195early_param("intremap", setup_irqremap); 194early_param("intremap", setup_irqremap);
196 195
197void __init setup_irq_remapping_ops(void)
198{
199 remap_ops = &intel_irq_remap_ops;
200
201#ifdef CONFIG_AMD_IOMMU
202 if (amd_iommu_irq_ops.prepare() == 0)
203 remap_ops = &amd_iommu_irq_ops;
204#endif
205}
206
207void set_irq_remapping_broken(void) 196void set_irq_remapping_broken(void)
208{ 197{
209 irq_remap_broken = 1; 198 irq_remap_broken = 1;
210} 199}
211 200
212int irq_remapping_supported(void) 201int __init irq_remapping_prepare(void)
213{ 202{
214 if (disable_irq_remap) 203 if (disable_irq_remap)
215 return 0; 204 return -ENOSYS;
216
217 if (!remap_ops || !remap_ops->supported)
218 return 0;
219
220 return remap_ops->supported();
221}
222 205
223int __init irq_remapping_prepare(void) 206 if (intel_irq_remap_ops.prepare() == 0)
224{ 207 remap_ops = &intel_irq_remap_ops;
225 if (!remap_ops || !remap_ops->prepare) 208 else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
226 return -ENODEV; 209 amd_iommu_irq_ops.prepare() == 0)
210 remap_ops = &amd_iommu_irq_ops;
211 else
212 return -ENOSYS;
227 213
228 return remap_ops->prepare(); 214 return 0;
229} 215}
230 216
231int __init irq_remapping_enable(void) 217int __init irq_remapping_enable(void)
232{ 218{
233 int ret; 219 int ret;
234 220
235 if (!remap_ops || !remap_ops->enable) 221 if (!remap_ops->enable)
236 return -ENODEV; 222 return -ENODEV;
237 223
238 ret = remap_ops->enable(); 224 ret = remap_ops->enable();
@@ -245,22 +231,16 @@ int __init irq_remapping_enable(void)
245 231
246void irq_remapping_disable(void) 232void irq_remapping_disable(void)
247{ 233{
248 if (!irq_remapping_enabled || 234 if (irq_remapping_enabled && remap_ops->disable)
249 !remap_ops || 235 remap_ops->disable();
250 !remap_ops->disable)
251 return;
252
253 remap_ops->disable();
254} 236}
255 237
256int irq_remapping_reenable(int mode) 238int irq_remapping_reenable(int mode)
257{ 239{
258 if (!irq_remapping_enabled || 240 if (irq_remapping_enabled && remap_ops->reenable)
259 !remap_ops || 241 return remap_ops->reenable(mode);
260 !remap_ops->reenable)
261 return 0;
262 242
263 return remap_ops->reenable(mode); 243 return 0;
264} 244}
265 245
266int __init irq_remap_enable_fault_handling(void) 246int __init irq_remap_enable_fault_handling(void)
@@ -268,7 +248,7 @@ int __init irq_remap_enable_fault_handling(void)
268 if (!irq_remapping_enabled) 248 if (!irq_remapping_enabled)
269 return 0; 249 return 0;
270 250
271 if (!remap_ops || !remap_ops->enable_faulting) 251 if (!remap_ops->enable_faulting)
272 return -ENODEV; 252 return -ENODEV;
273 253
274 return remap_ops->enable_faulting(); 254 return remap_ops->enable_faulting();
@@ -279,7 +259,7 @@ int setup_ioapic_remapped_entry(int irq,
279 unsigned int destination, int vector, 259 unsigned int destination, int vector,
280 struct io_apic_irq_attr *attr) 260 struct io_apic_irq_attr *attr)
281{ 261{
282 if (!remap_ops || !remap_ops->setup_ioapic_entry) 262 if (!remap_ops->setup_ioapic_entry)
283 return -ENODEV; 263 return -ENODEV;
284 264
285 return remap_ops->setup_ioapic_entry(irq, entry, destination, 265 return remap_ops->setup_ioapic_entry(irq, entry, destination,
@@ -289,8 +269,7 @@ int setup_ioapic_remapped_entry(int irq,
289static int set_remapped_irq_affinity(struct irq_data *data, 269static int set_remapped_irq_affinity(struct irq_data *data,
290 const struct cpumask *mask, bool force) 270 const struct cpumask *mask, bool force)
291{ 271{
292 if (!config_enabled(CONFIG_SMP) || !remap_ops || 272 if (!config_enabled(CONFIG_SMP) || !remap_ops->set_affinity)
293 !remap_ops->set_affinity)
294 return 0; 273 return 0;
295 274
296 return remap_ops->set_affinity(data, mask, force); 275 return remap_ops->set_affinity(data, mask, force);
@@ -300,10 +279,7 @@ void free_remapped_irq(int irq)
300{ 279{
301 struct irq_cfg *cfg = irq_cfg(irq); 280 struct irq_cfg *cfg = irq_cfg(irq);
302 281
303 if (!remap_ops || !remap_ops->free_irq) 282 if (irq_remapped(cfg) && remap_ops->free_irq)
304 return;
305
306 if (irq_remapped(cfg))
307 remap_ops->free_irq(irq); 283 remap_ops->free_irq(irq);
308} 284}
309 285
@@ -315,13 +291,13 @@ void compose_remapped_msi_msg(struct pci_dev *pdev,
315 291
316 if (!irq_remapped(cfg)) 292 if (!irq_remapped(cfg))
317 native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); 293 native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
318 else if (remap_ops && remap_ops->compose_msi_msg) 294 else if (remap_ops->compose_msi_msg)
319 remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id); 295 remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
320} 296}
321 297
322static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) 298static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
323{ 299{
324 if (!remap_ops || !remap_ops->msi_alloc_irq) 300 if (!remap_ops->msi_alloc_irq)
325 return -ENODEV; 301 return -ENODEV;
326 302
327 return remap_ops->msi_alloc_irq(pdev, irq, nvec); 303 return remap_ops->msi_alloc_irq(pdev, irq, nvec);
@@ -330,7 +306,7 @@ static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
330static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, 306static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
331 int index, int sub_handle) 307 int index, int sub_handle)
332{ 308{
333 if (!remap_ops || !remap_ops->msi_setup_irq) 309 if (!remap_ops->msi_setup_irq)
334 return -ENODEV; 310 return -ENODEV;
335 311
336 return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle); 312 return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
@@ -340,7 +316,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
340{ 316{
341 int ret; 317 int ret;
342 318
343 if (!remap_ops || !remap_ops->alloc_hpet_msi) 319 if (!remap_ops->alloc_hpet_msi)
344 return -ENODEV; 320 return -ENODEV;
345 321
346 ret = remap_ops->alloc_hpet_msi(irq, id); 322 ret = remap_ops->alloc_hpet_msi(irq, id);
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index fde250f86e60..c448eb48340a 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -31,16 +31,12 @@ struct cpumask;
31struct pci_dev; 31struct pci_dev;
32struct msi_msg; 32struct msi_msg;
33 33
34extern int disable_irq_remap;
35extern int irq_remap_broken; 34extern int irq_remap_broken;
36extern int disable_sourceid_checking; 35extern int disable_sourceid_checking;
37extern int no_x2apic_optout; 36extern int no_x2apic_optout;
38extern int irq_remapping_enabled; 37extern int irq_remapping_enabled;
39 38
40struct irq_remap_ops { 39struct irq_remap_ops {
41 /* Check whether Interrupt Remapping is supported */
42 int (*supported)(void);
43
44 /* Initializes hardware and makes it ready for remapping interrupts */ 40 /* Initializes hardware and makes it ready for remapping interrupts */
45 int (*prepare)(void); 41 int (*prepare)(void);
46 42
@@ -89,7 +85,6 @@ extern struct irq_remap_ops amd_iommu_irq_ops;
89#else /* CONFIG_IRQ_REMAP */ 85#else /* CONFIG_IRQ_REMAP */
90 86
91#define irq_remapping_enabled 0 87#define irq_remapping_enabled 0
92#define disable_irq_remap 1
93#define irq_remap_broken 0 88#define irq_remap_broken 0
94 89
95#endif /* CONFIG_IRQ_REMAP */ 90#endif /* CONFIG_IRQ_REMAP */
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 6dbf6fcbdfaf..e8902f8dddfc 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -386,7 +386,7 @@ static int __init pcc_init(void)
386 ret = acpi_pcc_probe(); 386 ret = acpi_pcc_probe();
387 387
388 if (ret) { 388 if (ret) {
389 pr_err("ACPI PCC probe failed.\n"); 389 pr_debug("ACPI PCC probe failed.\n");
390 return -ENODEV; 390 return -ENODEV;
391 } 391 }
392 392
@@ -394,7 +394,7 @@ static int __init pcc_init(void)
394 pcc_mbox_probe, NULL, 0, NULL, 0); 394 pcc_mbox_probe, NULL, 0, NULL, 0);
395 395
396 if (!pcc_pdev) { 396 if (!pcc_pdev) {
397 pr_err("Err creating PCC platform bundle\n"); 397 pr_debug("Err creating PCC platform bundle\n");
398 return -ENODEV; 398 return -ENODEV;
399 } 399 }
400 400
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 5bdedf6df153..c355a226a024 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -5,6 +5,7 @@
5menuconfig MD 5menuconfig MD
6 bool "Multiple devices driver support (RAID and LVM)" 6 bool "Multiple devices driver support (RAID and LVM)"
7 depends on BLOCK 7 depends on BLOCK
8 select SRCU
8 help 9 help
9 Support multiple physical spindles through a single logical device. 10 Support multiple physical spindles through a single logical device.
10 Required for RAID and logical volume management. 11 Required for RAID and logical volume management.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d6607ee9c855..84673ebcf428 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
197 197
198config NETPOLL 198config NETPOLL
199 def_bool NETCONSOLE 199 def_bool NETCONSOLE
200 select SRCU
200 201
201config NET_POLL_CONTROLLER 202config NET_POLL_CONTROLLER
202 def_bool NETPOLL 203 def_bool NETPOLL
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index e07ce5ff2d48..b10964e8cb54 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -553,8 +553,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
553 if (lp->cardtype == PAM_CARD || 553 if (lp->cardtype == PAM_CARD ||
554 memaddr == (unsigned short *)0xffe00000) { 554 memaddr == (unsigned short *)0xffe00000) {
555 /* PAMs card and Riebl on ST use level 5 autovector */ 555 /* PAMs card and Riebl on ST use level 5 autovector */
556 if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, 556 if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
557 "PAM,Riebl-ST Ethernet", dev)) { 557 "PAM,Riebl-ST Ethernet", dev)) {
558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); 558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
559 return 0; 559 return 0;
560 } 560 }
@@ -567,8 +567,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
567 printk( "Lance: request for VME interrupt failed\n" ); 567 printk( "Lance: request for VME interrupt failed\n" );
568 return 0; 568 return 0;
569 } 569 }
570 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, 570 if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
571 "Riebl-VME Ethernet", dev)) { 571 dev)) {
572 printk( "Lance: request for irq %u failed\n", irq ); 572 printk( "Lance: request for irq %u failed\n", irq );
573 return 0; 573 return 0;
574 } 574 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 14a1c5cec3a5..fa274e0f47d7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4915,7 +4915,7 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4915 4915
4916 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); 4916 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4917 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); 4917 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4918 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); 4918 rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
4919} 4919}
4920 4920
4921static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) 4921static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4948,7 +4948,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4948 RTL_W8(MaxTxPacketSize, 0x3f); 4948 RTL_W8(MaxTxPacketSize, 0x3f);
4949 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); 4949 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4950 RTL_W8(Config4, RTL_R8(Config4) | 0x01); 4950 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4951 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); 4951 rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
4952} 4952}
4953 4953
4954static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) 4954static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4964,7 +4964,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4964static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) 4964static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4965{ 4965{
4966 rtl_tx_performance_tweak(tp->pci_dev, 4966 rtl_tx_performance_tweak(tp->pci_dev,
4967 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); 4967 PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
4968} 4968}
4969 4969
4970static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) 4970static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 037f74f0fcf6..12f9e2708afb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
483 * better enable it. The long term solution would be to use just a 483 * better enable it. The long term solution would be to use just a
484 * bunch of valid page descriptors, without dependency on ballooning 484 * bunch of valid page descriptors, without dependency on ballooning
485 */ 485 */
486 err = alloc_xenballooned_pages(MAX_PENDING_REQS, 486 err = gnttab_alloc_pages(MAX_PENDING_REQS,
487 queue->mmap_pages, 487 queue->mmap_pages);
488 false);
489 if (err) { 488 if (err) {
490 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); 489 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
491 return -ENOMEM; 490 return -ENOMEM;
@@ -664,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
664 */ 663 */
665void xenvif_deinit_queue(struct xenvif_queue *queue) 664void xenvif_deinit_queue(struct xenvif_queue *queue)
666{ 665{
667 free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); 666 gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
668} 667}
669 668
670void xenvif_free(struct xenvif *vif) 669void xenvif_free(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c8ce701a7efb..7dc2d64db3cb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
314static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, 314static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
315 struct netrx_pending_operations *npo, 315 struct netrx_pending_operations *npo,
316 struct page *page, unsigned long size, 316 struct page *page, unsigned long size,
317 unsigned long offset, int *head, 317 unsigned long offset, int *head)
318 struct xenvif_queue *foreign_queue,
319 grant_ref_t foreign_gref)
320{ 318{
321 struct gnttab_copy *copy_gop; 319 struct gnttab_copy *copy_gop;
322 struct xenvif_rx_meta *meta; 320 struct xenvif_rx_meta *meta;
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
333 offset &= ~PAGE_MASK; 331 offset &= ~PAGE_MASK;
334 332
335 while (size > 0) { 333 while (size > 0) {
334 struct xen_page_foreign *foreign;
335
336 BUG_ON(offset >= PAGE_SIZE); 336 BUG_ON(offset >= PAGE_SIZE);
337 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); 337 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
338 338
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
361 copy_gop->flags = GNTCOPY_dest_gref; 361 copy_gop->flags = GNTCOPY_dest_gref;
362 copy_gop->len = bytes; 362 copy_gop->len = bytes;
363 363
364 if (foreign_queue) { 364 foreign = xen_page_foreign(page);
365 copy_gop->source.domid = foreign_queue->vif->domid; 365 if (foreign) {
366 copy_gop->source.u.ref = foreign_gref; 366 copy_gop->source.domid = foreign->domid;
367 copy_gop->source.u.ref = foreign->gref;
367 copy_gop->flags |= GNTCOPY_source_gref; 368 copy_gop->flags |= GNTCOPY_source_gref;
368 } else { 369 } else {
369 copy_gop->source.domid = DOMID_SELF; 370 copy_gop->source.domid = DOMID_SELF;
@@ -406,35 +407,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
406} 407}
407 408
408/* 409/*
409 * Find the grant ref for a given frag in a chain of struct ubuf_info's
410 * skb: the skb itself
411 * i: the frag's number
412 * ubuf: a pointer to an element in the chain. It should not be NULL
413 *
414 * Returns a pointer to the element in the chain where the page were found. If
415 * not found, returns NULL.
416 * See the definition of callback_struct in common.h for more details about
417 * the chain.
418 */
419static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
420 const int i,
421 const struct ubuf_info *ubuf)
422{
423 struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
424
425 do {
426 u16 pending_idx = ubuf->desc;
427
428 if (skb_shinfo(skb)->frags[i].page.p ==
429 foreign_queue->mmap_pages[pending_idx])
430 break;
431 ubuf = (struct ubuf_info *) ubuf->ctx;
432 } while (ubuf);
433
434 return ubuf;
435}
436
437/*
438 * Prepare an SKB to be transmitted to the frontend. 410 * Prepare an SKB to be transmitted to the frontend.
439 * 411 *
440 * This function is responsible for allocating grant operations, meta 412 * This function is responsible for allocating grant operations, meta
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
459 int head = 1; 431 int head = 1;
460 int old_meta_prod; 432 int old_meta_prod;
461 int gso_type; 433 int gso_type;
462 const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
463 const struct ubuf_info *const head_ubuf = ubuf;
464 434
465 old_meta_prod = npo->meta_prod; 435 old_meta_prod = npo->meta_prod;
466 436
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
507 len = skb_tail_pointer(skb) - data; 477 len = skb_tail_pointer(skb) - data;
508 478
509 xenvif_gop_frag_copy(queue, skb, npo, 479 xenvif_gop_frag_copy(queue, skb, npo,
510 virt_to_page(data), len, offset, &head, 480 virt_to_page(data), len, offset, &head);
511 NULL,
512 0);
513 data += len; 481 data += len;
514 } 482 }
515 483
516 for (i = 0; i < nr_frags; i++) { 484 for (i = 0; i < nr_frags; i++) {
517 /* This variable also signals whether foreign_gref has a real
518 * value or not.
519 */
520 struct xenvif_queue *foreign_queue = NULL;
521 grant_ref_t foreign_gref;
522
523 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
524 (ubuf->callback == &xenvif_zerocopy_callback)) {
525 const struct ubuf_info *const startpoint = ubuf;
526
527 /* Ideally ubuf points to the chain element which
528 * belongs to this frag. Or if frags were removed from
529 * the beginning, then shortly before it.
530 */
531 ubuf = xenvif_find_gref(skb, i, ubuf);
532
533 /* Try again from the beginning of the list, if we
534 * haven't tried from there. This only makes sense in
535 * the unlikely event of reordering the original frags.
536 * For injected local pages it's an unnecessary second
537 * run.
538 */
539 if (unlikely(!ubuf) && startpoint != head_ubuf)
540 ubuf = xenvif_find_gref(skb, i, head_ubuf);
541
542 if (likely(ubuf)) {
543 u16 pending_idx = ubuf->desc;
544
545 foreign_queue = ubuf_to_queue(ubuf);
546 foreign_gref =
547 foreign_queue->pending_tx_info[pending_idx].req.gref;
548 /* Just a safety measure. If this was the last
549 * element on the list, the for loop will
550 * iterate again if a local page were added to
551 * the end. Using head_ubuf here prevents the
552 * second search on the chain. Or the original
553 * frags changed order, but that's less likely.
554 * In any way, ubuf shouldn't be NULL.
555 */
556 ubuf = ubuf->ctx ?
557 (struct ubuf_info *) ubuf->ctx :
558 head_ubuf;
559 } else
560 /* This frag was a local page, added to the
561 * array after the skb left netback.
562 */
563 ubuf = head_ubuf;
564 }
565 xenvif_gop_frag_copy(queue, skb, npo, 485 xenvif_gop_frag_copy(queue, skb, npo,
566 skb_frag_page(&skb_shinfo(skb)->frags[i]), 486 skb_frag_page(&skb_shinfo(skb)->frags[i]),
567 skb_frag_size(&skb_shinfo(skb)->frags[i]), 487 skb_frag_size(&skb_shinfo(skb)->frags[i]),
568 skb_shinfo(skb)->frags[i].page_offset, 488 skb_shinfo(skb)->frags[i].page_offset,
569 &head, 489 &head);
570 foreign_queue,
571 foreign_queue ? foreign_gref : UINT_MAX);
572 } 490 }
573 491
574 return npo->meta_prod - old_meta_prod; 492 return npo->meta_prod - old_meta_prod;
@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1241 /* Take an extra reference to offset network stack's put_page */ 1159 /* Take an extra reference to offset network stack's put_page */
1242 get_page(queue->mmap_pages[pending_idx]); 1160 get_page(queue->mmap_pages[pending_idx]);
1243 } 1161 }
1244 /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1245 * overlaps with "index", and "mapping" is not set. I think mapping
1246 * should be set. If delivered to local stack, it would drop this
1247 * skb in sk_filter unless the socket has the right to use it.
1248 */
1249 skb->pfmemalloc = false;
1250} 1162}
1251 1163
1252static int xenvif_get_extras(struct xenvif_queue *queue, 1164static int xenvif_get_extras(struct xenvif_queue *queue,
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 88471d3d98cd..110fece2ff53 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
140 unsigned char busno, unsigned char bus_max, 140 unsigned char busno, unsigned char bus_max,
141 struct list_head *resources, resource_size_t *io_base) 141 struct list_head *resources, resource_size_t *io_base)
142{ 142{
143 struct resource_entry *window;
143 struct resource *res; 144 struct resource *res;
144 struct resource *bus_range; 145 struct resource *bus_range;
145 struct of_pci_range range; 146 struct of_pci_range range;
@@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
225conversion_failed: 226conversion_failed:
226 kfree(res); 227 kfree(res);
227parse_failed: 228parse_failed:
229 resource_list_for_each_entry(window, resources)
230 kfree(window->res);
228 pci_free_resource_list(resources); 231 pci_free_resource_list(resources);
232 kfree(bus_range);
229 return err; 233 return err;
230} 234}
231EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); 235EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
index 7ad59ac68cf6..a81cd2a2747f 100644
--- a/drivers/parport/parport_atari.c
+++ b/drivers/parport/parport_atari.c
@@ -192,8 +192,8 @@ static int __init parport_atari_init(void)
192 &parport_atari_ops); 192 &parport_atari_ops);
193 if (!p) 193 if (!p)
194 return -ENODEV; 194 return -ENODEV;
195 if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 195 if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name,
196 IRQ_TYPE_SLOW, p->name, p)) { 196 p)) {
197 parport_put_port (p); 197 parport_put_port (p);
198 return -ENODEV; 198 return -ENODEV;
199 } 199 }
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 49dd766852ba..d9b64a175990 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
67EXPORT_SYMBOL(pci_bus_write_config_word); 67EXPORT_SYMBOL(pci_bus_write_config_word);
68EXPORT_SYMBOL(pci_bus_write_config_dword); 68EXPORT_SYMBOL(pci_bus_write_config_dword);
69 69
70int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
71 int where, int size, u32 *val)
72{
73 void __iomem *addr;
74
75 addr = bus->ops->map_bus(bus, devfn, where);
76 if (!addr) {
77 *val = ~0;
78 return PCIBIOS_DEVICE_NOT_FOUND;
79 }
80
81 if (size == 1)
82 *val = readb(addr);
83 else if (size == 2)
84 *val = readw(addr);
85 else
86 *val = readl(addr);
87
88 return PCIBIOS_SUCCESSFUL;
89}
90EXPORT_SYMBOL_GPL(pci_generic_config_read);
91
92int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
93 int where, int size, u32 val)
94{
95 void __iomem *addr;
96
97 addr = bus->ops->map_bus(bus, devfn, where);
98 if (!addr)
99 return PCIBIOS_DEVICE_NOT_FOUND;
100
101 if (size == 1)
102 writeb(val, addr);
103 else if (size == 2)
104 writew(val, addr);
105 else
106 writel(val, addr);
107
108 return PCIBIOS_SUCCESSFUL;
109}
110EXPORT_SYMBOL_GPL(pci_generic_config_write);
111
112int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
113 int where, int size, u32 *val)
114{
115 void __iomem *addr;
116
117 addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
118 if (!addr) {
119 *val = ~0;
120 return PCIBIOS_DEVICE_NOT_FOUND;
121 }
122
123 *val = readl(addr);
124
125 if (size <= 2)
126 *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
127
128 return PCIBIOS_SUCCESSFUL;
129}
130EXPORT_SYMBOL_GPL(pci_generic_config_read32);
131
132int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
133 int where, int size, u32 val)
134{
135 void __iomem *addr;
136 u32 mask, tmp;
137
138 addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
139 if (!addr)
140 return PCIBIOS_DEVICE_NOT_FOUND;
141
142 if (size == 4) {
143 writel(val, addr);
144 return PCIBIOS_SUCCESSFUL;
145 } else {
146 mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
147 }
148
149 tmp = readl(addr) & mask;
150 tmp |= val << ((where & 0x3) * 8);
151 writel(tmp, addr);
152
153 return PCIBIOS_SUCCESSFUL;
154}
155EXPORT_SYMBOL_GPL(pci_generic_config_write32);
156
70/** 157/**
71 * pci_bus_set_ops - Set raw operations of pci bus 158 * pci_bus_set_ops - Set raw operations of pci bus
72 * @bus: pci bus struct 159 * @bus: pci bus struct
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 8fb16188cd82..90fa3a78fb7c 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -20,17 +20,16 @@
20void pci_add_resource_offset(struct list_head *resources, struct resource *res, 20void pci_add_resource_offset(struct list_head *resources, struct resource *res,
21 resource_size_t offset) 21 resource_size_t offset)
22{ 22{
23 struct pci_host_bridge_window *window; 23 struct resource_entry *entry;
24 24
25 window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL); 25 entry = resource_list_create_entry(res, 0);
26 if (!window) { 26 if (!entry) {
27 printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); 27 printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
28 return; 28 return;
29 } 29 }
30 30
31 window->res = res; 31 entry->offset = offset;
32 window->offset = offset; 32 resource_list_add_tail(entry, resources);
33 list_add_tail(&window->list, resources);
34} 33}
35EXPORT_SYMBOL(pci_add_resource_offset); 34EXPORT_SYMBOL(pci_add_resource_offset);
36 35
@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource);
42 41
43void pci_free_resource_list(struct list_head *resources) 42void pci_free_resource_list(struct list_head *resources)
44{ 43{
45 struct pci_host_bridge_window *window, *tmp; 44 resource_list_free(resources);
46
47 list_for_each_entry_safe(window, tmp, resources, list) {
48 list_del(&window->list);
49 kfree(window);
50 }
51} 45}
52EXPORT_SYMBOL(pci_free_resource_list); 46EXPORT_SYMBOL(pci_free_resource_list);
53 47
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 0e5f3c95af5b..39b2dbe585aa 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -35,10 +35,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
35 struct resource *res) 35 struct resource *res)
36{ 36{
37 struct pci_host_bridge *bridge = find_pci_host_bridge(bus); 37 struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
38 struct pci_host_bridge_window *window; 38 struct resource_entry *window;
39 resource_size_t offset = 0; 39 resource_size_t offset = 0;
40 40
41 list_for_each_entry(window, &bridge->windows, list) { 41 resource_list_for_each_entry(window, &bridge->windows) {
42 if (resource_contains(window->res, res)) { 42 if (resource_contains(window->res, res)) {
43 offset = window->offset; 43 offset = window->offset;
44 break; 44 break;
@@ -60,10 +60,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
60 struct pci_bus_region *region) 60 struct pci_bus_region *region)
61{ 61{
62 struct pci_host_bridge *bridge = find_pci_host_bridge(bus); 62 struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
63 struct pci_host_bridge_window *window; 63 struct resource_entry *window;
64 resource_size_t offset = 0; 64 resource_size_t offset = 0;
65 65
66 list_for_each_entry(window, &bridge->windows, list) { 66 resource_list_for_each_entry(window, &bridge->windows) {
67 struct pci_bus_region bus_region; 67 struct pci_bus_region bus_region;
68 68
69 if (resource_type(res) != resource_type(window->res)) 69 if (resource_type(res) != resource_type(window->res))
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index c4b6568e486d..7b892a9cc4fc 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -102,4 +102,8 @@ config PCI_LAYERSCAPE
102 help 102 help
103 Say Y here if you want PCIe controller support on Layerscape SoCs. 103 Say Y here if you want PCIe controller support on Layerscape SoCs.
104 104
105config PCI_VERSATILE
106 bool "ARM Versatile PB PCI controller"
107 depends on ARCH_VERSATILE
108
105endmenu 109endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 44c26998027f..e61d91c92bf1 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
12obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o 12obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
13obj-$(CONFIG_PCI_XGENE) += pci-xgene.o 13obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
14obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o 14obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
15obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 6eb1aa75bd37..ba46e581db99 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -76,55 +76,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
76 .map_bus = gen_pci_map_cfg_bus_ecam, 76 .map_bus = gen_pci_map_cfg_bus_ecam,
77}; 77};
78 78
79static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
80 int where, int size, u32 *val)
81{
82 void __iomem *addr;
83 struct pci_sys_data *sys = bus->sysdata;
84 struct gen_pci *pci = sys->private_data;
85
86 addr = pci->cfg.ops->map_bus(bus, devfn, where);
87
88 switch (size) {
89 case 1:
90 *val = readb(addr);
91 break;
92 case 2:
93 *val = readw(addr);
94 break;
95 default:
96 *val = readl(addr);
97 }
98
99 return PCIBIOS_SUCCESSFUL;
100}
101
102static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
103 int where, int size, u32 val)
104{
105 void __iomem *addr;
106 struct pci_sys_data *sys = bus->sysdata;
107 struct gen_pci *pci = sys->private_data;
108
109 addr = pci->cfg.ops->map_bus(bus, devfn, where);
110
111 switch (size) {
112 case 1:
113 writeb(val, addr);
114 break;
115 case 2:
116 writew(val, addr);
117 break;
118 default:
119 writel(val, addr);
120 }
121
122 return PCIBIOS_SUCCESSFUL;
123}
124
125static struct pci_ops gen_pci_ops = { 79static struct pci_ops gen_pci_ops = {
126 .read = gen_pci_config_read, 80 .read = pci_generic_config_read,
127 .write = gen_pci_config_write, 81 .write = pci_generic_config_write,
128}; 82};
129 83
130static const struct of_device_id gen_pci_of_match[] = { 84static const struct of_device_id gen_pci_of_match[] = {
@@ -149,14 +103,14 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
149 struct device *dev = pci->host.dev.parent; 103 struct device *dev = pci->host.dev.parent;
150 struct device_node *np = dev->of_node; 104 struct device_node *np = dev->of_node;
151 resource_size_t iobase; 105 resource_size_t iobase;
152 struct pci_host_bridge_window *win; 106 struct resource_entry *win;
153 107
154 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, 108 err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
155 &iobase); 109 &iobase);
156 if (err) 110 if (err)
157 return err; 111 return err;
158 112
159 list_for_each_entry(win, &pci->resources, list) { 113 resource_list_for_each_entry(win, &pci->resources) {
160 struct resource *parent, *res = win->res; 114 struct resource *parent, *res = win->res;
161 115
162 switch (resource_type(res)) { 116 switch (resource_type(res)) {
@@ -287,6 +241,7 @@ static int gen_pci_probe(struct platform_device *pdev)
287 241
288 of_id = of_match_node(gen_pci_of_match, np); 242 of_id = of_match_node(gen_pci_of_match, np);
289 pci->cfg.ops = of_id->data; 243 pci->cfg.ops = of_id->data;
244 gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
290 pci->host.dev.parent = dev; 245 pci->host.dev.parent = dev;
291 INIT_LIST_HEAD(&pci->host.windows); 246 INIT_LIST_HEAD(&pci->host.windows);
292 INIT_LIST_HEAD(&pci->resources); 247 INIT_LIST_HEAD(&pci->resources);
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 78f79e31ac5c..75333b0c4f0a 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -119,7 +119,7 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
119 struct pcie_port *pp = &ks_pcie->pp; 119 struct pcie_port *pp = &ks_pcie->pp;
120 struct irq_chip *chip = irq_desc_get_chip(desc); 120 struct irq_chip *chip = irq_desc_get_chip(desc);
121 121
122 dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq); 122 dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
123 123
124 /* 124 /*
125 * The chained irq handler installation would have replaced normal 125 * The chained irq handler installation would have replaced normal
@@ -197,7 +197,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
197 */ 197 */
198 for (temp = 0; temp < max_host_irqs; temp++) { 198 for (temp = 0; temp < max_host_irqs; temp++) {
199 host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); 199 host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
200 if (host_irqs[temp] < 0) 200 if (!host_irqs[temp])
201 break; 201 break;
202 } 202 }
203 if (temp) { 203 if (temp) {
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 6697b1a4d4fa..68c9e5e9b0a8 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -167,7 +167,6 @@ MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
167static struct platform_driver ls_pcie_driver = { 167static struct platform_driver ls_pcie_driver = {
168 .driver = { 168 .driver = {
169 .name = "layerscape-pcie", 169 .name = "layerscape-pcie",
170 .owner = THIS_MODULE,
171 .of_match_table = ls_pcie_of_match, 170 .of_match_table = ls_pcie_of_match,
172 }, 171 },
173}; 172};
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 1dd759596b0a..1309cfbaa719 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -101,9 +101,7 @@ struct mvebu_pcie {
101 struct mvebu_pcie_port *ports; 101 struct mvebu_pcie_port *ports;
102 struct msi_controller *msi; 102 struct msi_controller *msi;
103 struct resource io; 103 struct resource io;
104 char io_name[30];
105 struct resource realio; 104 struct resource realio;
106 char mem_name[30];
107 struct resource mem; 105 struct resource mem;
108 struct resource busn; 106 struct resource busn;
109 int nports; 107 int nports;
@@ -723,18 +721,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
723{ 721{
724 struct mvebu_pcie *pcie = sys_to_pcie(sys); 722 struct mvebu_pcie *pcie = sys_to_pcie(sys);
725 int i; 723 int i;
726 int domain = 0;
727 724
728#ifdef CONFIG_PCI_DOMAINS 725 pcie->mem.name = "PCI MEM";
729 domain = sys->domain; 726 pcie->realio.name = "PCI I/O";
730#endif
731
732 snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x",
733 domain);
734 pcie->mem.name = pcie->mem_name;
735
736 snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain);
737 pcie->realio.name = pcie->io_name;
738 727
739 if (request_resource(&iomem_resource, &pcie->mem)) 728 if (request_resource(&iomem_resource, &pcie->mem))
740 return 0; 729 return 0;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index d9c042febb1a..dd6b84e6206c 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -131,52 +131,6 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
131 return priv->reg + (slot >> 1) * 0x100 + where; 131 return priv->reg + (slot >> 1) * 0x100 + where;
132} 132}
133 133
134static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
135 int where, int size, u32 *val)
136{
137 void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
138
139 if (!reg)
140 return PCIBIOS_DEVICE_NOT_FOUND;
141
142 switch (size) {
143 case 1:
144 *val = ioread8(reg);
145 break;
146 case 2:
147 *val = ioread16(reg);
148 break;
149 default:
150 *val = ioread32(reg);
151 break;
152 }
153
154 return PCIBIOS_SUCCESSFUL;
155}
156
157static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
158 int where, int size, u32 val)
159{
160 void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
161
162 if (!reg)
163 return PCIBIOS_DEVICE_NOT_FOUND;
164
165 switch (size) {
166 case 1:
167 iowrite8(val, reg);
168 break;
169 case 2:
170 iowrite16(val, reg);
171 break;
172 default:
173 iowrite32(val, reg);
174 break;
175 }
176
177 return PCIBIOS_SUCCESSFUL;
178}
179
180/* PCI interrupt mapping */ 134/* PCI interrupt mapping */
181static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 135static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
182{ 136{
@@ -325,8 +279,9 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
325} 279}
326 280
327static struct pci_ops rcar_pci_ops = { 281static struct pci_ops rcar_pci_ops = {
328 .read = rcar_pci_read_config, 282 .map_bus = rcar_pci_cfg_base,
329 .write = rcar_pci_write_config, 283 .read = pci_generic_config_read,
284 .write = pci_generic_config_write,
330}; 285};
331 286
332static int rcar_pci_probe(struct platform_device *pdev) 287static int rcar_pci_probe(struct platform_device *pdev)
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index a800ae916394..00e92720d7f7 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -480,59 +480,10 @@ static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
480 return addr; 480 return addr;
481} 481}
482 482
483static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
484 int where, int size, u32 *value)
485{
486 void __iomem *addr;
487
488 addr = tegra_pcie_conf_address(bus, devfn, where);
489 if (!addr) {
490 *value = 0xffffffff;
491 return PCIBIOS_DEVICE_NOT_FOUND;
492 }
493
494 *value = readl(addr);
495
496 if (size == 1)
497 *value = (*value >> (8 * (where & 3))) & 0xff;
498 else if (size == 2)
499 *value = (*value >> (8 * (where & 3))) & 0xffff;
500
501 return PCIBIOS_SUCCESSFUL;
502}
503
504static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
505 int where, int size, u32 value)
506{
507 void __iomem *addr;
508 u32 mask, tmp;
509
510 addr = tegra_pcie_conf_address(bus, devfn, where);
511 if (!addr)
512 return PCIBIOS_DEVICE_NOT_FOUND;
513
514 if (size == 4) {
515 writel(value, addr);
516 return PCIBIOS_SUCCESSFUL;
517 }
518
519 if (size == 2)
520 mask = ~(0xffff << ((where & 0x3) * 8));
521 else if (size == 1)
522 mask = ~(0xff << ((where & 0x3) * 8));
523 else
524 return PCIBIOS_BAD_REGISTER_NUMBER;
525
526 tmp = readl(addr) & mask;
527 tmp |= value << ((where & 0x3) * 8);
528 writel(tmp, addr);
529
530 return PCIBIOS_SUCCESSFUL;
531}
532
533static struct pci_ops tegra_pcie_ops = { 483static struct pci_ops tegra_pcie_ops = {
534 .read = tegra_pcie_read_conf, 484 .map_bus = tegra_pcie_conf_address,
535 .write = tegra_pcie_write_conf, 485 .read = pci_generic_config_read32,
486 .write = pci_generic_config_write32,
536}; 487};
537 488
538static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) 489static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
@@ -625,19 +576,6 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port)
625 devm_kfree(pcie->dev, port); 576 devm_kfree(pcie->dev, port);
626} 577}
627 578
628static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
629{
630 u16 reg;
631
632 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
633 pci_read_config_word(dev, PCI_COMMAND, &reg);
634 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
635 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
636 pci_write_config_word(dev, PCI_COMMAND, reg);
637 }
638}
639DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
640
641/* Tegra PCIE root complex wrongly reports device class */ 579/* Tegra PCIE root complex wrongly reports device class */
642static void tegra_pcie_fixup_class(struct pci_dev *dev) 580static void tegra_pcie_fixup_class(struct pci_dev *dev)
643{ 581{
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
new file mode 100644
index 000000000000..1ec694a52379
--- /dev/null
+++ b/drivers/pci/host/pci-versatile.c
@@ -0,0 +1,237 @@
1/*
2 * Copyright 2004 Koninklijke Philips Electronics NV
3 *
4 * Conversion to platform driver and DT:
5 * Copyright 2014 Linaro Ltd.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * 14/04/2005 Initial version, colin.king@philips.com
17 */
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/of_address.h>
21#include <linux/of_pci.h>
22#include <linux/of_platform.h>
23#include <linux/pci.h>
24#include <linux/platform_device.h>
25
26static void __iomem *versatile_pci_base;
27static void __iomem *versatile_cfg_base[2];
28
29#define PCI_IMAP(m) (versatile_pci_base + ((m) * 4))
30#define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4))
31#define PCI_SELFID (versatile_pci_base + 0xc)
32
33#define VP_PCI_DEVICE_ID 0x030010ee
34#define VP_PCI_CLASS_ID 0x0b400000
35
36static u32 pci_slot_ignore;
37
38static int __init versatile_pci_slot_ignore(char *str)
39{
40 int retval;
41 int slot;
42
43 while ((retval = get_option(&str, &slot))) {
44 if ((slot < 0) || (slot > 31))
45 pr_err("Illegal slot value: %d\n", slot);
46 else
47 pci_slot_ignore |= (1 << slot);
48 }
49 return 1;
50}
51__setup("pci_slot_ignore=", versatile_pci_slot_ignore);
52
53
54static void __iomem *versatile_map_bus(struct pci_bus *bus,
55 unsigned int devfn, int offset)
56{
57 unsigned int busnr = bus->number;
58
59 if (pci_slot_ignore & (1 << PCI_SLOT(devfn)))
60 return NULL;
61
62 return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset);
63}
64
65static struct pci_ops pci_versatile_ops = {
66 .map_bus = versatile_map_bus,
67 .read = pci_generic_config_read32,
68 .write = pci_generic_config_write,
69};
70
71static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
72 struct list_head *res)
73{
74 int err, mem = 1, res_valid = 0;
75 struct device_node *np = dev->of_node;
76 resource_size_t iobase;
77 struct resource_entry *win;
78
79 err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
80 if (err)
81 return err;
82
83 resource_list_for_each_entry(win, res, list) {
84 struct resource *parent, *res = win->res;
85
86 switch (resource_type(res)) {
87 case IORESOURCE_IO:
88 parent = &ioport_resource;
89 err = pci_remap_iospace(res, iobase);
90 if (err) {
91 dev_warn(dev, "error %d: failed to map resource %pR\n",
92 err, res);
93 continue;
94 }
95 break;
96 case IORESOURCE_MEM:
97 parent = &iomem_resource;
98 res_valid |= !(res->flags & IORESOURCE_PREFETCH);
99
100 writel(res->start >> 28, PCI_IMAP(mem));
101 writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
102 mem++;
103
104 break;
105 case IORESOURCE_BUS:
106 default:
107 continue;
108 }
109
110 err = devm_request_resource(dev, parent, res);
111 if (err)
112 goto out_release_res;
113 }
114
115 if (!res_valid) {
116 dev_err(dev, "non-prefetchable memory resource required\n");
117 err = -EINVAL;
118 goto out_release_res;
119 }
120
121 return 0;
122
123out_release_res:
124 pci_free_resource_list(res);
125 return err;
126}
127
128/* Unused, temporary to satisfy ARM arch code */
129struct pci_sys_data sys;
130
131static int versatile_pci_probe(struct platform_device *pdev)
132{
133 struct resource *res;
134 int ret, i, myslot = -1;
135 u32 val;
136 void __iomem *local_pci_cfg_base;
137 struct pci_bus *bus;
138 LIST_HEAD(pci_res);
139
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141 if (!res)
142 return -ENODEV;
143 versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
144
145 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
146 if (!res)
147 return -ENODEV;
148 versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
149
150 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
151 if (!res)
152 return -ENODEV;
153 versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res);
154
155 ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
156 if (ret)
157 return ret;
158
159 /*
160 * We need to discover the PCI core first to configure itself
161 * before the main PCI probing is performed
162 */
163 for (i = 0; i < 32; i++) {
164 if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) &&
165 (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) {
166 myslot = i;
167 break;
168 }
169 }
170 if (myslot == -1) {
171 dev_err(&pdev->dev, "Cannot find PCI core!\n");
172 return -EIO;
173 }
174 /*
175 * Do not to map Versatile FPGA PCI device into memory space
176 */
177 pci_slot_ignore |= (1 << myslot);
178
179 dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
180
181 writel(myslot, PCI_SELFID);
182 local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
183
184 val = readl(local_pci_cfg_base + PCI_COMMAND);
185 val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
186 writel(val, local_pci_cfg_base + PCI_COMMAND);
187
188 /*
189 * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
190 */
191 writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
192 writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
193 writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
194
195 /*
196 * For many years the kernel and QEMU were symbiotically buggy
197 * in that they both assumed the same broken IRQ mapping.
198 * QEMU therefore attempts to auto-detect old broken kernels
199 * so that they still work on newer QEMU as they did on old
200 * QEMU. Since we now use the correct (ie matching-hardware)
201 * IRQ mapping we write a definitely different value to a
202 * PCI_INTERRUPT_LINE register to tell QEMU that we expect
203 * real hardware behaviour and it need not be backwards
204 * compatible for us. This write is harmless on real hardware.
205 */
206 writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
207
208 pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
209 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
210
211 bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res);
212 if (!bus)
213 return -ENOMEM;
214
215 pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
216 pci_assign_unassigned_bus_resources(bus);
217
218 return 0;
219}
220
221static const struct of_device_id versatile_pci_of_match[] = {
222 { .compatible = "arm,versatile-pci", },
223 { },
224};
225MODULE_DEVICE_TABLE(of, versatile_pci_of_match);
226
227static struct platform_driver versatile_pci_driver = {
228 .driver = {
229 .name = "versatile-pci",
230 .of_match_table = versatile_pci_of_match,
231 },
232 .probe = versatile_pci_probe,
233};
234module_platform_driver(versatile_pci_driver);
235
236MODULE_DESCRIPTION("Versatile PCI driver");
237MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index b1d0596457c5..aab55474dd0d 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -16,7 +16,7 @@
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 */ 18 */
19#include <linux/clk-private.h> 19#include <linux/clk.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
@@ -74,92 +74,6 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
74 return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; 74 return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
75} 75}
76 76
77/* PCIe Configuration Out/In */
78static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
79{
80 writel(val, addr + offset);
81}
82
83static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
84{
85 u32 val32 = readl(addr + (offset & ~0x3));
86
87 switch (offset & 0x3) {
88 case 2:
89 val32 &= ~0xFFFF0000;
90 val32 |= (u32)val << 16;
91 break;
92 case 0:
93 default:
94 val32 &= ~0xFFFF;
95 val32 |= val;
96 break;
97 }
98 writel(val32, addr + (offset & ~0x3));
99}
100
101static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
102{
103 u32 val32 = readl(addr + (offset & ~0x3));
104
105 switch (offset & 0x3) {
106 case 0:
107 val32 &= ~0xFF;
108 val32 |= val;
109 break;
110 case 1:
111 val32 &= ~0xFF00;
112 val32 |= (u32)val << 8;
113 break;
114 case 2:
115 val32 &= ~0xFF0000;
116 val32 |= (u32)val << 16;
117 break;
118 case 3:
119 default:
120 val32 &= ~0xFF000000;
121 val32 |= (u32)val << 24;
122 break;
123 }
124 writel(val32, addr + (offset & ~0x3));
125}
126
127static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
128{
129 *val = readl(addr + offset);
130}
131
132static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
133{
134 *val = readl(addr + (offset & ~0x3));
135
136 switch (offset & 0x3) {
137 case 2:
138 *val >>= 16;
139 break;
140 }
141
142 *val &= 0xFFFF;
143}
144
145static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
146{
147 *val = readl(addr + (offset & ~0x3));
148
149 switch (offset & 0x3) {
150 case 3:
151 *val = *val >> 24;
152 break;
153 case 2:
154 *val = *val >> 16;
155 break;
156 case 1:
157 *val = *val >> 8;
158 break;
159 }
160 *val &= 0xFF;
161}
162
163/* 77/*
164 * When the address bit [17:16] is 2'b01, the Configuration access will be 78 * When the address bit [17:16] is 2'b01, the Configuration access will be
165 * treated as Type 1 and it will be forwarded to external PCIe device. 79 * treated as Type 1 and it will be forwarded to external PCIe device.
@@ -213,69 +127,23 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
213 return false; 127 return false;
214} 128}
215 129
216static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn, 130static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
217 int offset, int len, u32 *val) 131 int offset)
218{
219 struct xgene_pcie_port *port = bus->sysdata;
220 void __iomem *addr;
221
222 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
223 return PCIBIOS_DEVICE_NOT_FOUND;
224
225 if (xgene_pcie_hide_rc_bars(bus, offset)) {
226 *val = 0;
227 return PCIBIOS_SUCCESSFUL;
228 }
229
230 xgene_pcie_set_rtdid_reg(bus, devfn);
231 addr = xgene_pcie_get_cfg_base(bus);
232 switch (len) {
233 case 1:
234 xgene_pcie_cfg_in8(addr, offset, val);
235 break;
236 case 2:
237 xgene_pcie_cfg_in16(addr, offset, val);
238 break;
239 default:
240 xgene_pcie_cfg_in32(addr, offset, val);
241 break;
242 }
243
244 return PCIBIOS_SUCCESSFUL;
245}
246
247static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
248 int offset, int len, u32 val)
249{ 132{
250 struct xgene_pcie_port *port = bus->sysdata; 133 struct xgene_pcie_port *port = bus->sysdata;
251 void __iomem *addr;
252 134
253 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) 135 if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up ||
254 return PCIBIOS_DEVICE_NOT_FOUND; 136 xgene_pcie_hide_rc_bars(bus, offset))
255 137 return NULL;
256 if (xgene_pcie_hide_rc_bars(bus, offset))
257 return PCIBIOS_SUCCESSFUL;
258 138
259 xgene_pcie_set_rtdid_reg(bus, devfn); 139 xgene_pcie_set_rtdid_reg(bus, devfn);
260 addr = xgene_pcie_get_cfg_base(bus); 140 return xgene_pcie_get_cfg_base(bus);
261 switch (len) {
262 case 1:
263 xgene_pcie_cfg_out8(addr, offset, (u8)val);
264 break;
265 case 2:
266 xgene_pcie_cfg_out16(addr, offset, (u16)val);
267 break;
268 default:
269 xgene_pcie_cfg_out32(addr, offset, val);
270 break;
271 }
272
273 return PCIBIOS_SUCCESSFUL;
274} 141}
275 142
276static struct pci_ops xgene_pcie_ops = { 143static struct pci_ops xgene_pcie_ops = {
277 .read = xgene_pcie_read_config, 144 .map_bus = xgene_pcie_map_bus,
278 .write = xgene_pcie_write_config 145 .read = pci_generic_config_read32,
146 .write = pci_generic_config_write32,
279}; 147};
280 148
281static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, 149static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
@@ -401,11 +269,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
401 struct list_head *res, 269 struct list_head *res,
402 resource_size_t io_base) 270 resource_size_t io_base)
403{ 271{
404 struct pci_host_bridge_window *window; 272 struct resource_entry *window;
405 struct device *dev = port->dev; 273 struct device *dev = port->dev;
406 int ret; 274 int ret;
407 275
408 list_for_each_entry(window, res, list) { 276 resource_list_for_each_entry(window, res) {
409 struct resource *res = window->res; 277 struct resource *res = window->res;
410 u64 restype = resource_type(res); 278 u64 restype = resource_type(res);
411 279
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 17ca98657a28..1f4ea6f2d910 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -511,9 +511,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
511 dw_pci.private_data = (void **)&pp; 511 dw_pci.private_data = (void **)&pp;
512 512
513 pci_common_init_dev(pp->dev, &dw_pci); 513 pci_common_init_dev(pp->dev, &dw_pci);
514#ifdef CONFIG_PCI_DOMAINS
515 dw_pci.domain++;
516#endif
517 514
518 return 0; 515 return 0;
519} 516}
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 748786c402fc..c57bd0ac39a0 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -397,9 +397,6 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie)
397#endif 397#endif
398 398
399 pci_common_init_dev(&pdev->dev, &rcar_pci); 399 pci_common_init_dev(&pdev->dev, &rcar_pci);
400#ifdef CONFIG_PCI_DOMAINS
401 rcar_pci.domain++;
402#endif
403} 400}
404 401
405static int phy_wait_for_ack(struct rcar_pcie *pcie) 402static int phy_wait_for_ack(struct rcar_pcie *pcie)
@@ -757,7 +754,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
757 goto err_map_reg; 754 goto err_map_reg;
758 755
759 i = irq_of_parse_and_map(pdev->dev.of_node, 0); 756 i = irq_of_parse_and_map(pdev->dev.of_node, 0);
760 if (i < 0) { 757 if (!i) {
761 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); 758 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
762 err = -ENOENT; 759 err = -ENOENT;
763 goto err_map_reg; 760 goto err_map_reg;
@@ -765,7 +762,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
765 pcie->msi.irq1 = i; 762 pcie->msi.irq1 = i;
766 763
767 i = irq_of_parse_and_map(pdev->dev.of_node, 1); 764 i = irq_of_parse_and_map(pdev->dev.of_node, 1);
768 if (i < 0) { 765 if (!i) {
769 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); 766 dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
770 err = -ENOENT; 767 err = -ENOENT;
771 goto err_map_reg; 768 goto err_map_reg;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index ef3ebaf9a738..f1a06a091ccb 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -148,10 +148,10 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
148 */ 148 */
149static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) 149static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
150{ 150{
151 u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR); 151 unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
152 152
153 if (val & XILINX_PCIE_RPEFR_ERR_VALID) { 153 if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
154 dev_dbg(port->dev, "Requester ID %d\n", 154 dev_dbg(port->dev, "Requester ID %lu\n",
155 val & XILINX_PCIE_RPEFR_REQ_ID); 155 val & XILINX_PCIE_RPEFR_REQ_ID);
156 pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, 156 pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
157 XILINX_PCIE_REG_RPEFR); 157 XILINX_PCIE_REG_RPEFR);
@@ -189,7 +189,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
189} 189}
190 190
191/** 191/**
192 * xilinx_pcie_config_base - Get configuration base 192 * xilinx_pcie_map_bus - Get configuration base
193 * @bus: PCI Bus structure 193 * @bus: PCI Bus structure
194 * @devfn: Device/function 194 * @devfn: Device/function
195 * @where: Offset from base 195 * @where: Offset from base
@@ -197,96 +197,26 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
197 * Return: Base address of the configuration space needed to be 197 * Return: Base address of the configuration space needed to be
198 * accessed. 198 * accessed.
199 */ 199 */
200static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus, 200static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
201 unsigned int devfn, int where) 201 unsigned int devfn, int where)
202{ 202{
203 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); 203 struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
204 int relbus; 204 int relbus;
205 205
206 if (!xilinx_pcie_valid_device(bus, devfn))
207 return NULL;
208
206 relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | 209 relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
207 (devfn << ECAM_DEV_NUM_SHIFT); 210 (devfn << ECAM_DEV_NUM_SHIFT);
208 211
209 return port->reg_base + relbus + where; 212 return port->reg_base + relbus + where;
210} 213}
211 214
212/**
213 * xilinx_pcie_read_config - Read configuration space
214 * @bus: PCI Bus structure
215 * @devfn: Device/function
216 * @where: Offset from base
217 * @size: Byte/word/dword
218 * @val: Value to be read
219 *
220 * Return: PCIBIOS_SUCCESSFUL on success
221 * PCIBIOS_DEVICE_NOT_FOUND on failure
222 */
223static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
224 int where, int size, u32 *val)
225{
226 void __iomem *addr;
227
228 if (!xilinx_pcie_valid_device(bus, devfn)) {
229 *val = 0xFFFFFFFF;
230 return PCIBIOS_DEVICE_NOT_FOUND;
231 }
232
233 addr = xilinx_pcie_config_base(bus, devfn, where);
234
235 switch (size) {
236 case 1:
237 *val = readb(addr);
238 break;
239 case 2:
240 *val = readw(addr);
241 break;
242 default:
243 *val = readl(addr);
244 break;
245 }
246
247 return PCIBIOS_SUCCESSFUL;
248}
249
250/**
251 * xilinx_pcie_write_config - Write configuration space
252 * @bus: PCI Bus structure
253 * @devfn: Device/function
254 * @where: Offset from base
255 * @size: Byte/word/dword
256 * @val: Value to be written to device
257 *
258 * Return: PCIBIOS_SUCCESSFUL on success
259 * PCIBIOS_DEVICE_NOT_FOUND on failure
260 */
261static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
262 int where, int size, u32 val)
263{
264 void __iomem *addr;
265
266 if (!xilinx_pcie_valid_device(bus, devfn))
267 return PCIBIOS_DEVICE_NOT_FOUND;
268
269 addr = xilinx_pcie_config_base(bus, devfn, where);
270
271 switch (size) {
272 case 1:
273 writeb(val, addr);
274 break;
275 case 2:
276 writew(val, addr);
277 break;
278 default:
279 writel(val, addr);
280 break;
281 }
282
283 return PCIBIOS_SUCCESSFUL;
284}
285
286/* PCIe operations */ 215/* PCIe operations */
287static struct pci_ops xilinx_pcie_ops = { 216static struct pci_ops xilinx_pcie_ops = {
288 .read = xilinx_pcie_read_config, 217 .map_bus = xilinx_pcie_map_bus,
289 .write = xilinx_pcie_write_config, 218 .read = pci_generic_config_read,
219 .write = pci_generic_config_write,
290}; 220};
291 221
292/* MSI functions */ 222/* MSI functions */
@@ -737,7 +667,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
737 resource_size_t offset; 667 resource_size_t offset;
738 struct of_pci_range_parser parser; 668 struct of_pci_range_parser parser;
739 struct of_pci_range range; 669 struct of_pci_range range;
740 struct pci_host_bridge_window *win; 670 struct resource_entry *win;
741 int err = 0, mem_resno = 0; 671 int err = 0, mem_resno = 0;
742 672
743 /* Get the ranges */ 673 /* Get the ranges */
@@ -807,7 +737,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
807 737
808free_resources: 738free_resources:
809 release_child_resources(&iomem_resource); 739 release_child_resources(&iomem_resource);
810 list_for_each_entry(win, &port->resources, list) 740 resource_list_for_each_entry(win, &port->resources)
811 devm_kfree(dev, win->res); 741 devm_kfree(dev, win->res);
812 pci_free_resource_list(&port->resources); 742 pci_free_resource_list(&port->resources);
813 743
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index a5a7fd8332ac..46db29395a62 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -214,8 +214,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
214 214
215 kfree(slot->hotplug_slot->info); 215 kfree(slot->hotplug_slot->info);
216 kfree(slot->hotplug_slot); 216 kfree(slot->hotplug_slot);
217 if (slot->dev) 217 pci_dev_put(slot->dev);
218 pci_dev_put(slot->dev);
219 kfree(slot); 218 kfree(slot);
220} 219}
221 220
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ff32e85e1de6..f052e951b23e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -532,8 +532,6 @@ static void interrupt_event_handler(struct work_struct *work)
532 pciehp_green_led_off(p_slot); 532 pciehp_green_led_off(p_slot);
533 break; 533 break;
534 case INT_PRESENCE_ON: 534 case INT_PRESENCE_ON:
535 if (!HP_SUPR_RM(ctrl))
536 break;
537 ctrl_dbg(ctrl, "Surprise Insertion\n"); 535 ctrl_dbg(ctrl, "Surprise Insertion\n");
538 handle_surprise_event(p_slot); 536 handle_surprise_event(p_slot);
539 break; 537 break;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index bada20999870..c32fb786d48e 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -475,7 +475,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
475 struct slot *slot = bss_hotplug_slot->private; 475 struct slot *slot = bss_hotplug_slot->private;
476 struct pci_dev *dev, *temp; 476 struct pci_dev *dev, *temp;
477 int rc; 477 int rc;
478 acpi_owner_id ssdt_id = 0; 478 acpi_handle ssdt_hdl = NULL;
479 479
480 /* Acquire update access to the bus */ 480 /* Acquire update access to the bus */
481 mutex_lock(&sn_hotplug_mutex); 481 mutex_lock(&sn_hotplug_mutex);
@@ -522,7 +522,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
522 if (ACPI_SUCCESS(ret) && 522 if (ACPI_SUCCESS(ret) &&
523 (adr>>16) == (slot->device_num + 1)) { 523 (adr>>16) == (slot->device_num + 1)) {
524 /* retain the owner id */ 524 /* retain the owner id */
525 acpi_get_id(chandle, &ssdt_id); 525 ssdt_hdl = chandle;
526 526
527 ret = acpi_bus_get_device(chandle, 527 ret = acpi_bus_get_device(chandle,
528 &device); 528 &device);
@@ -547,12 +547,13 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
547 pci_unlock_rescan_remove(); 547 pci_unlock_rescan_remove();
548 548
549 /* Remove the SSDT for the slot from the ACPI namespace */ 549 /* Remove the SSDT for the slot from the ACPI namespace */
550 if (SN_ACPI_BASE_SUPPORT() && ssdt_id) { 550 if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) {
551 acpi_status ret; 551 acpi_status ret;
552 ret = acpi_unload_table_id(ssdt_id); 552 ret = acpi_unload_parent_table(ssdt_hdl);
553 if (ACPI_FAILURE(ret)) { 553 if (ACPI_FAILURE(ret)) {
554 printk(KERN_ERR "%s: acpi_unload_table_id failed (0x%x) for id %d\n", 554 acpi_handle_err(ssdt_hdl,
555 __func__, ret, ssdt_id); 555 "%s: acpi_unload_parent_table failed (0x%x)\n",
556 __func__, ret);
556 /* try to continue on */ 557 /* try to continue on */
557 } 558 }
558 } 559 }
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index fd60806d3fd0..c3e7dfcf9ff5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -694,11 +694,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
694{ 694{
695 resource_size_t phys_addr; 695 resource_size_t phys_addr;
696 u32 table_offset; 696 u32 table_offset;
697 unsigned long flags;
697 u8 bir; 698 u8 bir;
698 699
699 pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, 700 pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
700 &table_offset); 701 &table_offset);
701 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); 702 bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
703 flags = pci_resource_flags(dev, bir);
704 if (!flags || (flags & IORESOURCE_UNSET))
705 return NULL;
706
702 table_offset &= PCI_MSIX_TABLE_OFFSET; 707 table_offset &= PCI_MSIX_TABLE_OFFSET;
703 phys_addr = pci_resource_start(dev, bir) + table_offset; 708 phys_addr = pci_resource_start(dev, bir) + table_offset;
704 709
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 3542150fc8a3..489063987325 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -501,12 +501,29 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
501 return 0; 501 return 0;
502} 502}
503 503
504static bool acpi_pci_need_resume(struct pci_dev *dev)
505{
506 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
507
508 if (!adev || !acpi_device_power_manageable(adev))
509 return false;
510
511 if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
512 return true;
513
514 if (acpi_target_system_state() == ACPI_STATE_S0)
515 return false;
516
517 return !!adev->power.flags.dsw_present;
518}
519
504static struct pci_platform_pm_ops acpi_pci_platform_pm = { 520static struct pci_platform_pm_ops acpi_pci_platform_pm = {
505 .is_manageable = acpi_pci_power_manageable, 521 .is_manageable = acpi_pci_power_manageable,
506 .set_state = acpi_pci_set_power_state, 522 .set_state = acpi_pci_set_power_state,
507 .choose_state = acpi_pci_choose_state, 523 .choose_state = acpi_pci_choose_state,
508 .sleep_wake = acpi_pci_sleep_wake, 524 .sleep_wake = acpi_pci_sleep_wake,
509 .run_wake = acpi_pci_run_wake, 525 .run_wake = acpi_pci_run_wake,
526 .need_resume = acpi_pci_need_resume,
510}; 527};
511 528
512void acpi_pci_add_bus(struct pci_bus *bus) 529void acpi_pci_add_bus(struct pci_bus *bus)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 887e6bd95af7..3cb2210de553 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -653,7 +653,6 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
653static int pci_pm_prepare(struct device *dev) 653static int pci_pm_prepare(struct device *dev)
654{ 654{
655 struct device_driver *drv = dev->driver; 655 struct device_driver *drv = dev->driver;
656 int error = 0;
657 656
658 /* 657 /*
659 * Devices having power.ignore_children set may still be necessary for 658 * Devices having power.ignore_children set may still be necessary for
@@ -662,10 +661,12 @@ static int pci_pm_prepare(struct device *dev)
662 if (dev->power.ignore_children) 661 if (dev->power.ignore_children)
663 pm_runtime_resume(dev); 662 pm_runtime_resume(dev);
664 663
665 if (drv && drv->pm && drv->pm->prepare) 664 if (drv && drv->pm && drv->pm->prepare) {
666 error = drv->pm->prepare(dev); 665 int error = drv->pm->prepare(dev);
667 666 if (error)
668 return error; 667 return error;
668 }
669 return pci_dev_keep_suspended(to_pci_dev(dev));
669} 670}
670 671
671 672
@@ -1383,7 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
1383 if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) 1384 if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
1384 return -ENOMEM; 1385 return -ENOMEM;
1385 1386
1386 if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x", 1387 if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
1387 pdev->vendor, pdev->device, 1388 pdev->vendor, pdev->device,
1388 pdev->subsystem_vendor, pdev->subsystem_device, 1389 pdev->subsystem_vendor, pdev->subsystem_device,
1389 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), 1390 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e9d4fd861ba1..81f06e8dcc04 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -10,6 +10,8 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/of.h>
14#include <linux/of_pci.h>
13#include <linux/pci.h> 15#include <linux/pci.h>
14#include <linux/pm.h> 16#include <linux/pm.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
@@ -521,6 +523,11 @@ static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
521 pci_platform_pm->run_wake(dev, enable) : -ENODEV; 523 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
522} 524}
523 525
526static inline bool platform_pci_need_resume(struct pci_dev *dev)
527{
528 return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
529}
530
524/** 531/**
525 * pci_raw_set_power_state - Use PCI PM registers to set the power state of 532 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
526 * given PCI device 533 * given PCI device
@@ -1999,6 +2006,27 @@ bool pci_dev_run_wake(struct pci_dev *dev)
1999} 2006}
2000EXPORT_SYMBOL_GPL(pci_dev_run_wake); 2007EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2001 2008
2009/**
2010 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2011 * @pci_dev: Device to check.
2012 *
2013 * Return 'true' if the device is runtime-suspended, it doesn't have to be
2014 * reconfigured due to wakeup settings difference between system and runtime
2015 * suspend and the current power state of it is suitable for the upcoming
2016 * (system) transition.
2017 */
2018bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2019{
2020 struct device *dev = &pci_dev->dev;
2021
2022 if (!pm_runtime_suspended(dev)
2023 || (device_can_wakeup(dev) && !device_may_wakeup(dev))
2024 || platform_pci_need_resume(pci_dev))
2025 return false;
2026
2027 return pci_target_state(pci_dev) == pci_dev->current_state;
2028}
2029
2002void pci_config_pm_runtime_get(struct pci_dev *pdev) 2030void pci_config_pm_runtime_get(struct pci_dev *pdev)
2003{ 2031{
2004 struct device *dev = &pdev->dev; 2032 struct device *dev = &pdev->dev;
@@ -3197,7 +3225,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
3197{ 3225{
3198 u16 csr; 3226 u16 csr;
3199 3227
3200 if (!dev->pm_cap) 3228 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3201 return -ENOTTY; 3229 return -ENOTTY;
3202 3230
3203 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); 3231 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
@@ -4471,6 +4499,53 @@ int pci_get_new_domain_nr(void)
4471{ 4499{
4472 return atomic_inc_return(&__domain_nr); 4500 return atomic_inc_return(&__domain_nr);
4473} 4501}
4502
4503#ifdef CONFIG_PCI_DOMAINS_GENERIC
4504void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4505{
4506 static int use_dt_domains = -1;
4507 int domain = of_get_pci_domain_nr(parent->of_node);
4508
4509 /*
4510 * Check DT domain and use_dt_domains values.
4511 *
4512 * If DT domain property is valid (domain >= 0) and
4513 * use_dt_domains != 0, the DT assignment is valid since this means
4514 * we have not previously allocated a domain number by using
4515 * pci_get_new_domain_nr(); we should also update use_dt_domains to
4516 * 1, to indicate that we have just assigned a domain number from
4517 * DT.
4518 *
4519 * If DT domain property value is not valid (ie domain < 0), and we
4520 * have not previously assigned a domain number from DT
4521 * (use_dt_domains != 1) we should assign a domain number by
4522 * using the:
4523 *
4524 * pci_get_new_domain_nr()
4525 *
4526 * API and update the use_dt_domains value to keep track of method we
4527 * are using to assign domain numbers (use_dt_domains = 0).
4528 *
4529 * All other combinations imply we have a platform that is trying
4530 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
4531 * which is a recipe for domain mishandling and it is prevented by
4532 * invalidating the domain value (domain = -1) and printing a
4533 * corresponding error.
4534 */
4535 if (domain >= 0 && use_dt_domains) {
4536 use_dt_domains = 1;
4537 } else if (domain < 0 && use_dt_domains != 1) {
4538 use_dt_domains = 0;
4539 domain = pci_get_new_domain_nr();
4540 } else {
4541 dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
4542 parent->of_node->full_name);
4543 domain = -1;
4544 }
4545
4546 bus->domain_nr = domain;
4547}
4548#endif
4474#endif 4549#endif
4475 4550
4476/** 4551/**
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d54632a1db43..4091f82239cd 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -50,6 +50,10 @@ int pci_probe_reset_function(struct pci_dev *dev);
50 * for given device (the device's wake-up capability has to be 50 * for given device (the device's wake-up capability has to be
51 * enabled by @sleep_wake for this feature to work) 51 * enabled by @sleep_wake for this feature to work)
52 * 52 *
53 * @need_resume: returns 'true' if the given device (which is currently
54 * suspended) needs to be resumed to be configured for system
55 * wakeup.
56 *
53 * If given platform is generally capable of power managing PCI devices, all of 57 * If given platform is generally capable of power managing PCI devices, all of
54 * these callbacks are mandatory. 58 * these callbacks are mandatory.
55 */ 59 */
@@ -59,6 +63,7 @@ struct pci_platform_pm_ops {
59 pci_power_t (*choose_state)(struct pci_dev *dev); 63 pci_power_t (*choose_state)(struct pci_dev *dev);
60 int (*sleep_wake)(struct pci_dev *dev, bool enable); 64 int (*sleep_wake)(struct pci_dev *dev, bool enable);
61 int (*run_wake)(struct pci_dev *dev, bool enable); 65 int (*run_wake)(struct pci_dev *dev, bool enable);
66 bool (*need_resume)(struct pci_dev *dev);
62}; 67};
63 68
64int pci_set_platform_pm(struct pci_platform_pm_ops *ops); 69int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
@@ -67,6 +72,7 @@ void pci_power_up(struct pci_dev *dev);
67void pci_disable_enabled_device(struct pci_dev *dev); 72void pci_disable_enabled_device(struct pci_dev *dev);
68int pci_finish_runtime_suspend(struct pci_dev *dev); 73int pci_finish_runtime_suspend(struct pci_dev *dev);
69int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 74int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
75bool pci_dev_keep_suspended(struct pci_dev *dev);
70void pci_config_pm_runtime_get(struct pci_dev *dev); 76void pci_config_pm_runtime_get(struct pci_dev *dev);
71void pci_config_pm_runtime_put(struct pci_dev *dev); 77void pci_config_pm_runtime_put(struct pci_dev *dev);
72void pci_pm_init(struct pci_dev *dev); 78void pci_pm_init(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e1e7026b838d..820740a22e94 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -859,7 +859,10 @@ static ssize_t link_state_store(struct device *dev,
859{ 859{
860 struct pci_dev *pdev = to_pci_dev(dev); 860 struct pci_dev *pdev = to_pci_dev(dev);
861 struct pcie_link_state *link, *root = pdev->link_state->root; 861 struct pcie_link_state *link, *root = pdev->link_state->root;
862 u32 val = buf[0] - '0', state = 0; 862 u32 val, state = 0;
863
864 if (kstrtouint(buf, 10, &val))
865 return -EINVAL;
863 866
864 if (aspm_disabled) 867 if (aspm_disabled)
865 return -EPERM; 868 return -EPERM;
@@ -900,15 +903,14 @@ static ssize_t clk_ctl_store(struct device *dev,
900 size_t n) 903 size_t n)
901{ 904{
902 struct pci_dev *pdev = to_pci_dev(dev); 905 struct pci_dev *pdev = to_pci_dev(dev);
903 int state; 906 bool state;
904 907
905 if (n < 1) 908 if (strtobool(buf, &state))
906 return -EINVAL; 909 return -EINVAL;
907 state = buf[0]-'0';
908 910
909 down_read(&pci_bus_sem); 911 down_read(&pci_bus_sem);
910 mutex_lock(&aspm_lock); 912 mutex_lock(&aspm_lock);
911 pcie_set_clkpm_nocheck(pdev->link_state, !!state); 913 pcie_set_clkpm_nocheck(pdev->link_state, state);
912 mutex_unlock(&aspm_lock); 914 mutex_unlock(&aspm_lock);
913 up_read(&pci_bus_sem); 915 up_read(&pci_bus_sem);
914 916
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 23212f8ae09b..8d2f400e96cb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1895,7 +1895,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1895 int error; 1895 int error;
1896 struct pci_host_bridge *bridge; 1896 struct pci_host_bridge *bridge;
1897 struct pci_bus *b, *b2; 1897 struct pci_bus *b, *b2;
1898 struct pci_host_bridge_window *window, *n; 1898 struct resource_entry *window, *n;
1899 struct resource *res; 1899 struct resource *res;
1900 resource_size_t offset; 1900 resource_size_t offset;
1901 char bus_addr[64]; 1901 char bus_addr[64];
@@ -1959,8 +1959,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1959 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); 1959 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
1960 1960
1961 /* Add initial resources to the bus */ 1961 /* Add initial resources to the bus */
1962 list_for_each_entry_safe(window, n, resources, list) { 1962 resource_list_for_each_entry_safe(window, n, resources) {
1963 list_move_tail(&window->list, &bridge->windows); 1963 list_move_tail(&window->node, &bridge->windows);
1964 res = window->res; 1964 res = window->res;
1965 offset = window->offset; 1965 offset = window->offset;
1966 if (res->flags & IORESOURCE_BUS) 1966 if (res->flags & IORESOURCE_BUS)
@@ -2060,12 +2060,12 @@ void pci_bus_release_busn_res(struct pci_bus *b)
2060struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 2060struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2061 struct pci_ops *ops, void *sysdata, struct list_head *resources) 2061 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2062{ 2062{
2063 struct pci_host_bridge_window *window; 2063 struct resource_entry *window;
2064 bool found = false; 2064 bool found = false;
2065 struct pci_bus *b; 2065 struct pci_bus *b;
2066 int max; 2066 int max;
2067 2067
2068 list_for_each_entry(window, resources, list) 2068 resource_list_for_each_entry(window, resources)
2069 if (window->res->flags & IORESOURCE_BUS) { 2069 if (window->res->flags & IORESOURCE_BUS) {
2070 found = true; 2070 found = true;
2071 break; 2071 break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 903d5078b5ed..85f247e28a80 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3076,6 +3076,27 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
3076 */ 3076 */
3077DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); 3077DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
3078 3078
3079static void quirk_no_pm_reset(struct pci_dev *dev)
3080{
3081 /*
3082 * We can't do a bus reset on root bus devices, but an ineffective
3083 * PM reset may be better than nothing.
3084 */
3085 if (!pci_is_root_bus(dev->bus))
3086 dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
3087}
3088
3089/*
3090 * Some AMD/ATI GPUS (HD8570 - Oland) report that a D3hot->D0 transition
3091 * causes a reset (i.e., they advertise NoSoftRst-). This transition seems
3092 * to have no effect on the device: it retains the framebuffer contents and
3093 * monitor sync. Advertising this support makes other layers, like VFIO,
3094 * assume pci_reset_function() is viable for this device. Mark it as
3095 * unavailable to skip it when testing reset methods.
3096 */
3097DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
3098 PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3099
3079#ifdef CONFIG_ACPI 3100#ifdef CONFIG_ACPI
3080/* 3101/*
3081 * Apple: Shutdown Cactus Ridge Thunderbolt controller. 3102 * Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3576,6 +3597,44 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
3576 quirk_dma_func1_alias); 3597 quirk_dma_func1_alias);
3577 3598
3578/* 3599/*
3600 * Some devices DMA with the wrong devfn, not just the wrong function.
3601 * quirk_fixed_dma_alias() uses this table to create fixed aliases, where
3602 * the alias is "fixed" and independent of the device devfn.
3603 *
3604 * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O
3605 * processor. To software, this appears as a PCIe-to-PCI/X bridge with a
3606 * single device on the secondary bus. In reality, the single exposed
3607 * device at 0e.0 is the Address Translation Unit (ATU) of the controller
3608 * that provides a bridge to the internal bus of the I/O processor. The
3609 * controller supports private devices, which can be hidden from PCI config
3610 * space. In the case of the Adaptec 3405, a private device at 01.0
3611 * appears to be the DMA engine, which therefore needs to become a DMA
3612 * alias for the device.
3613 */
3614static const struct pci_device_id fixed_dma_alias_tbl[] = {
3615 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
3616 PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
3617 .driver_data = PCI_DEVFN(1, 0) },
3618 { 0 }
3619};
3620
3621static void quirk_fixed_dma_alias(struct pci_dev *dev)
3622{
3623 const struct pci_device_id *id;
3624
3625 id = pci_match_id(fixed_dma_alias_tbl, dev);
3626 if (id) {
3627 dev->dma_alias_devfn = id->driver_data;
3628 dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
3629 dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
3630 PCI_SLOT(dev->dma_alias_devfn),
3631 PCI_FUNC(dev->dma_alias_devfn));
3632 }
3633}
3634
3635DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
3636
3637/*
3579 * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in 3638 * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
3580 * using the wrong DMA alias for the device. Some of these devices can be 3639 * using the wrong DMA alias for the device. Some of these devices can be
3581 * used as either forward or reverse bridges, so we need to test whether the 3640 * used as either forward or reverse bridges, so we need to test whether the
@@ -3678,6 +3737,9 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = {
3678 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, 3737 0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
3679 /* Patsburg (X79) PCH */ 3738 /* Patsburg (X79) PCH */
3680 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, 3739 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
3740 /* Wellsburg (X99) PCH */
3741 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
3742 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
3681}; 3743};
3682 3744
3683static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) 3745static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
@@ -3761,6 +3823,8 @@ static const struct pci_dev_acs_enabled {
3761 { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs }, 3823 { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
3762 { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs }, 3824 { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
3763 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, 3825 { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
3826 { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
3827 { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
3764 { 0 } 3828 { 0 }
3765}; 3829};
3766 3830
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index f955edb9bea7..eb0ad530dc43 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -71,6 +71,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
71{ 71{
72 void __iomem *image; 72 void __iomem *image;
73 int last_image; 73 int last_image;
74 unsigned length;
74 75
75 image = rom; 76 image = rom;
76 do { 77 do {
@@ -93,9 +94,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
93 if (readb(pds + 3) != 'R') 94 if (readb(pds + 3) != 'R')
94 break; 95 break;
95 last_image = readb(pds + 21) & 0x80; 96 last_image = readb(pds + 21) & 0x80;
96 /* this length is reliable */ 97 length = readw(pds + 16);
97 image += readw(pds + 16) * 512; 98 image += length * 512;
98 } while (!last_image); 99 } while (length && !last_image);
99 100
100 /* never return a size larger than the PCI resource window */ 101 /* never return a size larger than the PCI resource window */
101 /* there are known ROMs that get the size wrong */ 102 /* there are known ROMs that get the size wrong */
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 66977ebf13b3..ff0356fb378f 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -180,20 +180,21 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
180 struct pnp_dev *dev = data; 180 struct pnp_dev *dev = data;
181 struct acpi_resource_dma *dma; 181 struct acpi_resource_dma *dma;
182 struct acpi_resource_vendor_typed *vendor_typed; 182 struct acpi_resource_vendor_typed *vendor_typed;
183 struct resource r = {0}; 183 struct resource_win win = {{0}, 0};
184 struct resource *r = &win.res;
184 int i, flags; 185 int i, flags;
185 186
186 if (acpi_dev_resource_address_space(res, &r) 187 if (acpi_dev_resource_address_space(res, &win)
187 || acpi_dev_resource_ext_address_space(res, &r)) { 188 || acpi_dev_resource_ext_address_space(res, &win)) {
188 pnp_add_resource(dev, &r); 189 pnp_add_resource(dev, &win.res);
189 return AE_OK; 190 return AE_OK;
190 } 191 }
191 192
192 r.flags = 0; 193 r->flags = 0;
193 if (acpi_dev_resource_interrupt(res, 0, &r)) { 194 if (acpi_dev_resource_interrupt(res, 0, r)) {
194 pnpacpi_add_irqresource(dev, &r); 195 pnpacpi_add_irqresource(dev, r);
195 for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++) 196 for (i = 1; acpi_dev_resource_interrupt(res, i, r); i++)
196 pnpacpi_add_irqresource(dev, &r); 197 pnpacpi_add_irqresource(dev, r);
197 198
198 if (i > 1) { 199 if (i > 1) {
199 /* 200 /*
@@ -209,7 +210,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
209 } 210 }
210 } 211 }
211 return AE_OK; 212 return AE_OK;
212 } else if (r.flags & IORESOURCE_DISABLED) { 213 } else if (r->flags & IORESOURCE_DISABLED) {
213 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); 214 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
214 return AE_OK; 215 return AE_OK;
215 } 216 }
@@ -218,13 +219,13 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
218 case ACPI_RESOURCE_TYPE_MEMORY24: 219 case ACPI_RESOURCE_TYPE_MEMORY24:
219 case ACPI_RESOURCE_TYPE_MEMORY32: 220 case ACPI_RESOURCE_TYPE_MEMORY32:
220 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 221 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
221 if (acpi_dev_resource_memory(res, &r)) 222 if (acpi_dev_resource_memory(res, r))
222 pnp_add_resource(dev, &r); 223 pnp_add_resource(dev, r);
223 break; 224 break;
224 case ACPI_RESOURCE_TYPE_IO: 225 case ACPI_RESOURCE_TYPE_IO:
225 case ACPI_RESOURCE_TYPE_FIXED_IO: 226 case ACPI_RESOURCE_TYPE_FIXED_IO:
226 if (acpi_dev_resource_io(res, &r)) 227 if (acpi_dev_resource_io(res, r))
227 pnp_add_resource(dev, &r); 228 pnp_add_resource(dev, r);
228 break; 229 break;
229 case ACPI_RESOURCE_TYPE_DMA: 230 case ACPI_RESOURCE_TYPE_DMA:
230 dma = &res->data.dma; 231 dma = &res->data.dma;
@@ -410,12 +411,12 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
410 if (p->resource_type == ACPI_MEMORY_RANGE) { 411 if (p->resource_type == ACPI_MEMORY_RANGE) {
411 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) 412 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
412 flags = IORESOURCE_MEM_WRITEABLE; 413 flags = IORESOURCE_MEM_WRITEABLE;
413 pnp_register_mem_resource(dev, option_flags, p->minimum, 414 pnp_register_mem_resource(dev, option_flags, p->address.minimum,
414 p->minimum, 0, p->address_length, 415 p->address.minimum, 0, p->address.address_length,
415 flags); 416 flags);
416 } else if (p->resource_type == ACPI_IO_RANGE) 417 } else if (p->resource_type == ACPI_IO_RANGE)
417 pnp_register_port_resource(dev, option_flags, p->minimum, 418 pnp_register_port_resource(dev, option_flags, p->address.minimum,
418 p->minimum, 0, p->address_length, 419 p->address.minimum, 0, p->address.address_length,
419 IORESOURCE_IO_FIXED); 420 IORESOURCE_IO_FIXED);
420} 421}
421 422
@@ -429,12 +430,12 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
429 if (p->resource_type == ACPI_MEMORY_RANGE) { 430 if (p->resource_type == ACPI_MEMORY_RANGE) {
430 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) 431 if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
431 flags = IORESOURCE_MEM_WRITEABLE; 432 flags = IORESOURCE_MEM_WRITEABLE;
432 pnp_register_mem_resource(dev, option_flags, p->minimum, 433 pnp_register_mem_resource(dev, option_flags, p->address.minimum,
433 p->minimum, 0, p->address_length, 434 p->address.minimum, 0, p->address.address_length,
434 flags); 435 flags);
435 } else if (p->resource_type == ACPI_IO_RANGE) 436 } else if (p->resource_type == ACPI_IO_RANGE)
436 pnp_register_port_resource(dev, option_flags, p->minimum, 437 pnp_register_port_resource(dev, option_flags, p->address.minimum,
437 p->minimum, 0, p->address_length, 438 p->address.minimum, 0, p->address.address_length,
438 IORESOURCE_IO_FIXED); 439 IORESOURCE_IO_FIXED);
439} 440}
440 441
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 8bcfecd66281..eeca70ddbf61 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2430,7 +2430,7 @@ static int tsi721_probe(struct pci_dev *pdev,
2430 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, 2430 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
2431 PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | 2431 PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
2432 PCI_EXP_DEVCTL_NOSNOOP_EN, 2432 PCI_EXP_DEVCTL_NOSNOOP_EN,
2433 0x2 << MAX_READ_REQUEST_SZ_SHIFT); 2433 PCI_EXP_DEVCTL_READRQ_512B);
2434 2434
2435 /* Adjust PCIe completion timeout. */ 2435 /* Adjust PCIe completion timeout. */
2436 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); 2436 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index a7b42680a06a..9d2502543ef6 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,8 +72,6 @@
72#define TSI721_MSIXPBA_OFFSET 0x2a000 72#define TSI721_MSIXPBA_OFFSET 0x2a000
73#define TSI721_PCIECFG_EPCTL 0x400 73#define TSI721_PCIECFG_EPCTL 0x400
74 74
75#define MAX_READ_REQUEST_SZ_SHIFT 12
76
77/* 75/*
78 * Event Management Registers 76 * Event Management Registers
79 */ 77 */
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c3a60b57a865..a6f116aa5235 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -414,6 +414,14 @@ config REGULATOR_MAX77802
414 Exynos5420/Exynos5800 SoCs to control various voltages. 414 Exynos5420/Exynos5800 SoCs to control various voltages.
415 It includes support for control of voltage and ramp speed. 415 It includes support for control of voltage and ramp speed.
416 416
417config REGULATOR_MAX77843
418 tristate "Maxim 77843 regulator"
419 depends on MFD_MAX77843
420 help
421 This driver controls a Maxim 77843 regulator.
422 The regulator include two 'SAFEOUT' for USB(Universal Serial Bus)
423 This is suitable for Exynos5433 SoC chips.
424
417config REGULATOR_MC13XXX_CORE 425config REGULATOR_MC13XXX_CORE
418 tristate 426 tristate
419 427
@@ -433,6 +441,15 @@ config REGULATOR_MC13892
433 Say y here to support the regulators found on the Freescale MC13892 441 Say y here to support the regulators found on the Freescale MC13892
434 PMIC. 442 PMIC.
435 443
444config REGULATOR_MT6397
445 tristate "MediaTek MT6397 PMIC"
446 depends on MFD_MT6397
447 help
448 Say y here to select this option to enable the power regulator of
449 MediaTek MT6397 PMIC.
450 This driver supports the control of different power rails of device
451 through regulator interface.
452
436config REGULATOR_PALMAS 453config REGULATOR_PALMAS
437 tristate "TI Palmas PMIC Regulators" 454 tristate "TI Palmas PMIC Regulators"
438 depends on MFD_PALMAS 455 depends on MFD_PALMAS
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1f28ebfc6f3a..2c4da15e1545 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
55obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o 55obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
56obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o 56obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
57obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o 57obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
58obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
58obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o 59obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
59obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o 60obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
60obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o 61obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
62obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
61obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o 63obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
62obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o 64obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
63obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o 65obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f23d7e1f2ee7..e4331f5e5d7d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -32,11 +32,13 @@
32 32
33#define AXP20X_FREQ_DCDC_MASK 0x0f 33#define AXP20X_FREQ_DCDC_MASK 0x0f
34 34
35#define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ 35#define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
36 _emask, _enable_val, _disable_val) \ 36 _ereg, _emask, _enable_val, _disable_val) \
37 [AXP20X_##_id] = { \ 37 [AXP20X_##_id] = { \
38 .name = #_id, \ 38 .name = #_id, \
39 .supply_name = (_supply), \ 39 .supply_name = (_supply), \
40 .of_match = of_match_ptr(_match), \
41 .regulators_node = of_match_ptr("regulators"), \
40 .type = REGULATOR_VOLTAGE, \ 42 .type = REGULATOR_VOLTAGE, \
41 .id = AXP20X_##_id, \ 43 .id = AXP20X_##_id, \
42 .n_voltages = (((_max) - (_min)) / (_step) + 1), \ 44 .n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -52,11 +54,13 @@
52 .ops = &axp20x_ops, \ 54 .ops = &axp20x_ops, \
53 } 55 }
54 56
55#define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ 57#define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
56 _emask) \ 58 _ereg, _emask) \
57 [AXP20X_##_id] = { \ 59 [AXP20X_##_id] = { \
58 .name = #_id, \ 60 .name = #_id, \
59 .supply_name = (_supply), \ 61 .supply_name = (_supply), \
62 .of_match = of_match_ptr(_match), \
63 .regulators_node = of_match_ptr("regulators"), \
60 .type = REGULATOR_VOLTAGE, \ 64 .type = REGULATOR_VOLTAGE, \
61 .id = AXP20X_##_id, \ 65 .id = AXP20X_##_id, \
62 .n_voltages = (((_max) - (_min)) / (_step) + 1), \ 66 .n_voltages = (((_max) - (_min)) / (_step) + 1), \
@@ -70,10 +74,12 @@
70 .ops = &axp20x_ops, \ 74 .ops = &axp20x_ops, \
71 } 75 }
72 76
73#define AXP20X_DESC_FIXED(_id, _supply, _volt) \ 77#define AXP20X_DESC_FIXED(_id, _match, _supply, _volt) \
74 [AXP20X_##_id] = { \ 78 [AXP20X_##_id] = { \
75 .name = #_id, \ 79 .name = #_id, \
76 .supply_name = (_supply), \ 80 .supply_name = (_supply), \
81 .of_match = of_match_ptr(_match), \
82 .regulators_node = of_match_ptr("regulators"), \
77 .type = REGULATOR_VOLTAGE, \ 83 .type = REGULATOR_VOLTAGE, \
78 .id = AXP20X_##_id, \ 84 .id = AXP20X_##_id, \
79 .n_voltages = 1, \ 85 .n_voltages = 1, \
@@ -82,10 +88,13 @@
82 .ops = &axp20x_ops_fixed \ 88 .ops = &axp20x_ops_fixed \
83 } 89 }
84 90
85#define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask) \ 91#define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg, \
92 _emask) \
86 [AXP20X_##_id] = { \ 93 [AXP20X_##_id] = { \
87 .name = #_id, \ 94 .name = #_id, \
88 .supply_name = (_supply), \ 95 .supply_name = (_supply), \
96 .of_match = of_match_ptr(_match), \
97 .regulators_node = of_match_ptr("regulators"), \
89 .type = REGULATOR_VOLTAGE, \ 98 .type = REGULATOR_VOLTAGE, \
90 .id = AXP20X_##_id, \ 99 .id = AXP20X_##_id, \
91 .n_voltages = ARRAY_SIZE(_table), \ 100 .n_voltages = ARRAY_SIZE(_table), \
@@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = {
127}; 136};
128 137
129static const struct regulator_desc axp20x_regulators[] = { 138static const struct regulator_desc axp20x_regulators[] = {
130 AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f, 139 AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT,
131 AXP20X_PWR_OUT_CTRL, 0x10), 140 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
132 AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f, 141 AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT,
133 AXP20X_PWR_OUT_CTRL, 0x02), 142 0x7f, AXP20X_PWR_OUT_CTRL, 0x02),
134 AXP20X_DESC_FIXED(LDO1, "acin", 1300), 143 AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300),
135 AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0, 144 AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
136 AXP20X_PWR_OUT_CTRL, 0x04), 145 AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
137 AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f, 146 AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT,
138 AXP20X_PWR_OUT_CTRL, 0x40), 147 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
139 AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f, 148 AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
140 AXP20X_PWR_OUT_CTRL, 0x08), 149 AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
141 AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0, 150 AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
142 AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED, 151 AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
143 AXP20X_IO_DISABLED), 152 AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
144};
145
146#define AXP_MATCH(_name, _id) \
147 [AXP20X_##_id] = { \
148 .name = #_name, \
149 .driver_data = (void *) &axp20x_regulators[AXP20X_##_id], \
150 }
151
152static struct of_regulator_match axp20x_matches[] = {
153 AXP_MATCH(dcdc2, DCDC2),
154 AXP_MATCH(dcdc3, DCDC3),
155 AXP_MATCH(ldo1, LDO1),
156 AXP_MATCH(ldo2, LDO2),
157 AXP_MATCH(ldo3, LDO3),
158 AXP_MATCH(ldo4, LDO4),
159 AXP_MATCH(ldo5, LDO5),
160}; 153};
161 154
162static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) 155static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
@@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
193 if (!regulators) { 186 if (!regulators) {
194 dev_warn(&pdev->dev, "regulators node not found\n"); 187 dev_warn(&pdev->dev, "regulators node not found\n");
195 } else { 188 } else {
196 ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches,
197 ARRAY_SIZE(axp20x_matches));
198 if (ret < 0) {
199 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
200 return ret;
201 }
202
203 dcdcfreq = 1500; 189 dcdcfreq = 1500;
204 of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq); 190 of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq);
205 ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); 191 ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
@@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
233{ 219{
234 struct regulator_dev *rdev; 220 struct regulator_dev *rdev;
235 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); 221 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
236 struct regulator_config config = { }; 222 struct regulator_config config = {
237 struct regulator_init_data *init_data; 223 .dev = pdev->dev.parent,
224 .regmap = axp20x->regmap,
225 };
238 int ret, i; 226 int ret, i;
239 u32 workmode; 227 u32 workmode;
240 228
241 ret = axp20x_regulator_parse_dt(pdev); 229 /* This only sets the dcdc freq. Ignore any errors */
242 if (ret) 230 axp20x_regulator_parse_dt(pdev);
243 return ret;
244 231
245 for (i = 0; i < AXP20X_REG_ID_MAX; i++) { 232 for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
246 init_data = axp20x_matches[i].init_data;
247
248 config.dev = pdev->dev.parent;
249 config.init_data = init_data;
250 config.regmap = axp20x->regmap;
251 config.of_node = axp20x_matches[i].of_node;
252
253 rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i], 233 rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i],
254 &config); 234 &config);
255 if (IS_ERR(rdev)) { 235 if (IS_ERR(rdev)) {
@@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
259 return PTR_ERR(rdev); 239 return PTR_ERR(rdev);
260 } 240 }
261 241
262 ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode", 242 ret = of_property_read_u32(rdev->dev.of_node,
243 "x-powers,dcdc-workmode",
263 &workmode); 244 &workmode);
264 if (!ret) { 245 if (!ret) {
265 if (axp20x_set_dcdc_workmode(rdev, i, workmode)) 246 if (axp20x_set_dcdc_workmode(rdev, i, workmode))
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 9c48fb32f660..b899947d839d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev,
632static DEVICE_ATTR(bypass, 0444, 632static DEVICE_ATTR(bypass, 0444,
633 regulator_bypass_show, NULL); 633 regulator_bypass_show, NULL);
634 634
635/*
636 * These are the only attributes are present for all regulators.
637 * Other attributes are a function of regulator functionality.
638 */
639static struct attribute *regulator_dev_attrs[] = {
640 &dev_attr_name.attr,
641 &dev_attr_num_users.attr,
642 &dev_attr_type.attr,
643 NULL,
644};
645ATTRIBUTE_GROUPS(regulator_dev);
646
647static void regulator_dev_release(struct device *dev)
648{
649 struct regulator_dev *rdev = dev_get_drvdata(dev);
650 kfree(rdev);
651}
652
653static struct class regulator_class = {
654 .name = "regulator",
655 .dev_release = regulator_dev_release,
656 .dev_groups = regulator_dev_groups,
657};
658
659/* Calculate the new optimum regulator operating mode based on the new total 635/* Calculate the new optimum regulator operating mode based on the new total
660 * consumer load. All locks held by caller */ 636 * consumer load. All locks held by caller */
661static void drms_uA_update(struct regulator_dev *rdev) 637static int drms_uA_update(struct regulator_dev *rdev)
662{ 638{
663 struct regulator *sibling; 639 struct regulator *sibling;
664 int current_uA = 0, output_uV, input_uV, err; 640 int current_uA = 0, output_uV, input_uV, err;
665 unsigned int mode; 641 unsigned int mode;
666 642
643 /*
644 * first check to see if we can set modes at all, otherwise just
645 * tell the consumer everything is OK.
646 */
667 err = regulator_check_drms(rdev); 647 err = regulator_check_drms(rdev);
668 if (err < 0 || !rdev->desc->ops->get_optimum_mode || 648 if (err < 0)
669 (!rdev->desc->ops->get_voltage && 649 return 0;
670 !rdev->desc->ops->get_voltage_sel) || 650
671 !rdev->desc->ops->set_mode) 651 if (!rdev->desc->ops->get_optimum_mode)
672 return; 652 return 0;
653
654 if (!rdev->desc->ops->set_mode)
655 return -EINVAL;
673 656
674 /* get output voltage */ 657 /* get output voltage */
675 output_uV = _regulator_get_voltage(rdev); 658 output_uV = _regulator_get_voltage(rdev);
676 if (output_uV <= 0) 659 if (output_uV <= 0) {
677 return; 660 rdev_err(rdev, "invalid output voltage found\n");
661 return -EINVAL;
662 }
678 663
679 /* get input voltage */ 664 /* get input voltage */
680 input_uV = 0; 665 input_uV = 0;
@@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev)
682 input_uV = regulator_get_voltage(rdev->supply); 667 input_uV = regulator_get_voltage(rdev->supply);
683 if (input_uV <= 0) 668 if (input_uV <= 0)
684 input_uV = rdev->constraints->input_uV; 669 input_uV = rdev->constraints->input_uV;
685 if (input_uV <= 0) 670 if (input_uV <= 0) {
686 return; 671 rdev_err(rdev, "invalid input voltage found\n");
672 return -EINVAL;
673 }
687 674
688 /* calc total requested load */ 675 /* calc total requested load */
689 list_for_each_entry(sibling, &rdev->consumer_list, list) 676 list_for_each_entry(sibling, &rdev->consumer_list, list)
@@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev)
695 682
696 /* check the new mode is allowed */ 683 /* check the new mode is allowed */
697 err = regulator_mode_constrain(rdev, &mode); 684 err = regulator_mode_constrain(rdev, &mode);
698 if (err == 0) 685 if (err < 0) {
699 rdev->desc->ops->set_mode(rdev, mode); 686 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
687 current_uA, input_uV, output_uV);
688 return err;
689 }
690
691 err = rdev->desc->ops->set_mode(rdev, mode);
692 if (err < 0)
693 rdev_err(rdev, "failed to set optimum mode %x\n", mode);
694
695 return err;
700} 696}
701 697
702static int suspend_set_state(struct regulator_dev *rdev, 698static int suspend_set_state(struct regulator_dev *rdev,
@@ -3026,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode);
3026int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) 3022int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
3027{ 3023{
3028 struct regulator_dev *rdev = regulator->rdev; 3024 struct regulator_dev *rdev = regulator->rdev;
3029 struct regulator *consumer; 3025 int ret;
3030 int ret, output_uV, input_uV = 0, total_uA_load = 0;
3031 unsigned int mode;
3032
3033 if (rdev->supply)
3034 input_uV = regulator_get_voltage(rdev->supply);
3035 3026
3036 mutex_lock(&rdev->mutex); 3027 mutex_lock(&rdev->mutex);
3037
3038 /*
3039 * first check to see if we can set modes at all, otherwise just
3040 * tell the consumer everything is OK.
3041 */
3042 regulator->uA_load = uA_load; 3028 regulator->uA_load = uA_load;
3043 ret = regulator_check_drms(rdev); 3029 ret = drms_uA_update(rdev);
3044 if (ret < 0) {
3045 ret = 0;
3046 goto out;
3047 }
3048
3049 if (!rdev->desc->ops->get_optimum_mode)
3050 goto out;
3051
3052 /*
3053 * we can actually do this so any errors are indicators of
3054 * potential real failure.
3055 */
3056 ret = -EINVAL;
3057
3058 if (!rdev->desc->ops->set_mode)
3059 goto out;
3060
3061 /* get output voltage */
3062 output_uV = _regulator_get_voltage(rdev);
3063 if (output_uV <= 0) {
3064 rdev_err(rdev, "invalid output voltage found\n");
3065 goto out;
3066 }
3067
3068 /* No supply? Use constraint voltage */
3069 if (input_uV <= 0)
3070 input_uV = rdev->constraints->input_uV;
3071 if (input_uV <= 0) {
3072 rdev_err(rdev, "invalid input voltage found\n");
3073 goto out;
3074 }
3075
3076 /* calc total requested load for this regulator */
3077 list_for_each_entry(consumer, &rdev->consumer_list, list)
3078 total_uA_load += consumer->uA_load;
3079
3080 mode = rdev->desc->ops->get_optimum_mode(rdev,
3081 input_uV, output_uV,
3082 total_uA_load);
3083 ret = regulator_mode_constrain(rdev, &mode);
3084 if (ret < 0) {
3085 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
3086 total_uA_load, input_uV, output_uV);
3087 goto out;
3088 }
3089
3090 ret = rdev->desc->ops->set_mode(rdev, mode);
3091 if (ret < 0) {
3092 rdev_err(rdev, "failed to set optimum mode %x\n", mode);
3093 goto out;
3094 }
3095 ret = mode;
3096out:
3097 mutex_unlock(&rdev->mutex); 3030 mutex_unlock(&rdev->mutex);
3031
3098 return ret; 3032 return ret;
3099} 3033}
3100EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); 3034EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
@@ -3436,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode)
3436} 3370}
3437EXPORT_SYMBOL_GPL(regulator_mode_to_status); 3371EXPORT_SYMBOL_GPL(regulator_mode_to_status);
3438 3372
3373static struct attribute *regulator_dev_attrs[] = {
3374 &dev_attr_name.attr,
3375 &dev_attr_num_users.attr,
3376 &dev_attr_type.attr,
3377 &dev_attr_microvolts.attr,
3378 &dev_attr_microamps.attr,
3379 &dev_attr_opmode.attr,
3380 &dev_attr_state.attr,
3381 &dev_attr_status.attr,
3382 &dev_attr_bypass.attr,
3383 &dev_attr_requested_microamps.attr,
3384 &dev_attr_min_microvolts.attr,
3385 &dev_attr_max_microvolts.attr,
3386 &dev_attr_min_microamps.attr,
3387 &dev_attr_max_microamps.attr,
3388 &dev_attr_suspend_standby_state.attr,
3389 &dev_attr_suspend_mem_state.attr,
3390 &dev_attr_suspend_disk_state.attr,
3391 &dev_attr_suspend_standby_microvolts.attr,
3392 &dev_attr_suspend_mem_microvolts.attr,
3393 &dev_attr_suspend_disk_microvolts.attr,
3394 &dev_attr_suspend_standby_mode.attr,
3395 &dev_attr_suspend_mem_mode.attr,
3396 &dev_attr_suspend_disk_mode.attr,
3397 NULL
3398};
3399
3439/* 3400/*
3440 * To avoid cluttering sysfs (and memory) with useless state, only 3401 * To avoid cluttering sysfs (and memory) with useless state, only
3441 * create attributes that can be meaningfully displayed. 3402 * create attributes that can be meaningfully displayed.
3442 */ 3403 */
3443static int add_regulator_attributes(struct regulator_dev *rdev) 3404static umode_t regulator_attr_is_visible(struct kobject *kobj,
3405 struct attribute *attr, int idx)
3444{ 3406{
3445 struct device *dev = &rdev->dev; 3407 struct device *dev = kobj_to_dev(kobj);
3408 struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev);
3446 const struct regulator_ops *ops = rdev->desc->ops; 3409 const struct regulator_ops *ops = rdev->desc->ops;
3447 int status = 0; 3410 umode_t mode = attr->mode;
3411
3412 /* these three are always present */
3413 if (attr == &dev_attr_name.attr ||
3414 attr == &dev_attr_num_users.attr ||
3415 attr == &dev_attr_type.attr)
3416 return mode;
3448 3417
3449 /* some attributes need specific methods to be displayed */ 3418 /* some attributes need specific methods to be displayed */
3450 if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || 3419 if (attr == &dev_attr_microvolts.attr) {
3451 (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || 3420 if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
3452 (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || 3421 (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
3453 (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) { 3422 (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
3454 status = device_create_file(dev, &dev_attr_microvolts); 3423 (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1))
3455 if (status < 0) 3424 return mode;
3456 return status; 3425 return 0;
3457 }
3458 if (ops->get_current_limit) {
3459 status = device_create_file(dev, &dev_attr_microamps);
3460 if (status < 0)
3461 return status;
3462 }
3463 if (ops->get_mode) {
3464 status = device_create_file(dev, &dev_attr_opmode);
3465 if (status < 0)
3466 return status;
3467 }
3468 if (rdev->ena_pin || ops->is_enabled) {
3469 status = device_create_file(dev, &dev_attr_state);
3470 if (status < 0)
3471 return status;
3472 }
3473 if (ops->get_status) {
3474 status = device_create_file(dev, &dev_attr_status);
3475 if (status < 0)
3476 return status;
3477 }
3478 if (ops->get_bypass) {
3479 status = device_create_file(dev, &dev_attr_bypass);
3480 if (status < 0)
3481 return status;
3482 } 3426 }
3483 3427
3428 if (attr == &dev_attr_microamps.attr)
3429 return ops->get_current_limit ? mode : 0;
3430
3431 if (attr == &dev_attr_opmode.attr)
3432 return ops->get_mode ? mode : 0;
3433
3434 if (attr == &dev_attr_state.attr)
3435 return (rdev->ena_pin || ops->is_enabled) ? mode : 0;
3436
3437 if (attr == &dev_attr_status.attr)
3438 return ops->get_status ? mode : 0;
3439
3440 if (attr == &dev_attr_bypass.attr)
3441 return ops->get_bypass ? mode : 0;
3442
3484 /* some attributes are type-specific */ 3443 /* some attributes are type-specific */
3485 if (rdev->desc->type == REGULATOR_CURRENT) { 3444 if (attr == &dev_attr_requested_microamps.attr)
3486 status = device_create_file(dev, &dev_attr_requested_microamps); 3445 return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
3487 if (status < 0)
3488 return status;
3489 }
3490 3446
3491 /* all the other attributes exist to support constraints; 3447 /* all the other attributes exist to support constraints;
3492 * don't show them if there are no constraints, or if the 3448 * don't show them if there are no constraints, or if the
3493 * relevant supporting methods are missing. 3449 * relevant supporting methods are missing.
3494 */ 3450 */
3495 if (!rdev->constraints) 3451 if (!rdev->constraints)
3496 return status; 3452 return 0;
3497 3453
3498 /* constraints need specific supporting methods */ 3454 /* constraints need specific supporting methods */
3499 if (ops->set_voltage || ops->set_voltage_sel) { 3455 if (attr == &dev_attr_min_microvolts.attr ||
3500 status = device_create_file(dev, &dev_attr_min_microvolts); 3456 attr == &dev_attr_max_microvolts.attr)
3501 if (status < 0) 3457 return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0;
3502 return status; 3458
3503 status = device_create_file(dev, &dev_attr_max_microvolts); 3459 if (attr == &dev_attr_min_microamps.attr ||
3504 if (status < 0) 3460 attr == &dev_attr_max_microamps.attr)
3505 return status; 3461 return ops->set_current_limit ? mode : 0;
3506 }
3507 if (ops->set_current_limit) {
3508 status = device_create_file(dev, &dev_attr_min_microamps);
3509 if (status < 0)
3510 return status;
3511 status = device_create_file(dev, &dev_attr_max_microamps);
3512 if (status < 0)
3513 return status;
3514 }
3515
3516 status = device_create_file(dev, &dev_attr_suspend_standby_state);
3517 if (status < 0)
3518 return status;
3519 status = device_create_file(dev, &dev_attr_suspend_mem_state);
3520 if (status < 0)
3521 return status;
3522 status = device_create_file(dev, &dev_attr_suspend_disk_state);
3523 if (status < 0)
3524 return status;
3525 3462
3526 if (ops->set_suspend_voltage) { 3463 if (attr == &dev_attr_suspend_standby_state.attr ||
3527 status = device_create_file(dev, 3464 attr == &dev_attr_suspend_mem_state.attr ||
3528 &dev_attr_suspend_standby_microvolts); 3465 attr == &dev_attr_suspend_disk_state.attr)
3529 if (status < 0) 3466 return mode;
3530 return status; 3467
3531 status = device_create_file(dev, 3468 if (attr == &dev_attr_suspend_standby_microvolts.attr ||
3532 &dev_attr_suspend_mem_microvolts); 3469 attr == &dev_attr_suspend_mem_microvolts.attr ||
3533 if (status < 0) 3470 attr == &dev_attr_suspend_disk_microvolts.attr)
3534 return status; 3471 return ops->set_suspend_voltage ? mode : 0;
3535 status = device_create_file(dev, 3472
3536 &dev_attr_suspend_disk_microvolts); 3473 if (attr == &dev_attr_suspend_standby_mode.attr ||
3537 if (status < 0) 3474 attr == &dev_attr_suspend_mem_mode.attr ||
3538 return status; 3475 attr == &dev_attr_suspend_disk_mode.attr)
3539 } 3476 return ops->set_suspend_mode ? mode : 0;
3540 3477
3541 if (ops->set_suspend_mode) { 3478 return mode;
3542 status = device_create_file(dev,
3543 &dev_attr_suspend_standby_mode);
3544 if (status < 0)
3545 return status;
3546 status = device_create_file(dev,
3547 &dev_attr_suspend_mem_mode);
3548 if (status < 0)
3549 return status;
3550 status = device_create_file(dev,
3551 &dev_attr_suspend_disk_mode);
3552 if (status < 0)
3553 return status;
3554 }
3555
3556 return status;
3557} 3479}
3558 3480
3481static const struct attribute_group regulator_dev_group = {
3482 .attrs = regulator_dev_attrs,
3483 .is_visible = regulator_attr_is_visible,
3484};
3485
3486static const struct attribute_group *regulator_dev_groups[] = {
3487 &regulator_dev_group,
3488 NULL
3489};
3490
3491static void regulator_dev_release(struct device *dev)
3492{
3493 struct regulator_dev *rdev = dev_get_drvdata(dev);
3494 kfree(rdev);
3495}
3496
3497static struct class regulator_class = {
3498 .name = "regulator",
3499 .dev_release = regulator_dev_release,
3500 .dev_groups = regulator_dev_groups,
3501};
3502
3559static void rdev_init_debugfs(struct regulator_dev *rdev) 3503static void rdev_init_debugfs(struct regulator_dev *rdev)
3560{ 3504{
3561 rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); 3505 rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
@@ -3575,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
3575/** 3519/**
3576 * regulator_register - register regulator 3520 * regulator_register - register regulator
3577 * @regulator_desc: regulator to register 3521 * @regulator_desc: regulator to register
3578 * @config: runtime configuration for regulator 3522 * @cfg: runtime configuration for regulator
3579 * 3523 *
3580 * Called by regulator drivers to register a regulator. 3524 * Called by regulator drivers to register a regulator.
3581 * Returns a valid pointer to struct regulator_dev on success 3525 * Returns a valid pointer to struct regulator_dev on success
@@ -3583,20 +3527,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
3583 */ 3527 */
3584struct regulator_dev * 3528struct regulator_dev *
3585regulator_register(const struct regulator_desc *regulator_desc, 3529regulator_register(const struct regulator_desc *regulator_desc,
3586 const struct regulator_config *config) 3530 const struct regulator_config *cfg)
3587{ 3531{
3588 const struct regulation_constraints *constraints = NULL; 3532 const struct regulation_constraints *constraints = NULL;
3589 const struct regulator_init_data *init_data; 3533 const struct regulator_init_data *init_data;
3590 static atomic_t regulator_no = ATOMIC_INIT(0); 3534 struct regulator_config *config = NULL;
3535 static atomic_t regulator_no = ATOMIC_INIT(-1);
3591 struct regulator_dev *rdev; 3536 struct regulator_dev *rdev;
3592 struct device *dev; 3537 struct device *dev;
3593 int ret, i; 3538 int ret, i;
3594 const char *supply = NULL; 3539 const char *supply = NULL;
3595 3540
3596 if (regulator_desc == NULL || config == NULL) 3541 if (regulator_desc == NULL || cfg == NULL)
3597 return ERR_PTR(-EINVAL); 3542 return ERR_PTR(-EINVAL);
3598 3543
3599 dev = config->dev; 3544 dev = cfg->dev;
3600 WARN_ON(!dev); 3545 WARN_ON(!dev);
3601 3546
3602 if (regulator_desc->name == NULL || regulator_desc->ops == NULL) 3547 if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
@@ -3626,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
3626 if (rdev == NULL) 3571 if (rdev == NULL)
3627 return ERR_PTR(-ENOMEM); 3572 return ERR_PTR(-ENOMEM);
3628 3573
3629 init_data = regulator_of_get_init_data(dev, regulator_desc, 3574 /*
3575 * Duplicate the config so the driver could override it after
3576 * parsing init data.
3577 */
3578 config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
3579 if (config == NULL) {
3580 kfree(rdev);
3581 return ERR_PTR(-ENOMEM);
3582 }
3583
3584 init_data = regulator_of_get_init_data(dev, regulator_desc, config,
3630 &rdev->dev.of_node); 3585 &rdev->dev.of_node);
3631 if (!init_data) { 3586 if (!init_data) {
3632 init_data = config->init_data; 3587 init_data = config->init_data;
@@ -3660,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
3660 /* register with sysfs */ 3615 /* register with sysfs */
3661 rdev->dev.class = &regulator_class; 3616 rdev->dev.class = &regulator_class;
3662 rdev->dev.parent = dev; 3617 rdev->dev.parent = dev;
3663 dev_set_name(&rdev->dev, "regulator.%d", 3618 dev_set_name(&rdev->dev, "regulator.%lu",
3664 atomic_inc_return(&regulator_no) - 1); 3619 (unsigned long) atomic_inc_return(&regulator_no));
3665 ret = device_register(&rdev->dev); 3620 ret = device_register(&rdev->dev);
3666 if (ret != 0) { 3621 if (ret != 0) {
3667 put_device(&rdev->dev); 3622 put_device(&rdev->dev);
@@ -3694,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
3694 if (ret < 0) 3649 if (ret < 0)
3695 goto scrub; 3650 goto scrub;
3696 3651
3697 /* add attributes supported by this regulator */
3698 ret = add_regulator_attributes(rdev);
3699 if (ret < 0)
3700 goto scrub;
3701
3702 if (init_data && init_data->supply_regulator) 3652 if (init_data && init_data->supply_regulator)
3703 supply = init_data->supply_regulator; 3653 supply = init_data->supply_regulator;
3704 else if (regulator_desc->supply_name) 3654 else if (regulator_desc->supply_name)
@@ -3754,6 +3704,7 @@ add_dev:
3754 rdev_init_debugfs(rdev); 3704 rdev_init_debugfs(rdev);
3755out: 3705out:
3756 mutex_unlock(&regulator_list_mutex); 3706 mutex_unlock(&regulator_list_mutex);
3707 kfree(config);
3757 return rdev; 3708 return rdev;
3758 3709
3759unset_supplies: 3710unset_supplies:
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index c78d2106d6cb..01343419555e 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -24,6 +24,7 @@
24#include <linux/regmap.h> 24#include <linux/regmap.h>
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/of_gpio.h>
27#include <linux/regulator/of_regulator.h> 28#include <linux/regulator/of_regulator.h>
28#include <linux/regulator/da9211.h> 29#include <linux/regulator/da9211.h>
29#include "da9211-regulator.h" 30#include "da9211-regulator.h"
@@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
276 continue; 277 continue;
277 278
278 pdata->init_data[n] = da9211_matches[i].init_data; 279 pdata->init_data[n] = da9211_matches[i].init_data;
279 280 pdata->reg_node[n] = da9211_matches[i].of_node;
281 pdata->gpio_ren[n] =
282 of_get_named_gpio(da9211_matches[i].of_node,
283 "enable-gpios", 0);
280 n++; 284 n++;
281 } 285 }
282 286
@@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip)
364 config.dev = chip->dev; 368 config.dev = chip->dev;
365 config.driver_data = chip; 369 config.driver_data = chip;
366 config.regmap = chip->regmap; 370 config.regmap = chip->regmap;
367 config.of_node = chip->dev->of_node; 371 config.of_node = chip->pdata->reg_node[i];
372
373 if (gpio_is_valid(chip->pdata->gpio_ren[i])) {
374 config.ena_gpio = chip->pdata->gpio_ren[i];
375 config.ena_gpio_initialized = true;
376 } else {
377 config.ena_gpio = -EINVAL;
378 config.ena_gpio_initialized = false;
379 }
368 380
369 chip->rdev[i] = devm_regulator_register(chip->dev, 381 chip->rdev[i] = devm_regulator_register(chip->dev,
370 &da9211_regulators[i], &config); 382 &da9211_regulators[i], &config);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 6c43ab2d5121..3c25db89a021 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
147 return REGULATOR_MODE_NORMAL; 147 return REGULATOR_MODE_NORMAL;
148} 148}
149 149
150static int slew_rates[] = { 150static const int slew_rates[] = {
151 64000, 151 64000,
152 32000, 152 32000,
153 16000, 153 16000,
@@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
296 return PTR_ERR_OR_ZERO(di->rdev); 296 return PTR_ERR_OR_ZERO(di->rdev);
297} 297}
298 298
299static struct regmap_config fan53555_regmap_config = { 299static const struct regmap_config fan53555_regmap_config = {
300 .reg_bits = 8, 300 .reg_bits = 8,
301 .val_bits = 8, 301 .val_bits = 8,
302}; 302};
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 80ba2a35a04b..c74ac8734023 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -38,11 +38,13 @@ struct regulator {
38#ifdef CONFIG_OF 38#ifdef CONFIG_OF
39struct regulator_init_data *regulator_of_get_init_data(struct device *dev, 39struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
40 const struct regulator_desc *desc, 40 const struct regulator_desc *desc,
41 struct regulator_config *config,
41 struct device_node **node); 42 struct device_node **node);
42#else 43#else
43static inline struct regulator_init_data * 44static inline struct regulator_init_data *
44regulator_of_get_init_data(struct device *dev, 45regulator_of_get_init_data(struct device *dev,
45 const struct regulator_desc *desc, 46 const struct regulator_desc *desc,
47 struct regulator_config *config,
46 struct device_node **node) 48 struct device_node **node)
47{ 49{
48 return NULL; 50 return NULL;
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 92fefd98da58..6e3a15fe00f1 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c,
177 177
178#ifdef CONFIG_OF 178#ifdef CONFIG_OF
179static const struct of_device_id isl9305_dt_ids[] = { 179static const struct of_device_id isl9305_dt_ids[] = {
180 { .compatible = "isl,isl9305" }, 180 { .compatible = "isl,isl9305" }, /* for backward compat., don't use */
181 { .compatible = "isl,isl9305h" }, 181 { .compatible = "isil,isl9305" },
182 { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */
183 { .compatible = "isil,isl9305h" },
182 {}, 184 {},
183}; 185};
184#endif 186#endif
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 021d64d856bb..3de328ab41f3 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -106,7 +106,6 @@ struct lp872x {
106 struct device *dev; 106 struct device *dev;
107 enum lp872x_id chipid; 107 enum lp872x_id chipid;
108 struct lp872x_platform_data *pdata; 108 struct lp872x_platform_data *pdata;
109 struct regulator_dev **regulators;
110 int num_regulators; 109 int num_regulators;
111 enum lp872x_dvs_state dvs_pin; 110 enum lp872x_dvs_state dvs_pin;
112 int dvs_gpio; 111 int dvs_gpio;
@@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp)
801 dev_err(lp->dev, "regulator register err"); 800 dev_err(lp->dev, "regulator register err");
802 return PTR_ERR(rdev); 801 return PTR_ERR(rdev);
803 } 802 }
804
805 *(lp->regulators + i) = rdev;
806 } 803 }
807 804
808 return 0; 805 return 0;
@@ -906,7 +903,7 @@ static struct lp872x_platform_data
906static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) 903static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
907{ 904{
908 struct lp872x *lp; 905 struct lp872x *lp;
909 int ret, size, num_regulators; 906 int ret;
910 const int lp872x_num_regulators[] = { 907 const int lp872x_num_regulators[] = {
911 [LP8720] = LP8720_NUM_REGULATORS, 908 [LP8720] = LP8720_NUM_REGULATORS,
912 [LP8725] = LP8725_NUM_REGULATORS, 909 [LP8725] = LP8725_NUM_REGULATORS,
@@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
918 915
919 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); 916 lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
920 if (!lp) 917 if (!lp)
921 goto err_mem; 918 return -ENOMEM;
922
923 num_regulators = lp872x_num_regulators[id->driver_data];
924 size = sizeof(struct regulator_dev *) * num_regulators;
925 919
926 lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL); 920 lp->num_regulators = lp872x_num_regulators[id->driver_data];
927 if (!lp->regulators)
928 goto err_mem;
929 921
930 lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config); 922 lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
931 if (IS_ERR(lp->regmap)) { 923 if (IS_ERR(lp->regmap)) {
932 ret = PTR_ERR(lp->regmap); 924 ret = PTR_ERR(lp->regmap);
933 dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); 925 dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
934 goto err_dev; 926 return ret;
935 } 927 }
936 928
937 lp->dev = &cl->dev; 929 lp->dev = &cl->dev;
938 lp->pdata = dev_get_platdata(&cl->dev); 930 lp->pdata = dev_get_platdata(&cl->dev);
939 lp->chipid = id->driver_data; 931 lp->chipid = id->driver_data;
940 lp->num_regulators = num_regulators;
941 i2c_set_clientdata(cl, lp); 932 i2c_set_clientdata(cl, lp);
942 933
943 ret = lp872x_config(lp); 934 ret = lp872x_config(lp);
944 if (ret) 935 if (ret)
945 goto err_dev; 936 return ret;
946 937
947 return lp872x_regulator_register(lp); 938 return lp872x_regulator_register(lp);
948
949err_mem:
950 return -ENOMEM;
951err_dev:
952 return ret;
953} 939}
954 940
955static const struct of_device_id lp872x_dt_ids[] = { 941static const struct of_device_id lp872x_dt_ids[] = {
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index bf9a44c5fdd2..b3678d289619 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = {
103static const struct regulator_desc max14577_supported_regulators[] = { 103static const struct regulator_desc max14577_supported_regulators[] = {
104 [MAX14577_SAFEOUT] = { 104 [MAX14577_SAFEOUT] = {
105 .name = "SAFEOUT", 105 .name = "SAFEOUT",
106 .of_match = of_match_ptr("SAFEOUT"),
107 .regulators_node = of_match_ptr("regulators"),
106 .id = MAX14577_SAFEOUT, 108 .id = MAX14577_SAFEOUT,
107 .ops = &max14577_safeout_ops, 109 .ops = &max14577_safeout_ops,
108 .type = REGULATOR_VOLTAGE, 110 .type = REGULATOR_VOLTAGE,
@@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = {
114 }, 116 },
115 [MAX14577_CHARGER] = { 117 [MAX14577_CHARGER] = {
116 .name = "CHARGER", 118 .name = "CHARGER",
119 .of_match = of_match_ptr("CHARGER"),
120 .regulators_node = of_match_ptr("regulators"),
117 .id = MAX14577_CHARGER, 121 .id = MAX14577_CHARGER,
118 .ops = &max14577_charger_ops, 122 .ops = &max14577_charger_ops,
119 .type = REGULATOR_CURRENT, 123 .type = REGULATOR_CURRENT,
@@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = {
137static const struct regulator_desc max77836_supported_regulators[] = { 141static const struct regulator_desc max77836_supported_regulators[] = {
138 [MAX14577_SAFEOUT] = { 142 [MAX14577_SAFEOUT] = {
139 .name = "SAFEOUT", 143 .name = "SAFEOUT",
144 .of_match = of_match_ptr("SAFEOUT"),
145 .regulators_node = of_match_ptr("regulators"),
140 .id = MAX14577_SAFEOUT, 146 .id = MAX14577_SAFEOUT,
141 .ops = &max14577_safeout_ops, 147 .ops = &max14577_safeout_ops,
142 .type = REGULATOR_VOLTAGE, 148 .type = REGULATOR_VOLTAGE,
@@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
148 }, 154 },
149 [MAX14577_CHARGER] = { 155 [MAX14577_CHARGER] = {
150 .name = "CHARGER", 156 .name = "CHARGER",
157 .of_match = of_match_ptr("CHARGER"),
158 .regulators_node = of_match_ptr("regulators"),
151 .id = MAX14577_CHARGER, 159 .id = MAX14577_CHARGER,
152 .ops = &max14577_charger_ops, 160 .ops = &max14577_charger_ops,
153 .type = REGULATOR_CURRENT, 161 .type = REGULATOR_CURRENT,
@@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
157 }, 165 },
158 [MAX77836_LDO1] = { 166 [MAX77836_LDO1] = {
159 .name = "LDO1", 167 .name = "LDO1",
168 .of_match = of_match_ptr("LDO1"),
169 .regulators_node = of_match_ptr("regulators"),
160 .id = MAX77836_LDO1, 170 .id = MAX77836_LDO1,
161 .ops = &max77836_ldo_ops, 171 .ops = &max77836_ldo_ops,
162 .type = REGULATOR_VOLTAGE, 172 .type = REGULATOR_VOLTAGE,
@@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
171 }, 181 },
172 [MAX77836_LDO2] = { 182 [MAX77836_LDO2] = {
173 .name = "LDO2", 183 .name = "LDO2",
184 .of_match = of_match_ptr("LDO2"),
185 .regulators_node = of_match_ptr("regulators"),
174 .id = MAX77836_LDO2, 186 .id = MAX77836_LDO2,
175 .ops = &max77836_ldo_ops, 187 .ops = &max77836_ldo_ops,
176 .type = REGULATOR_VOLTAGE, 188 .type = REGULATOR_VOLTAGE,
@@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = {
198 { .name = "LDO2", }, 210 { .name = "LDO2", },
199}; 211};
200 212
201static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
202 enum maxim_device_type dev_type)
203{
204 int ret;
205 struct device_node *np;
206 struct of_regulator_match *regulator_matches;
207 unsigned int regulator_matches_size;
208
209 np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
210 if (!np) {
211 dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
212 return -EINVAL;
213 }
214
215 switch (dev_type) {
216 case MAXIM_DEVICE_TYPE_MAX77836:
217 regulator_matches = max77836_regulator_matches;
218 regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches);
219 break;
220 case MAXIM_DEVICE_TYPE_MAX14577:
221 default:
222 regulator_matches = max14577_regulator_matches;
223 regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches);
224 }
225
226 ret = of_regulator_match(&pdev->dev, np, regulator_matches,
227 regulator_matches_size);
228 if (ret < 0)
229 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
230 else
231 ret = 0;
232
233 of_node_put(np);
234
235 return ret;
236}
237
238static inline struct regulator_init_data *match_init_data(int index, 213static inline struct regulator_init_data *match_init_data(int index,
239 enum maxim_device_type dev_type) 214 enum maxim_device_type dev_type)
240{ 215{
@@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index,
261 } 236 }
262} 237}
263#else /* CONFIG_OF */ 238#else /* CONFIG_OF */
264static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
265 enum maxim_device_type dev_type)
266{
267 return 0;
268}
269static inline struct regulator_init_data *match_init_data(int index, 239static inline struct regulator_init_data *match_init_data(int index,
270 enum maxim_device_type dev_type) 240 enum maxim_device_type dev_type)
271{ 241{
@@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev)
308{ 278{
309 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); 279 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
310 struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); 280 struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
311 int i, ret; 281 int i, ret = 0;
312 struct regulator_config config = {}; 282 struct regulator_config config = {};
313 const struct regulator_desc *supported_regulators; 283 const struct regulator_desc *supported_regulators;
314 unsigned int supported_regulators_size; 284 unsigned int supported_regulators_size;
315 enum maxim_device_type dev_type = max14577->dev_type; 285 enum maxim_device_type dev_type = max14577->dev_type;
316 286
317 ret = max14577_regulator_dt_parse_pdata(pdev, dev_type);
318 if (ret)
319 return ret;
320
321 switch (dev_type) { 287 switch (dev_type) {
322 case MAXIM_DEVICE_TYPE_MAX77836: 288 case MAXIM_DEVICE_TYPE_MAX77836:
323 supported_regulators = max77836_supported_regulators; 289 supported_regulators = max77836_supported_regulators;
@@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev)
329 supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); 295 supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators);
330 } 296 }
331 297
332 config.dev = &pdev->dev; 298 config.dev = max14577->dev;
333 config.driver_data = max14577; 299 config.driver_data = max14577;
334 300
335 for (i = 0; i < supported_regulators_size; i++) { 301 for (i = 0; i < supported_regulators_size; i++) {
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index 10d206266ac2..15fb1416bfbd 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -26,6 +26,7 @@
26#include <linux/bug.h> 26#include <linux/bug.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_gpio.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/regulator/driver.h> 32#include <linux/regulator/driver.h>
@@ -46,6 +47,11 @@
46#define MAX77686_DVS_UVSTEP 12500 47#define MAX77686_DVS_UVSTEP 12500
47 48
48/* 49/*
50 * Value for configuring buck[89] and LDO{20,21,22} as GPIO control.
51 * It is the same as 'off' for other regulators.
52 */
53#define MAX77686_GPIO_CONTROL 0x0
54/*
49 * Values used for configuring LDOs and bucks. 55 * Values used for configuring LDOs and bucks.
50 * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26 56 * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26
51 */ 57 */
@@ -82,6 +88,8 @@ enum max77686_ramp_rate {
82}; 88};
83 89
84struct max77686_data { 90struct max77686_data {
91 u64 gpio_enabled:MAX77686_REGULATORS;
92
85 /* Array indexed by regulator id */ 93 /* Array indexed by regulator id */
86 unsigned int opmode[MAX77686_REGULATORS]; 94 unsigned int opmode[MAX77686_REGULATORS];
87}; 95};
@@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id)
100 } 108 }
101} 109}
102 110
111/*
112 * When regulator is configured for GPIO control then it
113 * replaces "normal" mode. Any change from low power mode to normal
114 * should actually change to GPIO control.
115 * Map normal mode to proper value for such regulators.
116 */
117static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
118 int id)
119{
120 switch (id) {
121 case MAX77686_BUCK8:
122 case MAX77686_BUCK9:
123 case MAX77686_LDO20 ... MAX77686_LDO22:
124 if (max77686->gpio_enabled & (1 << id))
125 return MAX77686_GPIO_CONTROL;
126 }
127
128 return MAX77686_NORMAL;
129}
130
103/* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */ 131/* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */
104static int max77686_set_suspend_disable(struct regulator_dev *rdev) 132static int max77686_set_suspend_disable(struct regulator_dev *rdev)
105{ 133{
@@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
136 val = MAX77686_LDO_LOWPOWER_PWRREQ; 164 val = MAX77686_LDO_LOWPOWER_PWRREQ;
137 break; 165 break;
138 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 166 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
139 val = MAX77686_NORMAL; 167 val = max77686_map_normal_mode(max77686, id);
140 break; 168 break;
141 default: 169 default:
142 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 170 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
160{ 188{
161 unsigned int val; 189 unsigned int val;
162 struct max77686_data *max77686 = rdev_get_drvdata(rdev); 190 struct max77686_data *max77686 = rdev_get_drvdata(rdev);
163 int ret; 191 int ret, id = rdev_get_id(rdev);
164 192
165 switch (mode) { 193 switch (mode) {
166 case REGULATOR_MODE_STANDBY: /* switch off */ 194 case REGULATOR_MODE_STANDBY: /* switch off */
@@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
170 val = MAX77686_LDO_LOWPOWER_PWRREQ; 198 val = MAX77686_LDO_LOWPOWER_PWRREQ;
171 break; 199 break;
172 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ 200 case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */
173 val = MAX77686_NORMAL; 201 val = max77686_map_normal_mode(max77686, id);
174 break; 202 break;
175 default: 203 default:
176 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", 204 pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
184 if (ret) 212 if (ret)
185 return ret; 213 return ret;
186 214
187 max77686->opmode[rdev_get_id(rdev)] = val; 215 max77686->opmode[id] = val;
188 return 0; 216 return 0;
189} 217}
190 218
@@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev)
197 shift = max77686_get_opmode_shift(id); 225 shift = max77686_get_opmode_shift(id);
198 226
199 if (max77686->opmode[id] == MAX77686_OFF_PWRREQ) 227 if (max77686->opmode[id] == MAX77686_OFF_PWRREQ)
200 max77686->opmode[id] = MAX77686_NORMAL; 228 max77686->opmode[id] = max77686_map_normal_mode(max77686, id);
201 229
202 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, 230 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
203 rdev->desc->enable_mask, 231 rdev->desc->enable_mask,
@@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
229 MAX77686_RAMP_RATE_MASK, ramp_value << 6); 257 MAX77686_RAMP_RATE_MASK, ramp_value << 6);
230} 258}
231 259
260static int max77686_of_parse_cb(struct device_node *np,
261 const struct regulator_desc *desc,
262 struct regulator_config *config)
263{
264 struct max77686_data *max77686 = config->driver_data;
265
266 switch (desc->id) {
267 case MAX77686_BUCK8:
268 case MAX77686_BUCK9:
269 case MAX77686_LDO20 ... MAX77686_LDO22:
270 config->ena_gpio = of_get_named_gpio(np,
271 "maxim,ena-gpios", 0);
272 config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
273 config->ena_gpio_initialized = true;
274 break;
275 default:
276 return 0;
277 }
278
279 if (gpio_is_valid(config->ena_gpio)) {
280 max77686->gpio_enabled |= (1 << desc->id);
281
282 return regmap_update_bits(config->regmap, desc->enable_reg,
283 desc->enable_mask,
284 MAX77686_GPIO_CONTROL);
285 }
286
287 return 0;
288}
289
232static struct regulator_ops max77686_ops = { 290static struct regulator_ops max77686_ops = {
233 .list_voltage = regulator_list_voltage_linear, 291 .list_voltage = regulator_list_voltage_linear,
234 .map_voltage = regulator_map_voltage_linear, 292 .map_voltage = regulator_map_voltage_linear,
@@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
283 .name = "LDO"#num, \ 341 .name = "LDO"#num, \
284 .of_match = of_match_ptr("LDO"#num), \ 342 .of_match = of_match_ptr("LDO"#num), \
285 .regulators_node = of_match_ptr("voltage-regulators"), \ 343 .regulators_node = of_match_ptr("voltage-regulators"), \
344 .of_parse_cb = max77686_of_parse_cb, \
286 .id = MAX77686_LDO##num, \ 345 .id = MAX77686_LDO##num, \
287 .ops = &max77686_ops, \ 346 .ops = &max77686_ops, \
288 .type = REGULATOR_VOLTAGE, \ 347 .type = REGULATOR_VOLTAGE, \
@@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
355 .name = "BUCK"#num, \ 414 .name = "BUCK"#num, \
356 .of_match = of_match_ptr("BUCK"#num), \ 415 .of_match = of_match_ptr("BUCK"#num), \
357 .regulators_node = of_match_ptr("voltage-regulators"), \ 416 .regulators_node = of_match_ptr("voltage-regulators"), \
417 .of_parse_cb = max77686_of_parse_cb, \
358 .id = MAX77686_BUCK##num, \ 418 .id = MAX77686_BUCK##num, \
359 .ops = &max77686_ops, \ 419 .ops = &max77686_ops, \
360 .type = REGULATOR_VOLTAGE, \ 420 .type = REGULATOR_VOLTAGE, \
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
new file mode 100644
index 000000000000..c132ef527cdd
--- /dev/null
+++ b/drivers/regulator/max77843.c
@@ -0,0 +1,227 @@
1/*
2 * max77843.c - Regulator driver for the Maxim MAX77843
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
6 * Author: Beomho Seo <beomho.seo@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/driver.h>
17#include <linux/regulator/machine.h>
18#include <linux/mfd/max77843-private.h>
19#include <linux/regulator/of_regulator.h>
20
21enum max77843_regulator_type {
22 MAX77843_SAFEOUT1 = 0,
23 MAX77843_SAFEOUT2,
24 MAX77843_CHARGER,
25
26 MAX77843_NUM,
27};
28
29static const unsigned int max77843_safeout_voltage_table[] = {
30 4850000,
31 4900000,
32 4950000,
33 3300000,
34};
35
36static int max77843_reg_is_enabled(struct regulator_dev *rdev)
37{
38 struct regmap *regmap = rdev->regmap;
39 int ret;
40 unsigned int reg;
41
42 ret = regmap_read(regmap, rdev->desc->enable_reg, &reg);
43 if (ret) {
44 dev_err(&rdev->dev, "Fialed to read charger register\n");
45 return ret;
46 }
47
48 return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask;
49}
50
51static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
52{
53 struct regmap *regmap = rdev->regmap;
54 unsigned int chg_min_uA = rdev->constraints->min_uA;
55 unsigned int chg_max_uA = rdev->constraints->max_uA;
56 unsigned int val;
57 int ret;
58 unsigned int reg, sel;
59
60 ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
61 if (ret) {
62 dev_err(&rdev->dev, "Failed to read charger register\n");
63 return ret;
64 }
65
66 sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
67
68 if (sel < 0x03)
69 sel = 0;
70 else
71 sel -= 2;
72
73 val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
74 if (val > chg_max_uA)
75 return -EINVAL;
76
77 return val;
78}
79
80static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
81 int min_uA, int max_uA)
82{
83 struct regmap *regmap = rdev->regmap;
84 unsigned int chg_min_uA = rdev->constraints->min_uA;
85 int sel = 0;
86
87 while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
88 sel++;
89
90 if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
91 return -EINVAL;
92
93 sel += 2;
94
95 return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
96}
97
98static struct regulator_ops max77843_charger_ops = {
99 .is_enabled = max77843_reg_is_enabled,
100 .enable = regulator_enable_regmap,
101 .disable = regulator_disable_regmap,
102 .get_current_limit = max77843_reg_get_current_limit,
103 .set_current_limit = max77843_reg_set_current_limit,
104};
105
106static struct regulator_ops max77843_regulator_ops = {
107 .is_enabled = regulator_is_enabled_regmap,
108 .enable = regulator_enable_regmap,
109 .disable = regulator_disable_regmap,
110 .list_voltage = regulator_list_voltage_table,
111 .get_voltage_sel = regulator_get_voltage_sel_regmap,
112 .set_voltage_sel = regulator_set_voltage_sel_regmap,
113};
114
115static const struct regulator_desc max77843_supported_regulators[] = {
116 [MAX77843_SAFEOUT1] = {
117 .name = "SAFEOUT1",
118 .id = MAX77843_SAFEOUT1,
119 .ops = &max77843_regulator_ops,
120 .of_match = of_match_ptr("SAFEOUT1"),
121 .regulators_node = of_match_ptr("regulators"),
122 .type = REGULATOR_VOLTAGE,
123 .owner = THIS_MODULE,
124 .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
125 .volt_table = max77843_safeout_voltage_table,
126 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
127 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1,
128 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
129 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK,
130 },
131 [MAX77843_SAFEOUT2] = {
132 .name = "SAFEOUT2",
133 .id = MAX77843_SAFEOUT2,
134 .ops = &max77843_regulator_ops,
135 .of_match = of_match_ptr("SAFEOUT2"),
136 .regulators_node = of_match_ptr("regulators"),
137 .type = REGULATOR_VOLTAGE,
138 .owner = THIS_MODULE,
139 .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table),
140 .volt_table = max77843_safeout_voltage_table,
141 .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
142 .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2,
143 .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL,
144 .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK,
145 },
146 [MAX77843_CHARGER] = {
147 .name = "CHARGER",
148 .id = MAX77843_CHARGER,
149 .ops = &max77843_charger_ops,
150 .of_match = of_match_ptr("CHARGER"),
151 .regulators_node = of_match_ptr("regulators"),
152 .type = REGULATOR_CURRENT,
153 .owner = THIS_MODULE,
154 .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00,
155 .enable_mask = MAX77843_CHG_MASK,
156 },
157};
158
159static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
160{
161 switch (reg_id) {
162 case MAX77843_SAFEOUT1:
163 case MAX77843_SAFEOUT2:
164 return max77843->regmap;
165 case MAX77843_CHARGER:
166 return max77843->regmap_chg;
167 default:
168 return max77843->regmap;
169 }
170}
171
172static int max77843_regulator_probe(struct platform_device *pdev)
173{
174 struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
175 struct regulator_config config = {};
176 int i;
177
178 config.dev = max77843->dev;
179 config.driver_data = max77843;
180
181 for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
182 struct regulator_dev *regulator;
183
184 config.regmap = max77843_get_regmap(max77843,
185 max77843_supported_regulators[i].id);
186
187 regulator = devm_regulator_register(&pdev->dev,
188 &max77843_supported_regulators[i], &config);
189 if (IS_ERR(regulator)) {
190 dev_err(&pdev->dev,
191 "Failed to regiser regulator-%d\n", i);
192 return PTR_ERR(regulator);
193 }
194 }
195
196 return 0;
197}
198
199static const struct platform_device_id max77843_regulator_id[] = {
200 { "max77843-regulator", },
201 { /* sentinel */ },
202};
203
204static struct platform_driver max77843_regulator_driver = {
205 .driver = {
206 .name = "max77843-regulator",
207 },
208 .probe = max77843_regulator_probe,
209 .id_table = max77843_regulator_id,
210};
211
212static int __init max77843_regulator_init(void)
213{
214 return platform_driver_register(&max77843_regulator_driver);
215}
216subsys_initcall(max77843_regulator_init);
217
218static void __exit max77843_regulator_exit(void)
219{
220 platform_driver_unregister(&max77843_regulator_driver);
221}
222module_exit(max77843_regulator_exit);
223
224MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
225MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
226MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
227MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index c8bddcc8f911..81229579ece9 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev)
115 return REGULATOR_MODE_NORMAL; 115 return REGULATOR_MODE_NORMAL;
116} 116}
117 117
118static struct regulator_ops max8649_dcdc_ops = { 118static const struct regulator_ops max8649_dcdc_ops = {
119 .set_voltage_sel = regulator_set_voltage_sel_regmap, 119 .set_voltage_sel = regulator_set_voltage_sel_regmap,
120 .get_voltage_sel = regulator_get_voltage_sel_regmap, 120 .get_voltage_sel = regulator_get_voltage_sel_regmap,
121 .list_voltage = regulator_list_voltage_linear, 121 .list_voltage = regulator_list_voltage_linear,
@@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = {
143 .enable_is_inverted = true, 143 .enable_is_inverted = true,
144}; 144};
145 145
146static struct regmap_config max8649_regmap_config = { 146static const struct regmap_config max8649_regmap_config = {
147 .reg_bits = 8, 147 .reg_bits = 8,
148 .val_bits = 8, 148 .val_bits = 8,
149}; 149};
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
new file mode 100644
index 000000000000..a5b2f4762677
--- /dev/null
+++ b/drivers/regulator/mt6397-regulator.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu <flora.fu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/regmap.h>
19#include <linux/mfd/mt6397/core.h>
20#include <linux/mfd/mt6397/registers.h>
21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/regulator/mt6397-regulator.h>
24#include <linux/regulator/of_regulator.h>
25
26/*
27 * MT6397 regulators' information
28 *
29 * @desc: standard fields of regulator description.
30 * @qi: Mask for query enable signal status of regulators
31 * @vselon_reg: Register sections for hardware control mode of bucks
32 * @vselctrl_reg: Register for controlling the buck control mode.
33 * @vselctrl_mask: Mask for query buck's voltage control mode.
34 */
35struct mt6397_regulator_info {
36 struct regulator_desc desc;
37 u32 qi;
38 u32 vselon_reg;
39 u32 vselctrl_reg;
40 u32 vselctrl_mask;
41};
42
43#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
44 vosel, vosel_mask, voselon, vosel_ctrl) \
45[MT6397_ID_##vreg] = { \
46 .desc = { \
47 .name = #vreg, \
48 .of_match = of_match_ptr(match), \
49 .ops = &mt6397_volt_range_ops, \
50 .type = REGULATOR_VOLTAGE, \
51 .id = MT6397_ID_##vreg, \
52 .owner = THIS_MODULE, \
53 .n_voltages = (max - min)/step + 1, \
54 .linear_ranges = volt_ranges, \
55 .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
56 .vsel_reg = vosel, \
57 .vsel_mask = vosel_mask, \
58 .enable_reg = enreg, \
59 .enable_mask = BIT(0), \
60 }, \
61 .qi = BIT(13), \
62 .vselon_reg = voselon, \
63 .vselctrl_reg = vosel_ctrl, \
64 .vselctrl_mask = BIT(1), \
65}
66
67#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
68 vosel_mask) \
69[MT6397_ID_##vreg] = { \
70 .desc = { \
71 .name = #vreg, \
72 .of_match = of_match_ptr(match), \
73 .ops = &mt6397_volt_table_ops, \
74 .type = REGULATOR_VOLTAGE, \
75 .id = MT6397_ID_##vreg, \
76 .owner = THIS_MODULE, \
77 .n_voltages = ARRAY_SIZE(ldo_volt_table), \
78 .volt_table = ldo_volt_table, \
79 .vsel_reg = vosel, \
80 .vsel_mask = vosel_mask, \
81 .enable_reg = enreg, \
82 .enable_mask = BIT(enbit), \
83 }, \
84 .qi = BIT(15), \
85}
86
87#define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt) \
88[MT6397_ID_##vreg] = { \
89 .desc = { \
90 .name = #vreg, \
91 .of_match = of_match_ptr(match), \
92 .ops = &mt6397_volt_fixed_ops, \
93 .type = REGULATOR_VOLTAGE, \
94 .id = MT6397_ID_##vreg, \
95 .owner = THIS_MODULE, \
96 .n_voltages = 1, \
97 .enable_reg = enreg, \
98 .enable_mask = BIT(enbit), \
99 .min_uV = volt, \
100 }, \
101 .qi = BIT(15), \
102}
103
104static const struct regulator_linear_range buck_volt_range1[] = {
105 REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
106};
107
108static const struct regulator_linear_range buck_volt_range2[] = {
109 REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250),
110};
111
112static const struct regulator_linear_range buck_volt_range3[] = {
113 REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
114};
115
116static const u32 ldo_volt_table1[] = {
117 1500000, 1800000, 2500000, 2800000,
118};
119
120static const u32 ldo_volt_table2[] = {
121 1800000, 3300000,
122};
123
124static const u32 ldo_volt_table3[] = {
125 3000000, 3300000,
126};
127
128static const u32 ldo_volt_table4[] = {
129 1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
130};
131
132static const u32 ldo_volt_table5[] = {
133 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
134};
135
136static const u32 ldo_volt_table5_v2[] = {
137 1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
138};
139
140static const u32 ldo_volt_table6[] = {
141 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
142};
143
144static const u32 ldo_volt_table7[] = {
145 1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
146};
147
148static int mt6397_get_status(struct regulator_dev *rdev)
149{
150 int ret;
151 u32 regval;
152 struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
153
154 ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
155 if (ret != 0) {
156 dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
157 return ret;
158 }
159
160 return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
161}
162
163static struct regulator_ops mt6397_volt_range_ops = {
164 .list_voltage = regulator_list_voltage_linear_range,
165 .map_voltage = regulator_map_voltage_linear_range,
166 .set_voltage_sel = regulator_set_voltage_sel_regmap,
167 .get_voltage_sel = regulator_get_voltage_sel_regmap,
168 .set_voltage_time_sel = regulator_set_voltage_time_sel,
169 .enable = regulator_enable_regmap,
170 .disable = regulator_disable_regmap,
171 .is_enabled = regulator_is_enabled_regmap,
172 .get_status = mt6397_get_status,
173};
174
175static struct regulator_ops mt6397_volt_table_ops = {
176 .list_voltage = regulator_list_voltage_table,
177 .map_voltage = regulator_map_voltage_iterate,
178 .set_voltage_sel = regulator_set_voltage_sel_regmap,
179 .get_voltage_sel = regulator_get_voltage_sel_regmap,
180 .set_voltage_time_sel = regulator_set_voltage_time_sel,
181 .enable = regulator_enable_regmap,
182 .disable = regulator_disable_regmap,
183 .is_enabled = regulator_is_enabled_regmap,
184 .get_status = mt6397_get_status,
185};
186
187static struct regulator_ops mt6397_volt_fixed_ops = {
188 .list_voltage = regulator_list_voltage_linear,
189 .enable = regulator_enable_regmap,
190 .disable = regulator_disable_regmap,
191 .is_enabled = regulator_is_enabled_regmap,
192 .get_status = mt6397_get_status,
193};
194
195/* The array is indexed by id(MT6397_ID_XXX) */
196static struct mt6397_regulator_info mt6397_regulators[] = {
197 MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250,
198 buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f,
199 MT6397_VCA15_CON10, MT6397_VCA15_CON5),
200 MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250,
201 buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f,
202 MT6397_VPCA7_CON10, MT6397_VPCA7_CON5),
203 MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250,
204 buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9,
205 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5),
206 MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250,
207 buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9,
208 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5),
209 MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250,
210 buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f,
211 MT6397_VCORE_CON10, MT6397_VCORE_CON5),
212 MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1,
213 MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f,
214 MT6397_VGPU_CON10, MT6397_VGPU_CON5),
215 MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2,
216 MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f,
217 MT6397_VDRM_CON10, MT6397_VDRM_CON5),
218 MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000,
219 buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f,
220 MT6397_VIO18_CON10, MT6397_VIO18_CON5),
221 MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000),
222 MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000),
223 MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1,
224 MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0),
225 MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000),
226 MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000),
227 MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2,
228 MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10),
229 MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3,
230 MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80),
231 MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3,
232 MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10),
233 MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4,
234 MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0),
235 MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5,
236 MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0),
237 MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5,
238 MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0),
239 MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5,
240 MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0),
241 MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6,
242 MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0),
243 MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5,
244 MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0),
245 MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7,
246 MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00),
247};
248
249static int mt6397_set_buck_vosel_reg(struct platform_device *pdev)
250{
251 struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
252 int i;
253 u32 regval;
254
255 for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
256 if (mt6397_regulators[i].vselctrl_reg) {
257 if (regmap_read(mt6397->regmap,
258 mt6397_regulators[i].vselctrl_reg,
259 &regval) < 0) {
260 dev_err(&pdev->dev,
261 "Failed to read buck ctrl\n");
262 return -EIO;
263 }
264
265 if (regval & mt6397_regulators[i].vselctrl_mask) {
266 mt6397_regulators[i].desc.vsel_reg =
267 mt6397_regulators[i].vselon_reg;
268 }
269 }
270 }
271
272 return 0;
273}
274
275static int mt6397_regulator_probe(struct platform_device *pdev)
276{
277 struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
278 struct regulator_config config = {};
279 struct regulator_dev *rdev;
280 int i;
281 u32 reg_value, version;
282
283 /* Query buck controller to select activated voltage register part */
284 if (mt6397_set_buck_vosel_reg(pdev))
285 return -EIO;
286
287 /* Read PMIC chip revision to update constraints and voltage table */
288 if (regmap_read(mt6397->regmap, MT6397_CID, &reg_value) < 0) {
289 dev_err(&pdev->dev, "Failed to read Chip ID\n");
290 return -EIO;
291 }
292 dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
293
294 version = (reg_value & 0xFF);
295 switch (version) {
296 case MT6397_REGULATOR_ID91:
297 mt6397_regulators[MT6397_ID_VGP2].desc.volt_table =
298 ldo_volt_table5_v2;
299 break;
300 default:
301 break;
302 }
303
304 for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
305 config.dev = &pdev->dev;
306 config.driver_data = &mt6397_regulators[i];
307 config.regmap = mt6397->regmap;
308 rdev = devm_regulator_register(&pdev->dev,
309 &mt6397_regulators[i].desc, &config);
310 if (IS_ERR(rdev)) {
311 dev_err(&pdev->dev, "failed to register %s\n",
312 mt6397_regulators[i].desc.name);
313 return PTR_ERR(rdev);
314 }
315 }
316
317 return 0;
318}
319
320static struct platform_driver mt6397_regulator_driver = {
321 .driver = {
322 .name = "mt6397-regulator",
323 },
324 .probe = mt6397_regulator_probe,
325};
326
327module_platform_driver(mt6397_regulator_driver);
328
329MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
330MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
331MODULE_LICENSE("GPL");
332MODULE_ALIAS("platform:mt6397-regulator");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 91eaaf010524..24e812c48d93 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match);
270 270
271struct regulator_init_data *regulator_of_get_init_data(struct device *dev, 271struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
272 const struct regulator_desc *desc, 272 const struct regulator_desc *desc,
273 struct regulator_config *config,
273 struct device_node **node) 274 struct device_node **node)
274{ 275{
275 struct device_node *search, *child; 276 struct device_node *search, *child;
@@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
307 break; 308 break;
308 } 309 }
309 310
311 if (desc->of_parse_cb) {
312 if (desc->of_parse_cb(child, desc, config)) {
313 dev_err(dev,
314 "driver callback failed to parse DT for regulator %s\n",
315 child->name);
316 init_data = NULL;
317 break;
318 }
319 }
320
310 of_node_get(child); 321 of_node_get(child);
311 *node = child; 322 *node = child;
312 break; 323 break;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index c879dff597ee..8cc8d1877c44 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -56,7 +56,7 @@
56#define PFUZE100_VGEN5VOL 0x70 56#define PFUZE100_VGEN5VOL 0x70
57#define PFUZE100_VGEN6VOL 0x71 57#define PFUZE100_VGEN6VOL 0x71
58 58
59enum chips { PFUZE100, PFUZE200 }; 59enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
60 60
61struct pfuze_regulator { 61struct pfuze_regulator {
62 struct regulator_desc desc; 62 struct regulator_desc desc;
@@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = {
80 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000, 80 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
81}; 81};
82 82
83static const int pfuze3000_sw2lo[] = {
84 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
85};
86
87static const int pfuze3000_sw2hi[] = {
88 2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000,
89};
90
83static const struct i2c_device_id pfuze_device_id[] = { 91static const struct i2c_device_id pfuze_device_id[] = {
84 {.name = "pfuze100", .driver_data = PFUZE100}, 92 {.name = "pfuze100", .driver_data = PFUZE100},
85 {.name = "pfuze200", .driver_data = PFUZE200}, 93 {.name = "pfuze200", .driver_data = PFUZE200},
94 {.name = "pfuze3000", .driver_data = PFUZE3000},
86 { } 95 { }
87}; 96};
88MODULE_DEVICE_TABLE(i2c, pfuze_device_id); 97MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
90static const struct of_device_id pfuze_dt_ids[] = { 99static const struct of_device_id pfuze_dt_ids[] = {
91 { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100}, 100 { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
92 { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200}, 101 { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
102 { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
93 { } 103 { }
94}; 104};
95MODULE_DEVICE_TABLE(of, pfuze_dt_ids); 105MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
219 .stby_mask = 0x20, \ 229 .stby_mask = 0x20, \
220 } 230 }
221 231
232#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step) { \
233 .desc = { \
234 .name = #_name, \
235 .n_voltages = ((max) - (min)) / (step) + 1, \
236 .ops = &pfuze100_ldo_regulator_ops, \
237 .type = REGULATOR_VOLTAGE, \
238 .id = _chip ## _ ## _name, \
239 .owner = THIS_MODULE, \
240 .min_uV = (min), \
241 .uV_step = (step), \
242 .vsel_reg = (base), \
243 .vsel_mask = 0x3, \
244 .enable_reg = (base), \
245 .enable_mask = 0x10, \
246 }, \
247 .stby_reg = (base), \
248 .stby_mask = 0x20, \
249}
250
251
252#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
253 .desc = { \
254 .name = #_name,\
255 .n_voltages = ((max) - (min)) / (step) + 1, \
256 .ops = &pfuze100_sw_regulator_ops, \
257 .type = REGULATOR_VOLTAGE, \
258 .id = _chip ## _ ## _name, \
259 .owner = THIS_MODULE, \
260 .min_uV = (min), \
261 .uV_step = (step), \
262 .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
263 .vsel_mask = 0x7, \
264 }, \
265 .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
266 .stby_mask = 0x7, \
267}
268
269#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
270 .desc = { \
271 .name = #_name,\
272 .n_voltages = ((max) - (min)) / (step) + 1, \
273 .ops = &pfuze100_sw_regulator_ops, \
274 .type = REGULATOR_VOLTAGE, \
275 .id = _chip ## _ ## _name, \
276 .owner = THIS_MODULE, \
277 .min_uV = (min), \
278 .uV_step = (step), \
279 .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
280 .vsel_mask = 0xf, \
281 }, \
282 .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
283 .stby_mask = 0xf, \
284}
285
222/* PFUZE100 */ 286/* PFUZE100 */
223static struct pfuze_regulator pfuze100_regulators[] = { 287static struct pfuze_regulator pfuze100_regulators[] = {
224 PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), 288 PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
@@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = {
254 PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), 318 PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
255}; 319};
256 320
321static struct pfuze_regulator pfuze3000_regulators[] = {
322 PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000),
323 PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
324 PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
325 PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
326 PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
327 PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
328 PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000),
329 PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
330 PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
331 PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
332 PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
333 PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
334 PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
335};
336
257static struct pfuze_regulator *pfuze_regulators; 337static struct pfuze_regulator *pfuze_regulators;
258 338
259#ifdef CONFIG_OF 339#ifdef CONFIG_OF
@@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = {
294 { .name = "vgen6", }, 374 { .name = "vgen6", },
295}; 375};
296 376
377/* PFUZE3000 */
378static struct of_regulator_match pfuze3000_matches[] = {
379
380 { .name = "sw1a", },
381 { .name = "sw1b", },
382 { .name = "sw2", },
383 { .name = "sw3", },
384 { .name = "swbst", },
385 { .name = "vsnvs", },
386 { .name = "vrefddr", },
387 { .name = "vldo1", },
388 { .name = "vldo2", },
389 { .name = "vccsd", },
390 { .name = "v33", },
391 { .name = "vldo3", },
392 { .name = "vldo4", },
393};
394
297static struct of_regulator_match *pfuze_matches; 395static struct of_regulator_match *pfuze_matches;
298 396
299static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) 397static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
313 } 411 }
314 412
315 switch (chip->chip_id) { 413 switch (chip->chip_id) {
414 case PFUZE3000:
415 pfuze_matches = pfuze3000_matches;
416 ret = of_regulator_match(dev, parent, pfuze3000_matches,
417 ARRAY_SIZE(pfuze3000_matches));
418 break;
316 case PFUZE200: 419 case PFUZE200:
317 pfuze_matches = pfuze200_matches; 420 pfuze_matches = pfuze200_matches;
318 ret = of_regulator_match(dev, parent, pfuze200_matches, 421 ret = of_regulator_match(dev, parent, pfuze200_matches,
@@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
378 * as ID=8 in PFUZE100 481 * as ID=8 in PFUZE100
379 */ 482 */
380 dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); 483 dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
381 } else if ((value & 0x0f) != pfuze_chip->chip_id) { 484 } else if ((value & 0x0f) != pfuze_chip->chip_id &&
485 (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
382 /* device id NOT match with your setting */ 486 /* device id NOT match with your setting */
383 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); 487 dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
384 return -ENODEV; 488 return -ENODEV;
@@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
417 int i, ret; 521 int i, ret;
418 const struct of_device_id *match; 522 const struct of_device_id *match;
419 u32 regulator_num; 523 u32 regulator_num;
420 u32 sw_check_start, sw_check_end; 524 u32 sw_check_start, sw_check_end, sw_hi = 0x40;
421 525
422 pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), 526 pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
423 GFP_KERNEL); 527 GFP_KERNEL);
@@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
458 562
459 /* use the right regulators after identify the right device */ 563 /* use the right regulators after identify the right device */
460 switch (pfuze_chip->chip_id) { 564 switch (pfuze_chip->chip_id) {
565 case PFUZE3000:
566 pfuze_regulators = pfuze3000_regulators;
567 regulator_num = ARRAY_SIZE(pfuze3000_regulators);
568 sw_check_start = PFUZE3000_SW2;
569 sw_check_end = PFUZE3000_SW2;
570 sw_hi = 1 << 3;
571 break;
461 case PFUZE200: 572 case PFUZE200:
462 pfuze_regulators = pfuze200_regulators; 573 pfuze_regulators = pfuze200_regulators;
463 regulator_num = ARRAY_SIZE(pfuze200_regulators); 574 regulator_num = ARRAY_SIZE(pfuze200_regulators);
464 sw_check_start = PFUZE200_SW2; 575 sw_check_start = PFUZE200_SW2;
465 sw_check_end = PFUZE200_SW3B; 576 sw_check_end = PFUZE200_SW3B;
466 break; 577 break;
467
468 case PFUZE100: 578 case PFUZE100:
469 default: 579 default:
470 pfuze_regulators = pfuze100_regulators; 580 pfuze_regulators = pfuze100_regulators;
@@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
474 break; 584 break;
475 } 585 }
476 dev_info(&client->dev, "pfuze%s found.\n", 586 dev_info(&client->dev, "pfuze%s found.\n",
477 (pfuze_chip->chip_id == PFUZE100) ? "100" : "200"); 587 (pfuze_chip->chip_id == PFUZE100) ? "100" :
588 ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
478 589
479 memcpy(pfuze_chip->regulator_descs, pfuze_regulators, 590 memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
480 sizeof(pfuze_chip->regulator_descs)); 591 sizeof(pfuze_chip->regulator_descs));
@@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
498 /* SW2~SW4 high bit check and modify the voltage value table */ 609 /* SW2~SW4 high bit check and modify the voltage value table */
499 if (i >= sw_check_start && i <= sw_check_end) { 610 if (i >= sw_check_start && i <= sw_check_end) {
500 regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); 611 regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
501 if (val & 0x40) { 612 if (val & sw_hi) {
502 desc->min_uV = 800000; 613 if (pfuze_chip->chip_id == PFUZE3000) {
503 desc->uV_step = 50000; 614 desc->volt_table = pfuze3000_sw2hi;
504 desc->n_voltages = 51; 615 desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
616 } else {
617 desc->min_uV = 800000;
618 desc->uV_step = 50000;
619 desc->n_voltages = 51;
620 }
505 } 621 }
506 } 622 }
507 623
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 8364ff331a81..e8647f7cf25e 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev,
227 return uV; 227 return uV;
228 228
229 mutex_lock(&vreg->lock); 229 mutex_lock(&vreg->lock);
230 vreg->uV = uV;
231 if (vreg->is_enabled) 230 if (vreg->is_enabled)
232 ret = rpm_reg_write(vreg, req, vreg->uV / 1000); 231 ret = rpm_reg_write(vreg, req, uV / 1000);
232
233 if (!ret)
234 vreg->uV = uV;
233 mutex_unlock(&vreg->lock); 235 mutex_unlock(&vreg->lock);
234 236
235 return ret; 237 return ret;
@@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev,
252 return uV; 254 return uV;
253 255
254 mutex_lock(&vreg->lock); 256 mutex_lock(&vreg->lock);
255 vreg->uV = uV;
256 if (vreg->is_enabled) 257 if (vreg->is_enabled)
257 ret = rpm_reg_write(vreg, req, vreg->uV); 258 ret = rpm_reg_write(vreg, req, uV);
259
260 if (!ret)
261 vreg->uV = uV;
258 mutex_unlock(&vreg->lock); 262 mutex_unlock(&vreg->lock);
259 263
260 return ret; 264 return ret;
@@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
674 vreg->desc.owner = THIS_MODULE; 678 vreg->desc.owner = THIS_MODULE;
675 vreg->desc.type = REGULATOR_VOLTAGE; 679 vreg->desc.type = REGULATOR_VOLTAGE;
676 vreg->desc.name = pdev->dev.of_node->name; 680 vreg->desc.name = pdev->dev.of_node->name;
681 vreg->desc.supply_name = "vin";
677 682
678 vreg->rpm = dev_get_drvdata(pdev->dev.parent); 683 vreg->rpm = dev_get_drvdata(pdev->dev.parent);
679 if (!vreg->rpm) { 684 if (!vreg->rpm) {
@@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
768 break; 773 break;
769 } 774 }
770 775
771 if (force_mode < 0) { 776 if (force_mode == -1) {
772 dev_err(&pdev->dev, "invalid force mode\n"); 777 dev_err(&pdev->dev, "invalid force mode\n");
773 return -EINVAL; 778 return -EINVAL;
774 } 779 }
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index c94a3e0f3b91..1f93b752a81c 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
97 RK808_RAMP_RATE_MASK, ramp_value); 97 RK808_RAMP_RATE_MASK, ramp_value);
98} 98}
99 99
100int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) 100static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
101{ 101{
102 unsigned int reg; 102 unsigned int reg;
103 int sel = regulator_map_voltage_linear_range(rdev, uv, uv); 103 int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
@@ -112,7 +112,7 @@ int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
112 sel); 112 sel);
113} 113}
114 114
115int rk808_set_suspend_enable(struct regulator_dev *rdev) 115static int rk808_set_suspend_enable(struct regulator_dev *rdev)
116{ 116{
117 unsigned int reg; 117 unsigned int reg;
118 118
@@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev)
123 0); 123 0);
124} 124}
125 125
126int rk808_set_suspend_disable(struct regulator_dev *rdev) 126static int rk808_set_suspend_disable(struct regulator_dev *rdev)
127{ 127{
128 unsigned int reg; 128 unsigned int reg;
129 129
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c
index 870cc49438db..96d2c18e051a 100644
--- a/drivers/regulator/rt5033-regulator.c
+++ b/drivers/regulator/rt5033-regulator.c
@@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = {
36static const struct regulator_desc rt5033_supported_regulators[] = { 36static const struct regulator_desc rt5033_supported_regulators[] = {
37 [RT5033_BUCK] = { 37 [RT5033_BUCK] = {
38 .name = "BUCK", 38 .name = "BUCK",
39 .of_match = of_match_ptr("BUCK"),
40 .regulators_node = of_match_ptr("regulators"),
39 .id = RT5033_BUCK, 41 .id = RT5033_BUCK,
40 .ops = &rt5033_buck_ops, 42 .ops = &rt5033_buck_ops,
41 .type = REGULATOR_VOLTAGE, 43 .type = REGULATOR_VOLTAGE,
@@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
50 }, 52 },
51 [RT5033_LDO] = { 53 [RT5033_LDO] = {
52 .name = "LDO", 54 .name = "LDO",
55 .of_match = of_match_ptr("LDO"),
56 .regulators_node = of_match_ptr("regulators"),
53 .id = RT5033_LDO, 57 .id = RT5033_LDO,
54 .ops = &rt5033_buck_ops, 58 .ops = &rt5033_buck_ops,
55 .type = REGULATOR_VOLTAGE, 59 .type = REGULATOR_VOLTAGE,
@@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
64 }, 68 },
65 [RT5033_SAFE_LDO] = { 69 [RT5033_SAFE_LDO] = {
66 .name = "SAFE_LDO", 70 .name = "SAFE_LDO",
71 .of_match = of_match_ptr("SAFE_LDO"),
72 .regulators_node = of_match_ptr("regulators"),
67 .id = RT5033_SAFE_LDO, 73 .id = RT5033_SAFE_LDO,
68 .ops = &rt5033_safe_ldo_ops, 74 .ops = &rt5033_safe_ldo_ops,
69 .type = REGULATOR_VOLTAGE, 75 .type = REGULATOR_VOLTAGE,
@@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev)
81 int ret, i; 87 int ret, i;
82 struct regulator_config config = {}; 88 struct regulator_config config = {};
83 89
84 config.dev = &pdev->dev; 90 config.dev = rt5033->dev;
85 config.driver_data = rt5033; 91 config.driver_data = rt5033;
86 92
87 for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) { 93 for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 7380af8bd50d..b941e564b3f3 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
173} 173}
174 174
175/* Operations permitted on VDCDCx */ 175/* Operations permitted on VDCDCx */
176static struct regulator_ops tps65023_dcdc_ops = { 176static const struct regulator_ops tps65023_dcdc_ops = {
177 .is_enabled = regulator_is_enabled_regmap, 177 .is_enabled = regulator_is_enabled_regmap,
178 .enable = regulator_enable_regmap, 178 .enable = regulator_enable_regmap,
179 .disable = regulator_disable_regmap, 179 .disable = regulator_disable_regmap,
@@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = {
184}; 184};
185 185
186/* Operations permitted on LDOx */ 186/* Operations permitted on LDOx */
187static struct regulator_ops tps65023_ldo_ops = { 187static const struct regulator_ops tps65023_ldo_ops = {
188 .is_enabled = regulator_is_enabled_regmap, 188 .is_enabled = regulator_is_enabled_regmap,
189 .enable = regulator_enable_regmap, 189 .enable = regulator_enable_regmap,
190 .disable = regulator_disable_regmap, 190 .disable = regulator_disable_regmap,
@@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = {
194 .map_voltage = regulator_map_voltage_ascend, 194 .map_voltage = regulator_map_voltage_ascend,
195}; 195};
196 196
197static struct regmap_config tps65023_regmap_config = { 197static const struct regmap_config tps65023_regmap_config = {
198 .reg_bits = 8, 198 .reg_bits = 8,
199 .val_bits = 8, 199 .val_bits = 8,
200}; 200};
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index 4aa60d74004e..6c719f23520a 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -26,7 +26,7 @@ static int __init rtc_hctosys(void)
26{ 26{
27 int err = -ENODEV; 27 int err = -ENODEV;
28 struct rtc_time tm; 28 struct rtc_time tm;
29 struct timespec tv = { 29 struct timespec64 tv64 = {
30 .tv_nsec = NSEC_PER_SEC >> 1, 30 .tv_nsec = NSEC_PER_SEC >> 1,
31 }; 31 };
32 struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); 32 struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
@@ -45,25 +45,17 @@ static int __init rtc_hctosys(void)
45 45
46 } 46 }
47 47
48 err = rtc_valid_tm(&tm); 48 tv64.tv_sec = rtc_tm_to_time64(&tm);
49 if (err) {
50 dev_err(rtc->dev.parent,
51 "hctosys: invalid date/time\n");
52 goto err_invalid;
53 }
54
55 rtc_tm_to_time(&tm, &tv.tv_sec);
56 49
57 err = do_settimeofday(&tv); 50 err = do_settimeofday64(&tv64);
58 51
59 dev_info(rtc->dev.parent, 52 dev_info(rtc->dev.parent,
60 "setting system clock to " 53 "setting system clock to "
61 "%d-%02d-%02d %02d:%02d:%02d UTC (%u)\n", 54 "%d-%02d-%02d %02d:%02d:%02d UTC (%lld)\n",
62 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 55 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
63 tm.tm_hour, tm.tm_min, tm.tm_sec, 56 tm.tm_hour, tm.tm_min, tm.tm_sec,
64 (unsigned int) tv.tv_sec); 57 (long long) tv64.tv_sec);
65 58
66err_invalid:
67err_read: 59err_read:
68 rtc_class_close(rtc); 60 rtc_class_close(rtc);
69 61
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 45bfc28ee3aa..37215cf983e9 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -73,10 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
73 else if (rtc->ops->set_time) 73 else if (rtc->ops->set_time)
74 err = rtc->ops->set_time(rtc->dev.parent, tm); 74 err = rtc->ops->set_time(rtc->dev.parent, tm);
75 else if (rtc->ops->set_mmss) { 75 else if (rtc->ops->set_mmss) {
76 unsigned long secs; 76 time64_t secs64 = rtc_tm_to_time64(tm);
77 err = rtc_tm_to_time(tm, &secs); 77 err = rtc->ops->set_mmss(rtc->dev.parent, secs64);
78 if (err == 0)
79 err = rtc->ops->set_mmss(rtc->dev.parent, secs);
80 } else 78 } else
81 err = -EINVAL; 79 err = -EINVAL;
82 80
@@ -105,7 +103,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
105 103
106 err = rtc->ops->read_time(rtc->dev.parent, &old); 104 err = rtc->ops->read_time(rtc->dev.parent, &old);
107 if (err == 0) { 105 if (err == 0) {
108 rtc_time_to_tm(secs, &new); 106 rtc_time64_to_tm(secs, &new);
109 107
110 /* 108 /*
111 * avoid writing when we're going to change the day of 109 * avoid writing when we're going to change the day of
@@ -157,7 +155,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
157 int err; 155 int err;
158 struct rtc_time before, now; 156 struct rtc_time before, now;
159 int first_time = 1; 157 int first_time = 1;
160 unsigned long t_now, t_alm; 158 time64_t t_now, t_alm;
161 enum { none, day, month, year } missing = none; 159 enum { none, day, month, year } missing = none;
162 unsigned days; 160 unsigned days;
163 161
@@ -258,8 +256,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
258 } 256 }
259 257
260 /* with luck, no rollover is needed */ 258 /* with luck, no rollover is needed */
261 rtc_tm_to_time(&now, &t_now); 259 t_now = rtc_tm_to_time64(&now);
262 rtc_tm_to_time(&alarm->time, &t_alm); 260 t_alm = rtc_tm_to_time64(&alarm->time);
263 if (t_now < t_alm) 261 if (t_now < t_alm)
264 goto done; 262 goto done;
265 263
@@ -273,7 +271,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
273 case day: 271 case day:
274 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); 272 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
275 t_alm += 24 * 60 * 60; 273 t_alm += 24 * 60 * 60;
276 rtc_time_to_tm(t_alm, &alarm->time); 274 rtc_time64_to_tm(t_alm, &alarm->time);
277 break; 275 break;
278 276
279 /* Month rollover ... if it's the 31th, an alarm on the 3rd will 277 /* Month rollover ... if it's the 31th, an alarm on the 3rd will
@@ -346,19 +344,19 @@ EXPORT_SYMBOL_GPL(rtc_read_alarm);
346static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 344static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
347{ 345{
348 struct rtc_time tm; 346 struct rtc_time tm;
349 long now, scheduled; 347 time64_t now, scheduled;
350 int err; 348 int err;
351 349
352 err = rtc_valid_tm(&alarm->time); 350 err = rtc_valid_tm(&alarm->time);
353 if (err) 351 if (err)
354 return err; 352 return err;
355 rtc_tm_to_time(&alarm->time, &scheduled); 353 scheduled = rtc_tm_to_time64(&alarm->time);
356 354
357 /* Make sure we're not setting alarms in the past */ 355 /* Make sure we're not setting alarms in the past */
358 err = __rtc_read_time(rtc, &tm); 356 err = __rtc_read_time(rtc, &tm);
359 if (err) 357 if (err)
360 return err; 358 return err;
361 rtc_tm_to_time(&tm, &now); 359 now = rtc_tm_to_time64(&tm);
362 if (scheduled <= now) 360 if (scheduled <= now)
363 return -ETIME; 361 return -ETIME;
364 /* 362 /*
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index d04939369251..799c34bcb26f 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -304,12 +304,12 @@ static long rtc_dev_ioctl(struct file *file,
304 * Not supported here. 304 * Not supported here.
305 */ 305 */
306 { 306 {
307 unsigned long now, then; 307 time64_t now, then;
308 308
309 err = rtc_read_time(rtc, &tm); 309 err = rtc_read_time(rtc, &tm);
310 if (err < 0) 310 if (err < 0)
311 return err; 311 return err;
312 rtc_tm_to_time(&tm, &now); 312 now = rtc_tm_to_time64(&tm);
313 313
314 alarm.time.tm_mday = tm.tm_mday; 314 alarm.time.tm_mday = tm.tm_mday;
315 alarm.time.tm_mon = tm.tm_mon; 315 alarm.time.tm_mon = tm.tm_mon;
@@ -317,11 +317,11 @@ static long rtc_dev_ioctl(struct file *file,
317 err = rtc_valid_tm(&alarm.time); 317 err = rtc_valid_tm(&alarm.time);
318 if (err < 0) 318 if (err < 0)
319 return err; 319 return err;
320 rtc_tm_to_time(&alarm.time, &then); 320 then = rtc_tm_to_time64(&alarm.time);
321 321
322 /* alarm may need to wrap into tomorrow */ 322 /* alarm may need to wrap into tomorrow */
323 if (then < now) { 323 if (then < now) {
324 rtc_time_to_tm(now + 24 * 60 * 60, &tm); 324 rtc_time64_to_tm(now + 24 * 60 * 60, &tm);
325 alarm.time.tm_mday = tm.tm_mday; 325 alarm.time.tm_mday = tm.tm_mday;
326 alarm.time.tm_mon = tm.tm_mon; 326 alarm.time.tm_mon = tm.tm_mon;
327 alarm.time.tm_year = tm.tm_year; 327 alarm.time.tm_year = tm.tm_year;
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index b37b0c80bd5a..cb989cd00b14 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -218,6 +218,7 @@ static int __init efi_rtc_probe(struct platform_device *dev)
218 if (IS_ERR(rtc)) 218 if (IS_ERR(rtc))
219 return PTR_ERR(rtc); 219 return PTR_ERR(rtc);
220 220
221 rtc->uie_unsupported = 1;
221 platform_set_drvdata(dev, rtc); 222 platform_set_drvdata(dev, rtc);
222 223
223 return 0; 224 return 0;
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
index bf3e242ccc5c..eb71872d0361 100644
--- a/drivers/rtc/systohc.c
+++ b/drivers/rtc/systohc.c
@@ -20,16 +20,16 @@
20 * 20 *
21 * If temporary failure is indicated the caller should try again 'soon' 21 * If temporary failure is indicated the caller should try again 'soon'
22 */ 22 */
23int rtc_set_ntp_time(struct timespec now) 23int rtc_set_ntp_time(struct timespec64 now)
24{ 24{
25 struct rtc_device *rtc; 25 struct rtc_device *rtc;
26 struct rtc_time tm; 26 struct rtc_time tm;
27 int err = -ENODEV; 27 int err = -ENODEV;
28 28
29 if (now.tv_nsec < (NSEC_PER_SEC >> 1)) 29 if (now.tv_nsec < (NSEC_PER_SEC >> 1))
30 rtc_time_to_tm(now.tv_sec, &tm); 30 rtc_time64_to_tm(now.tv_sec, &tm);
31 else 31 else
32 rtc_time_to_tm(now.tv_sec + 1, &tm); 32 rtc_time64_to_tm(now.tv_sec + 1, &tm);
33 33
34 rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); 34 rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
35 if (rtc) { 35 if (rtc) {
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 6776931e25d4..78ce4d61a69b 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -813,12 +813,13 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
813 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, 813 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
814 &devcontrol); 814 &devcontrol);
815 815
816 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) { 816 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
817 PCI_EXP_DEVCTL_READRQ_512B) {
817 esas2r_log(ESAS2R_LOG_INFO, 818 esas2r_log(ESAS2R_LOG_INFO,
818 "max read request size > 512B"); 819 "max read request size > 512B");
819 820
820 devcontrol &= ~PCI_EXP_DEVCTL_READRQ; 821 devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
821 devcontrol |= 0x2000; 822 devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
822 pci_write_config_word(a->pcid, 823 pci_write_config_word(a->pcid,
823 pcie_cap_reg + PCI_EXP_DEVCTL, 824 pcie_cap_reg + PCI_EXP_DEVCTL,
824 devcontrol); 825 devcontrol);
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 1e824fb1649b..296db7a69c27 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -161,7 +161,7 @@ static int sfi_verify_table(struct sfi_table_header *table)
161 * Check for common case that we can re-use mapping to SYST, 161 * Check for common case that we can re-use mapping to SYST,
162 * which requires syst_pa, syst_va to be initialized. 162 * which requires syst_pa, syst_va to be initialized.
163 */ 163 */
164struct sfi_table_header *sfi_map_table(u64 pa) 164static struct sfi_table_header *sfi_map_table(u64 pa)
165{ 165{
166 struct sfi_table_header *th; 166 struct sfi_table_header *th;
167 u32 length; 167 u32 length;
@@ -189,7 +189,7 @@ struct sfi_table_header *sfi_map_table(u64 pa)
189 * Undoes effect of sfi_map_table() by unmapping table 189 * Undoes effect of sfi_map_table() by unmapping table
190 * if it did not completely fit on same page as SYST. 190 * if it did not completely fit on same page as SYST.
191 */ 191 */
192void sfi_unmap_table(struct sfi_table_header *th) 192static void sfi_unmap_table(struct sfi_table_header *th)
193{ 193{
194 if (!TABLE_ON_PAGE(syst_va, th, th->len)) 194 if (!TABLE_ON_PAGE(syst_va, th, th->len))
195 sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ? 195 sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ?
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 99829985c1a1..95ccedabba4f 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -185,6 +185,16 @@ config SPI_DAVINCI
185 help 185 help
186 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 186 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
187 187
188config SPI_DLN2
189 tristate "Diolan DLN-2 USB SPI adapter"
190 depends on MFD_DLN2
191 help
192 If you say yes to this option, support will be included for Diolan
193 DLN2, a USB to SPI interface.
194
195 This driver can also be built as a module. If so, the module
196 will be called spi-dln2.
197
188config SPI_EFM32 198config SPI_EFM32
189 tristate "EFM32 SPI controller" 199 tristate "EFM32 SPI controller"
190 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) 200 depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
@@ -279,7 +289,7 @@ config SPI_FSL_CPM
279 depends on FSL_SOC 289 depends on FSL_SOC
280 290
281config SPI_FSL_SPI 291config SPI_FSL_SPI
282 bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" 292 tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
283 depends on OF 293 depends on OF
284 select SPI_FSL_LIB 294 select SPI_FSL_LIB
285 select SPI_FSL_CPM if FSL_SOC 295 select SPI_FSL_CPM if FSL_SOC
@@ -292,7 +302,6 @@ config SPI_FSL_SPI
292 302
293config SPI_FSL_DSPI 303config SPI_FSL_DSPI
294 tristate "Freescale DSPI controller" 304 tristate "Freescale DSPI controller"
295 select SPI_BITBANG
296 select REGMAP_MMIO 305 select REGMAP_MMIO
297 depends on SOC_VF610 || COMPILE_TEST 306 depends on SOC_VF610 || COMPILE_TEST
298 help 307 help
@@ -300,7 +309,7 @@ config SPI_FSL_DSPI
300 mode. VF610 platform uses the controller. 309 mode. VF610 platform uses the controller.
301 310
302config SPI_FSL_ESPI 311config SPI_FSL_ESPI
303 bool "Freescale eSPI controller" 312 tristate "Freescale eSPI controller"
304 depends on FSL_SOC 313 depends on FSL_SOC
305 select SPI_FSL_LIB 314 select SPI_FSL_LIB
306 help 315 help
@@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ
460config SPI_S3C64XX 469config SPI_S3C64XX
461 tristate "Samsung S3C64XX series type SPI" 470 tristate "Samsung S3C64XX series type SPI"
462 depends on (PLAT_SAMSUNG || ARCH_EXYNOS) 471 depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
463 select S3C64XX_PL080 if ARCH_S3C64XX
464 help 472 help
465 SPI driver for Samsung S3C64XX and newer SoCs. 473 SPI driver for Samsung S3C64XX and newer SoCs.
466 474
@@ -503,6 +511,13 @@ config SPI_SIRF
503 help 511 help
504 SPI driver for CSR SiRFprimaII SoCs 512 SPI driver for CSR SiRFprimaII SoCs
505 513
514config SPI_ST_SSC4
515 tristate "STMicroelectronics SPI SSC-based driver"
516 depends on ARCH_STI
517 help
518 STMicroelectronics SoCs support for SPI. If you say yes to
519 this option, support will be included for the SSC driven SPI.
520
506config SPI_SUN4I 521config SPI_SUN4I
507 tristate "Allwinner A10 SoCs SPI controller" 522 tristate "Allwinner A10 SoCs SPI controller"
508 depends on ARCH_SUNXI || COMPILE_TEST 523 depends on ARCH_SUNXI || COMPILE_TEST
@@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA
595 16 bit words in SPI mode 0, automatically asserting CS on transfer 610 16 bit words in SPI mode 0, automatically asserting CS on transfer
596 start and deasserting on end. 611 start and deasserting on end.
597 612
598
599config SPI_NUC900 613config SPI_NUC900
600 tristate "Nuvoton NUC900 series SPI" 614 tristate "Nuvoton NUC900 series SPI"
601 depends on ARCH_W90X900 615 depends on ARCH_W90X900
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6b9d2ac629cc..d8cbf654976b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
27obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o 27obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
28obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o 28obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
29obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o 29obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
30obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
30obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o 31obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
31obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o 32obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
32obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o 33obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
76obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o 77obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
77obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o 78obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
78obj-$(CONFIG_SPI_SIRF) += spi-sirf.o 79obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
80obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
79obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o 81obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
80obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o 82obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
81obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o 83obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 23d8f5f56579..9af7841f2e8c 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1046 struct atmel_spi_device *asd; 1046 struct atmel_spi_device *asd;
1047 int timeout; 1047 int timeout;
1048 int ret; 1048 int ret;
1049 unsigned long dma_timeout;
1049 1050
1050 as = spi_master_get_devdata(master); 1051 as = spi_master_get_devdata(master);
1051 1052
@@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1103 1104
1104 /* interrupts are disabled, so free the lock for schedule */ 1105 /* interrupts are disabled, so free the lock for schedule */
1105 atmel_spi_unlock(as); 1106 atmel_spi_unlock(as);
1106 ret = wait_for_completion_timeout(&as->xfer_completion, 1107 dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
1107 SPI_DMA_TIMEOUT); 1108 SPI_DMA_TIMEOUT);
1108 atmel_spi_lock(as); 1109 atmel_spi_lock(as);
1109 if (WARN_ON(ret == 0)) { 1110 if (WARN_ON(dma_timeout == 0)) {
1110 dev_err(&spi->dev, 1111 dev_err(&spi->dev, "spi transfer timeout\n");
1111 "spi trasfer timeout, err %d\n", ret);
1112 as->done_status = -EIO; 1112 as->done_status = -EIO;
1113 } else {
1114 ret = 0;
1115 } 1113 }
1116 1114
1117 if (as->done_status) 1115 if (as->done_status)
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 326f47973684..f45e085c01a6 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 18 */
23 19
24#include <linux/init.h> 20#include <linux/init.h>
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 98aab457b24d..419a782ab6d5 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -17,10 +17,6 @@
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */ 20 */
25 21
26#include <linux/clk.h> 22#include <linux/clk.h>
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index c20530982e26..e73e2b052c9c 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
20 */ 16 */
21 17
22#include <linux/kernel.h> 18#include <linux/kernel.h>
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index dc7d2c2d643e..5ef6638d5e8a 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index ee4f91ccd8fd..9a95862986c8 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20#include <linux/kernel.h> 16#include <linux/kernel.h>
21#include <linux/init.h> 17#include <linux/init.h>
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 41b5dc4445f6..688956ff5095 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
19 *
20*/ 15*/
21 16
22#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index b3707badb1e5..5e991065f5b0 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
20#include <linux/interrupt.h> 16#include <linux/interrupt.h>
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644
index 000000000000..3b7d91d94fea
--- /dev/null
+++ b/drivers/spi/spi-dln2.c
@@ -0,0 +1,881 @@
1/*
2 * Driver for the Diolan DLN-2 USB-SPI adapter
3 *
4 * Copyright (c) 2014 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/mfd/dln2.h>
15#include <linux/spi/spi.h>
16#include <linux/pm_runtime.h>
17#include <asm/unaligned.h>
18
19#define DLN2_SPI_MODULE_ID 0x02
20#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
21
22/* SPI commands */
23#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00)
24#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11)
25#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12)
26#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13)
27#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14)
28#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15)
29#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16)
30#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17)
31#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18)
32#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19)
33#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A)
34#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B)
35#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C)
36#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20)
37#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21)
38#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22)
39#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23)
40#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24)
41#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25)
42#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26)
43#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27)
44#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28)
45#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B)
46#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C)
47#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D)
48#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E)
49#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F)
50#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30)
51#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31)
52#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32)
53#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33)
54#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34)
55#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35)
56#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36)
57#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37)
58#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38)
59#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39)
60#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A)
61#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40)
62#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41)
63#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42)
64#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43)
65#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44)
66#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45)
67#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46)
68#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47)
69#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48)
70#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49)
71#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A)
72#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B)
73#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C)
74
75#define DLN2_SPI_MAX_XFER_SIZE 256
76#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16)
77#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0)
78#define DLN2_TRANSFERS_WAIT_COMPLETE 1
79#define DLN2_TRANSFERS_CANCEL 0
80#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000
81
82struct dln2_spi {
83 struct platform_device *pdev;
84 struct spi_master *master;
85 u8 port;
86
87 /*
88 * This buffer will be used mainly for read/write operations. Since
89 * they're quite large, we cannot use the stack. Protection is not
90 * needed because all SPI communication is serialized by the SPI core.
91 */
92 void *buf;
93
94 u8 bpw;
95 u32 speed;
96 u16 mode;
97 u8 cs;
98};
99
100/*
101 * Enable/Disable SPI module. The disable command will wait for transfers to
102 * complete first.
103 */
104static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
105{
106 u16 cmd;
107 struct {
108 u8 port;
109 u8 wait_for_completion;
110 } tx;
111 unsigned len = sizeof(tx);
112
113 tx.port = dln2->port;
114
115 if (enable) {
116 cmd = DLN2_SPI_ENABLE;
117 len -= sizeof(tx.wait_for_completion);
118 } else {
119 tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
120 cmd = DLN2_SPI_DISABLE;
121 }
122
123 return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
124}
125
126/*
127 * Select/unselect multiple CS lines. The selected lines will be automatically
128 * toggled LOW/HIGH by the board firmware during transfers, provided they're
129 * enabled first.
130 *
131 * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
132 * will toggle the lines LOW/HIGH automatically.
133 */
134static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
135{
136 struct {
137 u8 port;
138 u8 cs;
139 } tx;
140
141 tx.port = dln2->port;
142
143 /*
144 * According to Diolan docs, "a slave device can be selected by changing
145 * the corresponding bit value to 0". The rest must be set to 1. Hence
146 * the bitwise NOT in front.
147 */
148 tx.cs = ~cs_mask;
149
150 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
151}
152
153/*
154 * Select one CS line. The other lines will be un-selected.
155 */
156static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
157{
158 return dln2_spi_cs_set(dln2, BIT(cs));
159}
160
161/*
162 * Enable/disable CS lines for usage. The module has to be disabled first.
163 */
164static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
165{
166 struct {
167 u8 port;
168 u8 cs;
169 } tx;
170 u16 cmd;
171
172 tx.port = dln2->port;
173 tx.cs = cs_mask;
174 cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
175
176 return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
177}
178
179static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
180{
181 u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
182
183 return dln2_spi_cs_enable(dln2, cs_mask, enable);
184}
185
186static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
187{
188 int ret;
189 struct {
190 u8 port;
191 } tx;
192 struct {
193 __le16 cs_count;
194 } rx;
195 unsigned rx_len = sizeof(rx);
196
197 tx.port = dln2->port;
198 ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
199 &rx, &rx_len);
200 if (ret < 0)
201 return ret;
202 if (rx_len < sizeof(rx))
203 return -EPROTO;
204
205 *cs_num = le16_to_cpu(rx.cs_count);
206
207 dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
208
209 return 0;
210}
211
212static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
213{
214 int ret;
215 struct {
216 u8 port;
217 } tx;
218 struct {
219 __le32 speed;
220 } rx;
221 unsigned rx_len = sizeof(rx);
222
223 tx.port = dln2->port;
224
225 ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
226 if (ret < 0)
227 return ret;
228 if (rx_len < sizeof(rx))
229 return -EPROTO;
230
231 *freq = le32_to_cpu(rx.speed);
232
233 return 0;
234}
235
236/*
237 * Get bus min/max frequencies.
238 */
239static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
240{
241 int ret;
242
243 ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
244 if (ret < 0)
245 return ret;
246
247 ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
248 if (ret < 0)
249 return ret;
250
251 dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
252 *fmin, *fmax);
253
254 return 0;
255}
256
257/*
258 * Set the bus speed. The module will automatically round down to the closest
259 * available frequency and returns it. The module has to be disabled first.
260 */
261static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
262{
263 int ret;
264 struct {
265 u8 port;
266 __le32 speed;
267 } __packed tx;
268 struct {
269 __le32 speed;
270 } rx;
271 int rx_len = sizeof(rx);
272
273 tx.port = dln2->port;
274 tx.speed = cpu_to_le32(speed);
275
276 ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
277 &rx, &rx_len);
278 if (ret < 0)
279 return ret;
280 if (rx_len < sizeof(rx))
281 return -EPROTO;
282
283 return 0;
284}
285
286/*
287 * Change CPOL & CPHA. The module has to be disabled first.
288 */
289static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
290{
291 struct {
292 u8 port;
293 u8 mode;
294 } tx;
295
296 tx.port = dln2->port;
297 tx.mode = mode;
298
299 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
300}
301
302/*
303 * Change frame size. The module has to be disabled first.
304 */
305static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
306{
307 struct {
308 u8 port;
309 u8 bpw;
310 } tx;
311
312 tx.port = dln2->port;
313 tx.bpw = bpw;
314
315 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
316 &tx, sizeof(tx));
317}
318
319static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
320 u32 *bpw_mask)
321{
322 int ret;
323 struct {
324 u8 port;
325 } tx;
326 struct {
327 u8 count;
328 u8 frame_sizes[36];
329 } *rx = dln2->buf;
330 unsigned rx_len = sizeof(*rx);
331 int i;
332
333 tx.port = dln2->port;
334
335 ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
336 &tx, sizeof(tx), rx, &rx_len);
337 if (ret < 0)
338 return ret;
339 if (rx_len < sizeof(*rx))
340 return -EPROTO;
341 if (rx->count > ARRAY_SIZE(rx->frame_sizes))
342 return -EPROTO;
343
344 *bpw_mask = 0;
345 for (i = 0; i < rx->count; i++)
346 *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
347
348 dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
349
350 return 0;
351}
352
353/*
354 * Copy the data to DLN2 buffer and change the byte order to LE, requested by
355 * DLN2 module. SPI core makes sure that the data length is a multiple of word
356 * size.
357 */
358static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
359{
360#ifdef __LITTLE_ENDIAN
361 memcpy(dln2_buf, src, len);
362#else
363 if (bpw <= 8) {
364 memcpy(dln2_buf, src, len);
365 } else if (bpw <= 16) {
366 __le16 *d = (__le16 *)dln2_buf;
367 u16 *s = (u16 *)src;
368
369 len = len / 2;
370 while (len--)
371 *d++ = cpu_to_le16p(s++);
372 } else {
373 __le32 *d = (__le32 *)dln2_buf;
374 u32 *s = (u32 *)src;
375
376 len = len / 4;
377 while (len--)
378 *d++ = cpu_to_le32p(s++);
379 }
380#endif
381
382 return 0;
383}
384
385/*
386 * Copy the data from DLN2 buffer and convert to CPU byte order since the DLN2
387 * buffer is LE ordered. SPI core makes sure that the data length is a multiple
388 * of word size. The RX dln2_buf is 2 byte aligned so, for BE, we have to make
389 * sure we avoid unaligned accesses for 32 bit case.
390 */
391static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
392{
393#ifdef __LITTLE_ENDIAN
394 memcpy(dest, dln2_buf, len);
395#else
396 if (bpw <= 8) {
397 memcpy(dest, dln2_buf, len);
398 } else if (bpw <= 16) {
399 u16 *d = (u16 *)dest;
400 __le16 *s = (__le16 *)dln2_buf;
401
402 len = len / 2;
403 while (len--)
404 *d++ = le16_to_cpup(s++);
405 } else {
406 u32 *d = (u32 *)dest;
407 __le32 *s = (__le32 *)dln2_buf;
408
409 len = len / 4;
410 while (len--)
411 *d++ = get_unaligned_le32(s++);
412 }
413#endif
414
415 return 0;
416}
417
418/*
419 * Perform one write operation.
420 */
421static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
422 u16 data_len, u8 attr)
423{
424 struct {
425 u8 port;
426 __le16 size;
427 u8 attr;
428 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
429 } __packed *tx = dln2->buf;
430 unsigned tx_len;
431
432 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
433
434 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
435 return -EINVAL;
436
437 tx->port = dln2->port;
438 tx->size = cpu_to_le16(data_len);
439 tx->attr = attr;
440
441 dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
442
443 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
444 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
445}
446
447/*
448 * Perform one read operation.
449 */
450static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
451 u16 data_len, u8 attr)
452{
453 int ret;
454 struct {
455 u8 port;
456 __le16 size;
457 u8 attr;
458 } __packed tx;
459 struct {
460 __le16 size;
461 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
462 } __packed *rx = dln2->buf;
463 unsigned rx_len = sizeof(*rx);
464
465 BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
466
467 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
468 return -EINVAL;
469
470 tx.port = dln2->port;
471 tx.size = cpu_to_le16(data_len);
472 tx.attr = attr;
473
474 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
475 rx, &rx_len);
476 if (ret < 0)
477 return ret;
478 if (rx_len < sizeof(rx->size) + data_len)
479 return -EPROTO;
480 if (le16_to_cpu(rx->size) != data_len)
481 return -EPROTO;
482
483 dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
484
485 return 0;
486}
487
488/*
489 * Perform one write & read operation.
490 */
491static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
492 u8 *rx_data, u16 data_len, u8 attr)
493{
494 int ret;
495 struct {
496 u8 port;
497 __le16 size;
498 u8 attr;
499 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
500 } __packed *tx;
501 struct {
502 __le16 size;
503 u8 buf[DLN2_SPI_MAX_XFER_SIZE];
504 } __packed *rx;
505 unsigned tx_len, rx_len;
506
507 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
508 sizeof(*rx) > DLN2_SPI_BUF_SIZE);
509
510 if (data_len > DLN2_SPI_MAX_XFER_SIZE)
511 return -EINVAL;
512
513 /*
514 * Since this is a pseudo full-duplex communication, we're perfectly
515 * safe to use the same buffer for both tx and rx. When DLN2 sends the
516 * response back, with the rx data, we don't need the tx buffer anymore.
517 */
518 tx = dln2->buf;
519 rx = dln2->buf;
520
521 tx->port = dln2->port;
522 tx->size = cpu_to_le16(data_len);
523 tx->attr = attr;
524
525 dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
526
527 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
528 rx_len = sizeof(*rx);
529
530 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
531 rx, &rx_len);
532 if (ret < 0)
533 return ret;
534 if (rx_len < sizeof(rx->size) + data_len)
535 return -EPROTO;
536 if (le16_to_cpu(rx->size) != data_len)
537 return -EPROTO;
538
539 dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
540
541 return 0;
542}
543
544/*
545 * Read/Write wrapper. It will automatically split an operation into multiple
546 * single ones due to device buffer constraints.
547 */
548static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
549 u8 *rx_data, u16 data_len, u8 attr) {
550 int ret;
551 u16 len;
552 u8 temp_attr;
553 u16 remaining = data_len;
554 u16 offset;
555
556 do {
557 if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
558 len = DLN2_SPI_MAX_XFER_SIZE;
559 temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
560 } else {
561 len = remaining;
562 temp_attr = attr;
563 }
564
565 offset = data_len - remaining;
566
567 if (tx_data && rx_data) {
568 ret = dln2_spi_read_write_one(dln2,
569 tx_data + offset,
570 rx_data + offset,
571 len, temp_attr);
572 } else if (tx_data) {
573 ret = dln2_spi_write_one(dln2,
574 tx_data + offset,
575 len, temp_attr);
576 } else if (rx_data) {
577 ret = dln2_spi_read_one(dln2,
578 rx_data + offset,
579 len, temp_attr);
580 } else {
581 return -EINVAL;
582 }
583
584 if (ret < 0)
585 return ret;
586
587 remaining -= len;
588 } while (remaining);
589
590 return 0;
591}
592
593static int dln2_spi_prepare_message(struct spi_master *master,
594 struct spi_message *message)
595{
596 int ret;
597 struct dln2_spi *dln2 = spi_master_get_devdata(master);
598 struct spi_device *spi = message->spi;
599
600 if (dln2->cs != spi->chip_select) {
601 ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
602 if (ret < 0)
603 return ret;
604
605 dln2->cs = spi->chip_select;
606 }
607
608 return 0;
609}
610
611static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
612 u8 bpw, u8 mode)
613{
614 int ret;
615 bool bus_setup_change;
616
617 bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
618 dln2->bpw != bpw;
619
620 if (!bus_setup_change)
621 return 0;
622
623 ret = dln2_spi_enable(dln2, false);
624 if (ret < 0)
625 return ret;
626
627 if (dln2->speed != speed) {
628 ret = dln2_spi_set_speed(dln2, speed);
629 if (ret < 0)
630 return ret;
631
632 dln2->speed = speed;
633 }
634
635 if (dln2->mode != mode) {
636 ret = dln2_spi_set_mode(dln2, mode & 0x3);
637 if (ret < 0)
638 return ret;
639
640 dln2->mode = mode;
641 }
642
643 if (dln2->bpw != bpw) {
644 ret = dln2_spi_set_bpw(dln2, bpw);
645 if (ret < 0)
646 return ret;
647
648 dln2->bpw = bpw;
649 }
650
651 return dln2_spi_enable(dln2, true);
652}
653
654static int dln2_spi_transfer_one(struct spi_master *master,
655 struct spi_device *spi,
656 struct spi_transfer *xfer)
657{
658 struct dln2_spi *dln2 = spi_master_get_devdata(master);
659 int status;
660 u8 attr = 0;
661
662 status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
663 xfer->bits_per_word,
664 spi->mode);
665 if (status < 0) {
666 dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
667 return status;
668 }
669
670 if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
671 attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
672
673 status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
674 xfer->len, attr);
675 if (status < 0)
676 dev_err(&dln2->pdev->dev, "write/read failed!\n");
677
678 return status;
679}
680
681static int dln2_spi_probe(struct platform_device *pdev)
682{
683 struct spi_master *master;
684 struct dln2_spi *dln2;
685 struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
686 int ret;
687
688 master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
689 if (!master)
690 return -ENOMEM;
691
692 platform_set_drvdata(pdev, master);
693
694 dln2 = spi_master_get_devdata(master);
695
696 dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
697 if (!dln2->buf) {
698 ret = -ENOMEM;
699 goto exit_free_master;
700 }
701
702 dln2->master = master;
703 dln2->pdev = pdev;
704 dln2->port = pdata->port;
705 /* cs/mode can never be 0xff, so the first transfer will set them */
706 dln2->cs = 0xff;
707 dln2->mode = 0xff;
708
709 /* disable SPI module before continuing with the setup */
710 ret = dln2_spi_enable(dln2, false);
711 if (ret < 0) {
712 dev_err(&pdev->dev, "Failed to disable SPI module\n");
713 goto exit_free_master;
714 }
715
716 ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
717 if (ret < 0) {
718 dev_err(&pdev->dev, "Failed to get number of CS pins\n");
719 goto exit_free_master;
720 }
721
722 ret = dln2_spi_get_speed_range(dln2,
723 &master->min_speed_hz,
724 &master->max_speed_hz);
725 if (ret < 0) {
726 dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
727 goto exit_free_master;
728 }
729
730 ret = dln2_spi_get_supported_frame_sizes(dln2,
731 &master->bits_per_word_mask);
732 if (ret < 0) {
733 dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
734 goto exit_free_master;
735 }
736
737 ret = dln2_spi_cs_enable_all(dln2, true);
738 if (ret < 0) {
739 dev_err(&pdev->dev, "Failed to enable CS pins\n");
740 goto exit_free_master;
741 }
742
743 master->bus_num = -1;
744 master->mode_bits = SPI_CPOL | SPI_CPHA;
745 master->prepare_message = dln2_spi_prepare_message;
746 master->transfer_one = dln2_spi_transfer_one;
747 master->auto_runtime_pm = true;
748
749 /* enable SPI module, we're good to go */
750 ret = dln2_spi_enable(dln2, true);
751 if (ret < 0) {
752 dev_err(&pdev->dev, "Failed to enable SPI module\n");
753 goto exit_free_master;
754 }
755
756 pm_runtime_set_autosuspend_delay(&pdev->dev,
757 DLN2_RPM_AUTOSUSPEND_TIMEOUT);
758 pm_runtime_use_autosuspend(&pdev->dev);
759 pm_runtime_set_active(&pdev->dev);
760 pm_runtime_enable(&pdev->dev);
761
762 ret = devm_spi_register_master(&pdev->dev, master);
763 if (ret < 0) {
764 dev_err(&pdev->dev, "Failed to register master\n");
765 goto exit_register;
766 }
767
768 return ret;
769
770exit_register:
771 pm_runtime_disable(&pdev->dev);
772 pm_runtime_set_suspended(&pdev->dev);
773
774 if (dln2_spi_enable(dln2, false) < 0)
775 dev_err(&pdev->dev, "Failed to disable SPI module\n");
776exit_free_master:
777 spi_master_put(master);
778
779 return ret;
780}
781
782static int dln2_spi_remove(struct platform_device *pdev)
783{
784 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
785 struct dln2_spi *dln2 = spi_master_get_devdata(master);
786
787 pm_runtime_disable(&pdev->dev);
788
789 if (dln2_spi_enable(dln2, false) < 0)
790 dev_err(&pdev->dev, "Failed to disable SPI module\n");
791
792 return 0;
793}
794
795#ifdef CONFIG_PM_SLEEP
796static int dln2_spi_suspend(struct device *dev)
797{
798 int ret;
799 struct spi_master *master = dev_get_drvdata(dev);
800 struct dln2_spi *dln2 = spi_master_get_devdata(master);
801
802 ret = spi_master_suspend(master);
803 if (ret < 0)
804 return ret;
805
806 if (!pm_runtime_suspended(dev)) {
807 ret = dln2_spi_enable(dln2, false);
808 if (ret < 0)
809 return ret;
810 }
811
812 /*
813 * USB power may be cut off during sleep. Resetting the following
814 * parameters will force the board to be set up before first transfer.
815 */
816 dln2->cs = 0xff;
817 dln2->speed = 0;
818 dln2->bpw = 0;
819 dln2->mode = 0xff;
820
821 return 0;
822}
823
824static int dln2_spi_resume(struct device *dev)
825{
826 int ret;
827 struct spi_master *master = dev_get_drvdata(dev);
828 struct dln2_spi *dln2 = spi_master_get_devdata(master);
829
830 if (!pm_runtime_suspended(dev)) {
831 ret = dln2_spi_cs_enable_all(dln2, true);
832 if (ret < 0)
833 return ret;
834
835 ret = dln2_spi_enable(dln2, true);
836 if (ret < 0)
837 return ret;
838 }
839
840 return spi_master_resume(master);
841}
842#endif /* CONFIG_PM_SLEEP */
843
844#ifdef CONFIG_PM
845static int dln2_spi_runtime_suspend(struct device *dev)
846{
847 struct spi_master *master = dev_get_drvdata(dev);
848 struct dln2_spi *dln2 = spi_master_get_devdata(master);
849
850 return dln2_spi_enable(dln2, false);
851}
852
853static int dln2_spi_runtime_resume(struct device *dev)
854{
855 struct spi_master *master = dev_get_drvdata(dev);
856 struct dln2_spi *dln2 = spi_master_get_devdata(master);
857
858 return dln2_spi_enable(dln2, true);
859}
860#endif /* CONFIG_PM */
861
862static const struct dev_pm_ops dln2_spi_pm = {
863 SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
864 SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
865 dln2_spi_runtime_resume, NULL)
866};
867
868static struct platform_driver spi_dln2_driver = {
869 .driver = {
870 .name = "dln2-spi",
871 .pm = &dln2_spi_pm,
872 },
873 .probe = dln2_spi_probe,
874 .remove = dln2_spi_remove,
875};
876module_platform_driver(spi_dln2_driver);
877
878MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
879MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
880MODULE_LICENSE("GPL v2");
881MODULE_ALIAS("platform:dln2-spi");
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index a67d37c7e3c0..a0197fd4e95c 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = {
247 247
248/* Some specific info for SPI0 controller on Intel MID */ 248/* Some specific info for SPI0 controller on Intel MID */
249 249
250/* HW info for MRST CLk Control Unit, one 32b reg */ 250/* HW info for MRST Clk Control Unit, 32b reg per controller */
251#define MRST_SPI_CLK_BASE 100000000 /* 100m */ 251#define MRST_SPI_CLK_BASE 100000000 /* 100m */
252#define MRST_CLK_SPI0_REG 0xff11d86c 252#define MRST_CLK_SPI_REG 0xff11d86c
253#define CLK_SPI_BDIV_OFFSET 0 253#define CLK_SPI_BDIV_OFFSET 0
254#define CLK_SPI_BDIV_MASK 0x00000007 254#define CLK_SPI_BDIV_MASK 0x00000007
255#define CLK_SPI_CDIV_OFFSET 9 255#define CLK_SPI_CDIV_OFFSET 9
@@ -261,16 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws)
261 void __iomem *clk_reg; 261 void __iomem *clk_reg;
262 u32 clk_cdiv; 262 u32 clk_cdiv;
263 263
264 clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); 264 clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
265 if (!clk_reg) 265 if (!clk_reg)
266 return -ENOMEM; 266 return -ENOMEM;
267 267
268 /* get SPI controller operating freq info */ 268 /* Get SPI controller operating freq info */
269 clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; 269 clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
270 clk_cdiv &= CLK_SPI_CDIV_MASK;
271 clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
270 dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); 272 dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
271 iounmap(clk_reg);
272 273
273 dws->num_cs = 16; 274 iounmap(clk_reg);
274 275
275#ifdef CONFIG_SPI_DW_MID_DMA 276#ifdef CONFIG_SPI_DW_MID_DMA
276 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); 277 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index ba68da12cdf0..5ba331047cbe 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -30,10 +30,20 @@ struct dw_spi_pci {
30 30
31struct spi_pci_desc { 31struct spi_pci_desc {
32 int (*setup)(struct dw_spi *); 32 int (*setup)(struct dw_spi *);
33 u16 num_cs;
34 u16 bus_num;
33}; 35};
34 36
35static struct spi_pci_desc spi_pci_mid_desc = { 37static struct spi_pci_desc spi_pci_mid_desc_1 = {
36 .setup = dw_spi_mid_init, 38 .setup = dw_spi_mid_init,
39 .num_cs = 32,
40 .bus_num = 0,
41};
42
43static struct spi_pci_desc spi_pci_mid_desc_2 = {
44 .setup = dw_spi_mid_init,
45 .num_cs = 4,
46 .bus_num = 1,
37}; 47};
38 48
39static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 49static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
65 75
66 dws->regs = pcim_iomap_table(pdev)[pci_bar]; 76 dws->regs = pcim_iomap_table(pdev)[pci_bar];
67 77
68 dws->bus_num = 0;
69 dws->num_cs = 4;
70 dws->irq = pdev->irq; 78 dws->irq = pdev->irq;
71 79
72 /* 80 /*
73 * Specific handling for paltforms, like dma setup, 81 * Specific handling for paltforms, like dma setup,
74 * clock rate, FIFO depth. 82 * clock rate, FIFO depth.
75 */ 83 */
76 if (desc && desc->setup) { 84 if (desc) {
77 ret = desc->setup(dws); 85 dws->num_cs = desc->num_cs;
78 if (ret) 86 dws->bus_num = desc->bus_num;
79 return ret; 87
88 if (desc->setup) {
89 ret = desc->setup(dws);
90 if (ret)
91 return ret;
92 }
93 } else {
94 return -ENODEV;
80 } 95 }
81 96
82 ret = dw_spi_add_host(&pdev->dev, dws); 97 ret = dw_spi_add_host(&pdev->dev, dws);
@@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
121 136
122static const struct pci_device_id pci_ids[] = { 137static const struct pci_device_id pci_ids[] = {
123 /* Intel MID platform SPI controller 0 */ 138 /* Intel MID platform SPI controller 0 */
124 { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc}, 139 /*
140 * The access to the device 8086:0801 is disabled by HW, since it's
141 * exclusively used by SCU to communicate with MSIC.
142 */
143 /* Intel MID platform SPI controller 1 */
144 { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
145 /* Intel MID platform SPI controller 2 */
146 { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
125 {}, 147 {},
126}; 148};
127 149
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 8edcd1b84562..5a97a62b298a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
608} 608}
609 609
610/* Restart the controller, disable all interrupts, clean rx fifo */ 610/* Restart the controller, disable all interrupts, clean rx fifo */
611static void spi_hw_init(struct dw_spi *dws) 611static void spi_hw_init(struct device *dev, struct dw_spi *dws)
612{ 612{
613 spi_enable_chip(dws, 0); 613 spi_enable_chip(dws, 0);
614 spi_mask_intr(dws, 0xff); 614 spi_mask_intr(dws, 0xff);
@@ -626,9 +626,10 @@ static void spi_hw_init(struct dw_spi *dws)
626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) 626 if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
627 break; 627 break;
628 } 628 }
629 dw_writew(dws, DW_SPI_TXFLTR, 0);
629 630
630 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; 631 dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
631 dw_writew(dws, DW_SPI_TXFLTR, 0); 632 dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
632 } 633 }
633} 634}
634 635
@@ -668,7 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
668 master->dev.of_node = dev->of_node; 669 master->dev.of_node = dev->of_node;
669 670
670 /* Basic HW init */ 671 /* Basic HW init */
671 spi_hw_init(dws); 672 spi_hw_init(dev, dws);
672 673
673 if (dws->dma_ops && dws->dma_ops->dma_init) { 674 if (dws->dma_ops && dws->dma_ops->dma_init) {
674 ret = dws->dma_ops->dma_init(dws); 675 ret = dws->dma_ops->dma_init(dws);
@@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
731{ 732{
732 int ret; 733 int ret;
733 734
734 spi_hw_init(dws); 735 spi_hw_init(&dws->master->dev, dws);
735 ret = spi_master_resume(dws->master); 736 ret = spi_master_resume(dws->master);
736 if (ret) 737 if (ret)
737 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 738 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 912b9037e9cf..286b2c81fc6b 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
353 return 0; 353 return 0;
354} 354}
355 355
356static int falcon_sflash_prepare_xfer(struct spi_master *master)
357{
358 return 0;
359}
360
361static int falcon_sflash_unprepare_xfer(struct spi_master *master)
362{
363 return 0;
364}
365
366static int falcon_sflash_xfer_one(struct spi_master *master, 356static int falcon_sflash_xfer_one(struct spi_master *master,
367 struct spi_message *m) 357 struct spi_message *m)
368{ 358{
@@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
420 master->mode_bits = SPI_MODE_3; 410 master->mode_bits = SPI_MODE_3;
421 master->flags = SPI_MASTER_HALF_DUPLEX; 411 master->flags = SPI_MASTER_HALF_DUPLEX;
422 master->setup = falcon_sflash_setup; 412 master->setup = falcon_sflash_setup;
423 master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
424 master->transfer_one_message = falcon_sflash_xfer_one; 413 master->transfer_one_message = falcon_sflash_xfer_one;
425 master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer;
426 master->dev.of_node = pdev->dev.of_node; 414 master->dev.of_node = pdev->dev.of_node;
427 415
428 ret = devm_spi_register_master(&pdev->dev, master); 416 ret = devm_spi_register_master(&pdev->dev, master);
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index e85ab1cb17a2..9c46a3058743 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -20,6 +20,7 @@
20#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
21#include <linux/fsl_devices.h> 21#include <linux/fsl_devices.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h>
23#include <linux/of_address.h> 24#include <linux/of_address.h>
24#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
25#include <linux/types.h> 26#include <linux/types.h>
@@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
68 } 69 }
69 } 70 }
70} 71}
72EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
71 73
72static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) 74static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
73{ 75{
@@ -162,6 +164,7 @@ err_rx_dma:
162 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); 164 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
163 return -ENOMEM; 165 return -ENOMEM;
164} 166}
167EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
165 168
166void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) 169void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
167{ 170{
@@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
174 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); 177 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
175 mspi->xfer_in_progress = NULL; 178 mspi->xfer_in_progress = NULL;
176} 179}
180EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
177 181
178void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) 182void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
179{ 183{
@@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
198 else 202 else
199 complete(&mspi->done); 203 complete(&mspi->done);
200} 204}
205EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
201 206
202static void *fsl_spi_alloc_dummy_rx(void) 207static void *fsl_spi_alloc_dummy_rx(void)
203{ 208{
@@ -375,6 +380,7 @@ err_pram:
375 fsl_spi_free_dummy_rx(); 380 fsl_spi_free_dummy_rx();
376 return -ENOMEM; 381 return -ENOMEM;
377} 382}
383EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
378 384
379void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) 385void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
380{ 386{
@@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
389 cpm_muram_free(cpm_muram_offset(mspi->pram)); 395 cpm_muram_free(cpm_muram_offset(mspi->pram));
390 fsl_spi_free_dummy_rx(); 396 fsl_spi_free_dummy_rx();
391} 397}
398EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
399
400MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 9b80d54d4ddb..d1a39249704a 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -106,7 +106,7 @@ struct chip_data {
106}; 106};
107 107
108struct fsl_dspi { 108struct fsl_dspi {
109 struct spi_bitbang bitbang; 109 struct spi_master *master;
110 struct platform_device *pdev; 110 struct platform_device *pdev;
111 111
112 struct regmap *regmap; 112 struct regmap *regmap;
@@ -114,6 +114,7 @@ struct fsl_dspi {
114 struct clk *clk; 114 struct clk *clk;
115 115
116 struct spi_transfer *cur_transfer; 116 struct spi_transfer *cur_transfer;
117 struct spi_message *cur_msg;
117 struct chip_data *cur_chip; 118 struct chip_data *cur_chip;
118 size_t len; 119 size_t len;
119 void *tx; 120 void *tx;
@@ -123,6 +124,7 @@ struct fsl_dspi {
123 char dataflags; 124 char dataflags;
124 u8 cs; 125 u8 cs;
125 u16 void_write_data; 126 u16 void_write_data;
127 u32 cs_change;
126 128
127 wait_queue_head_t waitq; 129 wait_queue_head_t waitq;
128 u32 waitflags; 130 u32 waitflags;
@@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
225 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { 227 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
226 /* last transfer in the transfer */ 228 /* last transfer in the transfer */
227 dspi_pushr |= SPI_PUSHR_EOQ; 229 dspi_pushr |= SPI_PUSHR_EOQ;
230 if ((dspi->cs_change) && (!dspi->len))
231 dspi_pushr &= ~SPI_PUSHR_CONT;
228 } else if (tx_word && (dspi->len == 1)) 232 } else if (tx_word && (dspi->len == 1))
229 dspi_pushr |= SPI_PUSHR_EOQ; 233 dspi_pushr |= SPI_PUSHR_EOQ;
230 234
@@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
246 int rx_count = 0; 250 int rx_count = 0;
247 int rx_word = is_double_byte_mode(dspi); 251 int rx_word = is_double_byte_mode(dspi);
248 u16 d; 252 u16 d;
253
249 while ((dspi->rx < dspi->rx_end) 254 while ((dspi->rx < dspi->rx_end)
250 && (rx_count < DSPI_FIFO_SIZE)) { 255 && (rx_count < DSPI_FIFO_SIZE)) {
251 if (rx_word) { 256 if (rx_word) {
@@ -276,69 +281,79 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
276 return rx_count; 281 return rx_count;
277} 282}
278 283
279static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t) 284static int dspi_transfer_one_message(struct spi_master *master,
285 struct spi_message *message)
280{ 286{
281 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); 287 struct fsl_dspi *dspi = spi_master_get_devdata(master);
282 dspi->cur_transfer = t; 288 struct spi_device *spi = message->spi;
283 dspi->cur_chip = spi_get_ctldata(spi); 289 struct spi_transfer *transfer;
284 dspi->cs = spi->chip_select; 290 int status = 0;
285 dspi->void_write_data = dspi->cur_chip->void_write_data; 291 message->actual_length = 0;
286 292
287 dspi->dataflags = 0; 293 list_for_each_entry(transfer, &message->transfers, transfer_list) {
288 dspi->tx = (void *)t->tx_buf; 294 dspi->cur_transfer = transfer;
289 dspi->tx_end = dspi->tx + t->len; 295 dspi->cur_msg = message;
290 dspi->rx = t->rx_buf; 296 dspi->cur_chip = spi_get_ctldata(spi);
291 dspi->rx_end = dspi->rx + t->len; 297 dspi->cs = spi->chip_select;
292 dspi->len = t->len; 298 if (dspi->cur_transfer->transfer_list.next
293 299 == &dspi->cur_msg->transfers)
294 if (!dspi->rx) 300 transfer->cs_change = 1;
295 dspi->dataflags |= TRAN_STATE_RX_VOID; 301 dspi->cs_change = transfer->cs_change;
296 302 dspi->void_write_data = dspi->cur_chip->void_write_data;
297 if (!dspi->tx) 303
298 dspi->dataflags |= TRAN_STATE_TX_VOID; 304 dspi->dataflags = 0;
299 305 dspi->tx = (void *)transfer->tx_buf;
300 regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val); 306 dspi->tx_end = dspi->tx + transfer->len;
301 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val); 307 dspi->rx = transfer->rx_buf;
302 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); 308 dspi->rx_end = dspi->rx + transfer->len;
303 309 dspi->len = transfer->len;
304 if (t->speed_hz) 310
311 if (!dspi->rx)
312 dspi->dataflags |= TRAN_STATE_RX_VOID;
313
314 if (!dspi->tx)
315 dspi->dataflags |= TRAN_STATE_TX_VOID;
316
317 regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
318 regmap_update_bits(dspi->regmap, SPI_MCR,
319 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
320 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
305 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 321 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
306 dspi->cur_chip->ctar_val); 322 dspi->cur_chip->ctar_val);
323 if (transfer->speed_hz)
324 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
325 dspi->cur_chip->ctar_val);
307 326
308 dspi_transfer_write(dspi); 327 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
309 328 message->actual_length += dspi_transfer_write(dspi);
310 if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
311 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
312 dspi->waitflags = 0;
313
314 return t->len - dspi->len;
315}
316 329
317static void dspi_chipselect(struct spi_device *spi, int value) 330 if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
318{ 331 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
319 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); 332 dspi->waitflags = 0;
320 unsigned int pushr;
321 333
322 regmap_read(dspi->regmap, SPI_PUSHR, &pushr); 334 if (transfer->delay_usecs)
323 335 udelay(transfer->delay_usecs);
324 switch (value) {
325 case BITBANG_CS_ACTIVE:
326 pushr |= SPI_PUSHR_CONT;
327 break;
328 case BITBANG_CS_INACTIVE:
329 pushr &= ~SPI_PUSHR_CONT;
330 break;
331 } 336 }
332 337
333 regmap_write(dspi->regmap, SPI_PUSHR, pushr); 338 message->status = status;
339 spi_finalize_current_message(master);
340
341 return status;
334} 342}
335 343
336static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 344static int dspi_setup(struct spi_device *spi)
337{ 345{
338 struct chip_data *chip; 346 struct chip_data *chip;
339 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); 347 struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
340 unsigned char br = 0, pbr = 0, fmsz = 0; 348 unsigned char br = 0, pbr = 0, fmsz = 0;
341 349
350 if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
351 fmsz = spi->bits_per_word - 1;
352 } else {
353 pr_err("Invalid wordsize\n");
354 return -ENODEV;
355 }
356
342 /* Only alloc on first setup */ 357 /* Only alloc on first setup */
343 chip = spi_get_ctldata(spi); 358 chip = spi_get_ctldata(spi);
344 if (chip == NULL) { 359 if (chip == NULL) {
@@ -349,12 +364,6 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
349 364
350 chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | 365 chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
351 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; 366 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
352 if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
353 fmsz = spi->bits_per_word - 1;
354 } else {
355 pr_err("Invalid wordsize\n");
356 return -ENODEV;
357 }
358 367
359 chip->void_write_data = 0; 368 chip->void_write_data = 0;
360 369
@@ -373,14 +382,6 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
373 return 0; 382 return 0;
374} 383}
375 384
376static int dspi_setup(struct spi_device *spi)
377{
378 if (!spi->max_speed_hz)
379 return -EINVAL;
380
381 return dspi_setup_transfer(spi, NULL);
382}
383
384static void dspi_cleanup(struct spi_device *spi) 385static void dspi_cleanup(struct spi_device *spi)
385{ 386{
386 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); 387 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
@@ -395,22 +396,20 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
395{ 396{
396 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; 397 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
397 398
398 regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); 399 struct spi_message *msg = dspi->cur_msg;
399 400
401 regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
400 dspi_transfer_read(dspi); 402 dspi_transfer_read(dspi);
401 403
402 if (!dspi->len) { 404 if (!dspi->len) {
403 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) 405 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
404 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 406 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
405 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); 407 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
406 408
407 dspi->waitflags = 1; 409 dspi->waitflags = 1;
408 wake_up_interruptible(&dspi->waitq); 410 wake_up_interruptible(&dspi->waitq);
409 } else { 411 } else
410 dspi_transfer_write(dspi); 412 msg->actual_length += dspi_transfer_write(dspi);
411
412 return IRQ_HANDLED;
413 }
414 413
415 return IRQ_HANDLED; 414 return IRQ_HANDLED;
416} 415}
@@ -469,12 +468,12 @@ static int dspi_probe(struct platform_device *pdev)
469 468
470 dspi = spi_master_get_devdata(master); 469 dspi = spi_master_get_devdata(master);
471 dspi->pdev = pdev; 470 dspi->pdev = pdev;
472 dspi->bitbang.master = master; 471 dspi->master = master;
473 dspi->bitbang.chipselect = dspi_chipselect; 472
474 dspi->bitbang.setup_transfer = dspi_setup_transfer; 473 master->transfer = NULL;
475 dspi->bitbang.txrx_bufs = dspi_txrx_transfer; 474 master->setup = dspi_setup;
476 dspi->bitbang.master->setup = dspi_setup; 475 master->transfer_one_message = dspi_transfer_one_message;
477 dspi->bitbang.master->dev.of_node = pdev->dev.of_node; 476 master->dev.of_node = pdev->dev.of_node;
478 477
479 master->cleanup = dspi_cleanup; 478 master->cleanup = dspi_cleanup;
480 master->mode_bits = SPI_CPOL | SPI_CPHA; 479 master->mode_bits = SPI_CPOL | SPI_CPHA;
@@ -535,7 +534,7 @@ static int dspi_probe(struct platform_device *pdev)
535 init_waitqueue_head(&dspi->waitq); 534 init_waitqueue_head(&dspi->waitq);
536 platform_set_drvdata(pdev, master); 535 platform_set_drvdata(pdev, master);
537 536
538 ret = spi_bitbang_start(&dspi->bitbang); 537 ret = spi_register_master(master);
539 if (ret != 0) { 538 if (ret != 0) {
540 dev_err(&pdev->dev, "Problem registering DSPI master\n"); 539 dev_err(&pdev->dev, "Problem registering DSPI master\n");
541 goto out_clk_put; 540 goto out_clk_put;
@@ -557,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev)
557 struct fsl_dspi *dspi = spi_master_get_devdata(master); 556 struct fsl_dspi *dspi = spi_master_get_devdata(master);
558 557
559 /* Disconnect from the SPI framework */ 558 /* Disconnect from the SPI framework */
560 spi_bitbang_stop(&dspi->bitbang);
561 clk_disable_unprepare(dspi->clk); 559 clk_disable_unprepare(dspi->clk);
562 spi_master_put(dspi->bitbang.master); 560 spi_unregister_master(dspi->master);
561 spi_master_put(dspi->master);
563 562
564 return 0; 563 return 0;
565} 564}
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 446b737e1532..cb35d2f0d0e6 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/module.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
25#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
26#ifdef CONFIG_FSL_SOC 27#ifdef CONFIG_FSL_SOC
@@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
35 type *rx = mpc8xxx_spi->rx; \ 36 type *rx = mpc8xxx_spi->rx; \
36 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ 37 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
37 mpc8xxx_spi->rx = rx; \ 38 mpc8xxx_spi->rx = rx; \
38} 39} \
40EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
39 41
40#define MPC8XXX_SPI_TX_BUF(type) \ 42#define MPC8XXX_SPI_TX_BUF(type) \
41u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ 43u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
@@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
47 data = *tx++ << mpc8xxx_spi->tx_shift; \ 49 data = *tx++ << mpc8xxx_spi->tx_shift; \
48 mpc8xxx_spi->tx = tx; \ 50 mpc8xxx_spi->tx = tx; \
49 return data; \ 51 return data; \
50} 52} \
53EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
51 54
52MPC8XXX_SPI_RX_BUF(u8) 55MPC8XXX_SPI_RX_BUF(u8)
53MPC8XXX_SPI_RX_BUF(u16) 56MPC8XXX_SPI_RX_BUF(u16)
@@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
60{ 63{
61 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); 64 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
62} 65}
66EXPORT_SYMBOL_GPL(to_of_pinfo);
63 67
64const char *mpc8xxx_spi_strmode(unsigned int flags) 68const char *mpc8xxx_spi_strmode(unsigned int flags)
65{ 69{
@@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
75 } 79 }
76 return "CPU"; 80 return "CPU";
77} 81}
82EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
78 83
79void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 84void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
80 unsigned int irq) 85 unsigned int irq)
@@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
102 mpc8xxx_spi->rx_shift = 0; 107 mpc8xxx_spi->rx_shift = 0;
103 mpc8xxx_spi->tx_shift = 0; 108 mpc8xxx_spi->tx_shift = 0;
104 109
105 init_completion(&mpc8xxx_spi->done);
106
107 master->bus_num = pdata->bus_num; 110 master->bus_num = pdata->bus_num;
108 master->num_chipselect = pdata->max_chipselect; 111 master->num_chipselect = pdata->max_chipselect;
109 112
110 init_completion(&mpc8xxx_spi->done); 113 init_completion(&mpc8xxx_spi->done);
111} 114}
115EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
112 116
113int mpc8xxx_spi_remove(struct device *dev) 117int mpc8xxx_spi_remove(struct device *dev)
114{ 118{
@@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev)
127 131
128 return 0; 132 return 0;
129} 133}
134EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
130 135
131int of_mpc8xxx_spi_probe(struct platform_device *ofdev) 136int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
132{ 137{
@@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
173 178
174 return 0; 179 return 0;
175} 180}
181EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
182
183MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index b4ed04e8862f..1326a392adca 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -28,7 +28,7 @@ struct mpc8xxx_spi {
28 /* rx & tx bufs from the spi_transfer */ 28 /* rx & tx bufs from the spi_transfer */
29 const void *tx; 29 const void *tx;
30 void *rx; 30 void *rx;
31#ifdef CONFIG_SPI_FSL_ESPI 31#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
32 int len; 32 int len;
33#endif 33#endif
34 34
@@ -68,7 +68,7 @@ struct mpc8xxx_spi {
68 68
69 unsigned int flags; 69 unsigned int flags;
70 70
71#ifdef CONFIG_SPI_FSL_SPI 71#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
72 int type; 72 int type;
73 int native_chipselects; 73 int native_chipselects;
74 u8 max_bits_per_word; 74 u8 max_bits_per_word;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index aee4e7589568..1c34c9314c8a 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20#include <linux/kernel.h> 16#include <linux/kernel.h>
21#include <linux/module.h> 17#include <linux/module.h>
@@ -92,7 +88,7 @@ struct spi_gpio {
92 88
93/*----------------------------------------------------------------------*/ 89/*----------------------------------------------------------------------*/
94 90
95static inline struct spi_gpio * __pure 91static inline struct spi_gpio *__pure
96spi_to_spi_gpio(const struct spi_device *spi) 92spi_to_spi_gpio(const struct spi_device *spi)
97{ 93{
98 const struct spi_bitbang *bang; 94 const struct spi_bitbang *bang;
@@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi)
103 return spi_gpio; 99 return spi_gpio;
104} 100}
105 101
106static inline struct spi_gpio_platform_data * __pure 102static inline struct spi_gpio_platform_data *__pure
107spi_to_pdata(const struct spi_device *spi) 103spi_to_pdata(const struct spi_device *spi)
108{ 104{
109 return &spi_to_spi_gpio(spi)->pdata; 105 return &spi_to_spi_gpio(spi)->pdata;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index aad6683db81b..c01567d53581 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
160 unsigned int count = 0; 160 unsigned int count = 0;
161 u32 status; 161 u32 status;
162 162
163 while (count < max) { 163 while (count < max / 4) {
164 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); 164 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
165 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); 165 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
166 if (status & SPFI_INTERRUPT_SDFUL) 166 if (status & SPFI_INTERRUPT_SDFUL)
167 break; 167 break;
168 spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA); 168 spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
169 count += 4; 169 count++;
170 } 170 }
171 171
172 return count; 172 return count * 4;
173} 173}
174 174
175static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, 175static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
196 unsigned int count = 0; 196 unsigned int count = 0;
197 u32 status; 197 u32 status;
198 198
199 while (count < max) { 199 while (count < max / 4) {
200 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT, 200 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
201 SPFI_INTERRUPT_CLEAR); 201 SPFI_INTERRUPT_CLEAR);
202 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); 202 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
203 if (!(status & SPFI_INTERRUPT_GDEX32BIT)) 203 if (!(status & SPFI_INTERRUPT_GDEX32BIT))
204 break; 204 break;
205 buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); 205 buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
206 count += 4; 206 count++;
207 } 207 }
208 208
209 return count; 209 return count * 4;
210} 210}
211 211
212static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, 212static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master,
251 time_before(jiffies, timeout)) { 251 time_before(jiffies, timeout)) {
252 unsigned int tx_count, rx_count; 252 unsigned int tx_count, rx_count;
253 253
254 switch (xfer->bits_per_word) { 254 if (tx_bytes >= 4)
255 case 32:
256 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes); 255 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
257 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes); 256 else
258 break;
259 case 8:
260 default:
261 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes); 257 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
258
259 if (rx_bytes >= 4)
260 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
261 else
262 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes); 262 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
263 break;
264 }
265 263
266 tx_buf += tx_count; 264 tx_buf += tx_count;
267 rx_buf += rx_count; 265 rx_buf += rx_count;
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master,
331 329
332 if (xfer->rx_buf) { 330 if (xfer->rx_buf) {
333 rxconf.direction = DMA_DEV_TO_MEM; 331 rxconf.direction = DMA_DEV_TO_MEM;
334 switch (xfer->bits_per_word) { 332 if (xfer->len % 4 == 0) {
335 case 32:
336 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA; 333 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
337 rxconf.src_addr_width = 4; 334 rxconf.src_addr_width = 4;
338 rxconf.src_maxburst = 4; 335 rxconf.src_maxburst = 4;
339 break; 336 } else {
340 case 8:
341 default:
342 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; 337 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
343 rxconf.src_addr_width = 1; 338 rxconf.src_addr_width = 1;
344 rxconf.src_maxburst = 4; 339 rxconf.src_maxburst = 4;
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master,
358 353
359 if (xfer->tx_buf) { 354 if (xfer->tx_buf) {
360 txconf.direction = DMA_MEM_TO_DEV; 355 txconf.direction = DMA_MEM_TO_DEV;
361 switch (xfer->bits_per_word) { 356 if (xfer->len % 4 == 0) {
362 case 32:
363 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA; 357 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
364 txconf.dst_addr_width = 4; 358 txconf.dst_addr_width = 4;
365 txconf.dst_maxburst = 4; 359 txconf.dst_maxburst = 4;
366 break; 360 } else {
367 case 8:
368 default:
369 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; 361 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
370 txconf.dst_addr_width = 1; 362 txconf.dst_addr_width = 1;
371 txconf.dst_maxburst = 4; 363 txconf.dst_maxburst = 4;
372 break;
373 } 364 }
374 dmaengine_slave_config(spfi->tx_ch, &txconf); 365 dmaengine_slave_config(spfi->tx_ch, &txconf);
375 366
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable)
508static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, 499static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
509 struct spi_transfer *xfer) 500 struct spi_transfer *xfer)
510{ 501{
511 if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE) 502 if (xfer->len > SPFI_32BIT_FIFO_SIZE)
512 return true;
513 if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
514 return true; 503 return true;
515 return false; 504 return false;
516} 505}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index fe1b7699fab6..6fea4af51c41 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -89,7 +89,6 @@ struct spi_imx_data {
89 89
90 struct completion xfer_done; 90 struct completion xfer_done;
91 void __iomem *base; 91 void __iomem *base;
92 int irq;
93 struct clk *clk_per; 92 struct clk *clk_per;
94 struct clk *clk_ipg; 93 struct clk *clk_ipg;
95 unsigned long spi_clk; 94 unsigned long spi_clk;
@@ -896,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
896{ 895{
897 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; 896 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
898 int ret; 897 int ret;
898 unsigned long timeout;
899 u32 dma; 899 u32 dma;
900 int left; 900 int left;
901 struct spi_master *master = spi_imx->bitbang.master; 901 struct spi_master *master = spi_imx->bitbang.master;
@@ -943,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
943 dma_async_issue_pending(master->dma_tx); 943 dma_async_issue_pending(master->dma_tx);
944 dma_async_issue_pending(master->dma_rx); 944 dma_async_issue_pending(master->dma_rx);
945 /* Wait SDMA to finish the data transfer.*/ 945 /* Wait SDMA to finish the data transfer.*/
946 ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion, 946 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
947 IMX_DMA_TIMEOUT); 947 IMX_DMA_TIMEOUT);
948 if (!ret) { 948 if (!timeout) {
949 pr_warn("%s %s: I/O Error in DMA TX\n", 949 pr_warn("%s %s: I/O Error in DMA TX\n",
950 dev_driver_string(&master->dev), 950 dev_driver_string(&master->dev),
951 dev_name(&master->dev)); 951 dev_name(&master->dev));
952 dmaengine_terminate_all(master->dma_tx); 952 dmaengine_terminate_all(master->dma_tx);
953 } else { 953 } else {
954 ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion, 954 timeout = wait_for_completion_timeout(
955 IMX_DMA_TIMEOUT); 955 &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
956 if (!ret) { 956 if (!timeout) {
957 pr_warn("%s %s: I/O Error in DMA RX\n", 957 pr_warn("%s %s: I/O Error in DMA RX\n",
958 dev_driver_string(&master->dev), 958 dev_driver_string(&master->dev),
959 dev_name(&master->dev)); 959 dev_name(&master->dev));
@@ -968,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
968 spi_imx->dma_finished = 1; 968 spi_imx->dma_finished = 1;
969 spi_imx->devtype_data->trigger(spi_imx); 969 spi_imx->devtype_data->trigger(spi_imx);
970 970
971 if (!ret) 971 if (!timeout)
972 ret = -ETIMEDOUT; 972 ret = -ETIMEDOUT;
973 else if (ret > 0) 973 else
974 ret = transfer->len; 974 ret = transfer->len;
975 975
976 return ret; 976 return ret;
@@ -1080,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev)
1080 struct spi_master *master; 1080 struct spi_master *master;
1081 struct spi_imx_data *spi_imx; 1081 struct spi_imx_data *spi_imx;
1082 struct resource *res; 1082 struct resource *res;
1083 int i, ret, num_cs; 1083 int i, ret, num_cs, irq;
1084 1084
1085 if (!np && !mxc_platform_info) { 1085 if (!np && !mxc_platform_info) {
1086 dev_err(&pdev->dev, "can't get the platform data\n"); 1086 dev_err(&pdev->dev, "can't get the platform data\n");
@@ -1147,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev)
1147 goto out_master_put; 1147 goto out_master_put;
1148 } 1148 }
1149 1149
1150 spi_imx->irq = platform_get_irq(pdev, 0); 1150 irq = platform_get_irq(pdev, 0);
1151 if (spi_imx->irq < 0) { 1151 if (irq < 0) {
1152 ret = spi_imx->irq; 1152 ret = irq;
1153 goto out_master_put; 1153 goto out_master_put;
1154 } 1154 }
1155 1155
1156 ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0, 1156 ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
1157 dev_name(&pdev->dev), spi_imx); 1157 dev_name(&pdev->dev), spi_imx);
1158 if (ret) { 1158 if (ret) {
1159 dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); 1159 dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
1160 goto out_master_put; 1160 goto out_master_put;
1161 } 1161 }
1162 1162
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 41c5765be746..ba72347cb99d 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#include <linux/init.h> 17#include <linux/init.h>
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 1bbac0378bf7..5468fc70dbf8 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -85,7 +85,7 @@ struct meson_spifc {
85 struct device *dev; 85 struct device *dev;
86}; 86};
87 87
88static struct regmap_config spifc_regmap_config = { 88static const struct regmap_config spifc_regmap_config = {
89 .reg_bits = 32, 89 .reg_bits = 32,
90 .val_bits = 32, 90 .val_bits = 32,
91 .reg_stride = 4, 91 .reg_stride = 4,
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 4045a1e580e1..5b0e9a3e83f6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
282 dmaengine_submit(desc); 282 dmaengine_submit(desc);
283 dma_async_issue_pending(ssp->dmach); 283 dma_async_issue_pending(ssp->dmach);
284 284
285 ret = wait_for_completion_timeout(&spi->c, 285 if (!wait_for_completion_timeout(&spi->c,
286 msecs_to_jiffies(SSP_TIMEOUT)); 286 msecs_to_jiffies(SSP_TIMEOUT))) {
287 if (!ret) {
288 dev_err(ssp->dev, "DMA transfer timeout\n"); 287 dev_err(ssp->dev, "DMA transfer timeout\n");
289 ret = -ETIMEDOUT; 288 ret = -ETIMEDOUT;
290 dmaengine_terminate_all(ssp->dmach); 289 dmaengine_terminate_all(ssp->dmach);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 79399ae9c84c..d890d309dff9 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -16,11 +16,6 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */ 19 */
25#include <linux/kernel.h> 20#include <linux/kernel.h>
26#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index daf1ada5cd11..3c0844457c07 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -28,10 +28,6 @@
28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * You should have received a copy of the GNU General Public License along
33 * with this program; if not, write to the Free Software Foundation, Inc.,
34 * 675 Mass Ave, Cambridge, MA 02139, USA.
35 */ 31 */
36#include <linux/kernel.h> 32#include <linux/kernel.h>
37#include <linux/init.h> 33#include <linux/init.h>
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 3bc3cbabbbc0..4df8942058de 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -14,11 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */ 17 */
23 18
24#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 3dec9e0b99b8..861664776672 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -28,7 +28,12 @@
28/* Runtime PM autosuspend timeout: PM is fairly light on this driver */ 28/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
29#define SPI_AUTOSUSPEND_TIMEOUT 200 29#define SPI_AUTOSUSPEND_TIMEOUT 200
30 30
31#define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/ 31/* Some SoCs using this driver support up to 8 chip selects.
32 * It is up to the implementer to only use the chip selects
33 * that are available.
34 */
35#define ORION_NUM_CHIPSELECTS 8
36
32#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ 37#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
33 38
34#define ORION_SPI_IF_CTRL_REG 0x00 39#define ORION_SPI_IF_CTRL_REG 0x00
@@ -44,6 +49,10 @@
44#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF 49#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
45#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \ 50#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
46 ORION_SPI_MODE_CPHA) 51 ORION_SPI_MODE_CPHA)
52#define ORION_SPI_CS_MASK 0x1C
53#define ORION_SPI_CS_SHIFT 2
54#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \
55 ORION_SPI_CS_MASK)
47 56
48enum orion_spi_type { 57enum orion_spi_type {
49 ORION_SPI, 58 ORION_SPI,
@@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
215 return 0; 224 return 0;
216} 225}
217 226
218static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable) 227static void orion_spi_set_cs(struct spi_device *spi, bool enable)
219{ 228{
220 if (enable) 229 struct orion_spi *orion_spi;
230
231 orion_spi = spi_master_get_devdata(spi->master);
232
233 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
234 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
235 ORION_SPI_CS(spi->chip_select));
236
237 /* Chip select logic is inverted from spi_set_cs */
238 if (!enable)
221 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); 239 orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
222 else 240 else
223 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); 241 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
@@ -332,64 +350,31 @@ out:
332 return xfer->len - count; 350 return xfer->len - count;
333} 351}
334 352
335static int orion_spi_transfer_one_message(struct spi_master *master, 353static int orion_spi_transfer_one(struct spi_master *master,
336 struct spi_message *m) 354 struct spi_device *spi,
355 struct spi_transfer *t)
337{ 356{
338 struct orion_spi *orion_spi = spi_master_get_devdata(master);
339 struct spi_device *spi = m->spi;
340 struct spi_transfer *t = NULL;
341 int par_override = 0;
342 int status = 0; 357 int status = 0;
343 int cs_active = 0;
344
345 /* Load defaults */
346 status = orion_spi_setup_transfer(spi, NULL);
347 358
359 status = orion_spi_setup_transfer(spi, t);
348 if (status < 0) 360 if (status < 0)
349 goto msg_done; 361 return status;
350
351 list_for_each_entry(t, &m->transfers, transfer_list) {
352 if (par_override || t->speed_hz || t->bits_per_word) {
353 par_override = 1;
354 status = orion_spi_setup_transfer(spi, t);
355 if (status < 0)
356 break;
357 if (!t->speed_hz && !t->bits_per_word)
358 par_override = 0;
359 }
360
361 if (!cs_active) {
362 orion_spi_set_cs(orion_spi, 1);
363 cs_active = 1;
364 }
365 362
366 if (t->len) 363 if (t->len)
367 m->actual_length += orion_spi_write_read(spi, t); 364 orion_spi_write_read(spi, t);
368 365
369 if (t->delay_usecs) 366 return status;
370 udelay(t->delay_usecs); 367}
371
372 if (t->cs_change) {
373 orion_spi_set_cs(orion_spi, 0);
374 cs_active = 0;
375 }
376 }
377
378msg_done:
379 if (cs_active)
380 orion_spi_set_cs(orion_spi, 0);
381
382 m->status = status;
383 spi_finalize_current_message(master);
384 368
385 return 0; 369static int orion_spi_setup(struct spi_device *spi)
370{
371 return orion_spi_setup_transfer(spi, NULL);
386} 372}
387 373
388static int orion_spi_reset(struct orion_spi *orion_spi) 374static int orion_spi_reset(struct orion_spi *orion_spi)
389{ 375{
390 /* Verify that the CS is deasserted */ 376 /* Verify that the CS is deasserted */
391 orion_spi_set_cs(orion_spi, 0); 377 orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
392
393 return 0; 378 return 0;
394} 379}
395 380
@@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev)
442 427
443 /* we support only mode 0, and no options */ 428 /* we support only mode 0, and no options */
444 master->mode_bits = SPI_CPHA | SPI_CPOL; 429 master->mode_bits = SPI_CPHA | SPI_CPOL;
445 430 master->set_cs = orion_spi_set_cs;
446 master->transfer_one_message = orion_spi_transfer_one_message; 431 master->transfer_one = orion_spi_transfer_one;
447 master->num_chipselect = ORION_NUM_CHIPSELECTS; 432 master->num_chipselect = ORION_NUM_CHIPSELECTS;
433 master->setup = orion_spi_setup;
448 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); 434 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
449 master->auto_runtime_pm = true; 435 master->auto_runtime_pm = true;
450 436
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 62a9297e96ac..66a173939be8 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
111 * by using ->dma_running. 111 * by using ->dma_running.
112 */ 112 */
113 if (atomic_dec_and_test(&drv_data->dma_running)) { 113 if (atomic_dec_and_test(&drv_data->dma_running)) {
114 void __iomem *reg = drv_data->ioaddr;
115
116 /* 114 /*
117 * If the other CPU is still handling the ROR interrupt we 115 * If the other CPU is still handling the ROR interrupt we
118 * might not know about the error yet. So we re-check the 116 * might not know about the error yet. So we re-check the
119 * ROR bit here before we clear the status register. 117 * ROR bit here before we clear the status register.
120 */ 118 */
121 if (!error) { 119 if (!error) {
122 u32 status = read_SSSR(reg) & drv_data->mask_sr; 120 u32 status = pxa2xx_spi_read(drv_data, SSSR)
121 & drv_data->mask_sr;
123 error = status & SSSR_ROR; 122 error = status & SSSR_ROR;
124 } 123 }
125 124
126 /* Clear status & disable interrupts */ 125 /* Clear status & disable interrupts */
127 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 126 pxa2xx_spi_write(drv_data, SSCR1,
127 pxa2xx_spi_read(drv_data, SSCR1)
128 & ~drv_data->dma_cr1);
128 write_SSSR_CS(drv_data, drv_data->clear_sr); 129 write_SSSR_CS(drv_data, drv_data->clear_sr);
129 if (!pxa25x_ssp_comp(drv_data)) 130 if (!pxa25x_ssp_comp(drv_data))
130 write_SSTO(0, reg); 131 pxa2xx_spi_write(drv_data, SSTO, 0);
131 132
132 if (!error) { 133 if (!error) {
133 pxa2xx_spi_unmap_dma_buffers(drv_data); 134 pxa2xx_spi_unmap_dma_buffers(drv_data);
@@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
139 msg->state = pxa2xx_spi_next_transfer(drv_data); 140 msg->state = pxa2xx_spi_next_transfer(drv_data);
140 } else { 141 } else {
141 /* In case we got an error we disable the SSP now */ 142 /* In case we got an error we disable the SSP now */
142 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 143 pxa2xx_spi_write(drv_data, SSCR0,
144 pxa2xx_spi_read(drv_data, SSCR0)
145 & ~SSCR0_SSE);
143 146
144 msg->state = ERROR_STATE; 147 msg->state = ERROR_STATE;
145 } 148 }
@@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
247{ 250{
248 u32 status; 251 u32 status;
249 252
250 status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr; 253 status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
251 if (status & SSSR_ROR) { 254 if (status & SSSR_ROR) {
252 dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); 255 dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
253 256
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
index e8a26f25d5c0..2e0796a0003f 100644
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#include <linux/delay.h> 17#include <linux/delay.h>
@@ -25,6 +21,7 @@
25#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
26#include <linux/spi/pxa2xx_spi.h> 22#include <linux/spi/pxa2xx_spi.h>
27 23
24#include <mach/dma.h>
28#include "spi-pxa2xx.h" 25#include "spi-pxa2xx.h"
29 26
30#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 27#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
118 drv_data->dma_mapped = 0; 115 drv_data->dma_mapped = 0;
119} 116}
120 117
121static int wait_ssp_rx_stall(void const __iomem *ioaddr) 118static int wait_ssp_rx_stall(struct driver_data *drv_data)
122{ 119{
123 unsigned long limit = loops_per_jiffy << 1; 120 unsigned long limit = loops_per_jiffy << 1;
124 121
125 while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) 122 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
126 cpu_relax(); 123 cpu_relax();
127 124
128 return limit; 125 return limit;
@@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel)
141static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, 138static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
142 const char *msg) 139 const char *msg)
143{ 140{
144 void __iomem *reg = drv_data->ioaddr;
145
146 /* Stop and reset */ 141 /* Stop and reset */
147 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 142 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
148 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 143 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
149 write_SSSR_CS(drv_data, drv_data->clear_sr); 144 write_SSSR_CS(drv_data, drv_data->clear_sr);
150 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 145 pxa2xx_spi_write(drv_data, SSCR1,
146 pxa2xx_spi_read(drv_data, SSCR1)
147 & ~drv_data->dma_cr1);
151 if (!pxa25x_ssp_comp(drv_data)) 148 if (!pxa25x_ssp_comp(drv_data))
152 write_SSTO(0, reg); 149 pxa2xx_spi_write(drv_data, SSTO, 0);
153 pxa2xx_spi_flush(drv_data); 150 pxa2xx_spi_flush(drv_data);
154 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 151 pxa2xx_spi_write(drv_data, SSCR0,
152 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
155 153
156 pxa2xx_spi_unmap_dma_buffers(drv_data); 154 pxa2xx_spi_unmap_dma_buffers(drv_data);
157 155
@@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
163 161
164static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) 162static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
165{ 163{
166 void __iomem *reg = drv_data->ioaddr;
167 struct spi_message *msg = drv_data->cur_msg; 164 struct spi_message *msg = drv_data->cur_msg;
168 165
169 /* Clear and disable interrupts on SSP and DMA channels*/ 166 /* Clear and disable interrupts on SSP and DMA channels*/
170 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 167 pxa2xx_spi_write(drv_data, SSCR1,
168 pxa2xx_spi_read(drv_data, SSCR1)
169 & ~drv_data->dma_cr1);
171 write_SSSR_CS(drv_data, drv_data->clear_sr); 170 write_SSSR_CS(drv_data, drv_data->clear_sr);
172 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 171 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
173 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 172 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
228 && (drv_data->ssp_type == PXA25x_SSP)) { 227 && (drv_data->ssp_type == PXA25x_SSP)) {
229 228
230 /* Wait for rx to stall */ 229 /* Wait for rx to stall */
231 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) 230 if (wait_ssp_rx_stall(drv_data) == 0)
232 dev_err(&drv_data->pdev->dev, 231 dev_err(&drv_data->pdev->dev,
233 "dma_handler: ssp rx stall failed\n"); 232 "dma_handler: ssp rx stall failed\n");
234 233
@@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
240irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) 239irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
241{ 240{
242 u32 irq_status; 241 u32 irq_status;
243 void __iomem *reg = drv_data->ioaddr;
244 242
245 irq_status = read_SSSR(reg) & drv_data->mask_sr; 243 irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
246 if (irq_status & SSSR_ROR) { 244 if (irq_status & SSSR_ROR) {
247 pxa2xx_spi_dma_error_stop(drv_data, 245 pxa2xx_spi_dma_error_stop(drv_data,
248 "dma_transfer: fifo overrun"); 246 "dma_transfer: fifo overrun");
@@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
252 /* Check for false positive timeout */ 250 /* Check for false positive timeout */
253 if ((irq_status & SSSR_TINT) 251 if ((irq_status & SSSR_TINT)
254 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { 252 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
255 write_SSSR(SSSR_TINT, reg); 253 pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
256 return IRQ_HANDLED; 254 return IRQ_HANDLED;
257 } 255 }
258 256
@@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
261 /* Clear and disable timeout interrupt, do the rest in 259 /* Clear and disable timeout interrupt, do the rest in
262 * dma_transfer_complete */ 260 * dma_transfer_complete */
263 if (!pxa25x_ssp_comp(drv_data)) 261 if (!pxa25x_ssp_comp(drv_data))
264 write_SSTO(0, reg); 262 pxa2xx_spi_write(drv_data, SSTO, 0);
265 263
266 /* finish this transfer, start the next */ 264 /* finish this transfer, start the next */
267 pxa2xx_spi_dma_transfer_complete(drv_data); 265 pxa2xx_spi_dma_transfer_complete(drv_data);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 23822e7df6c1..6f72ad01e041 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 14 */
19 15
20#include <linux/init.h> 16#include <linux/init.h>
@@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
45MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
46MODULE_ALIAS("platform:pxa2xx-spi"); 42MODULE_ALIAS("platform:pxa2xx-spi");
47 43
48#define MAX_BUSES 3
49
50#define TIMOUT_DFLT 1000 44#define TIMOUT_DFLT 1000
51 45
52/* 46/*
@@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
162 156
163static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) 157static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
164{ 158{
165 void __iomem *reg = drv_data->ioaddr;
166 u32 mask; 159 u32 mask;
167 160
168 switch (drv_data->ssp_type) { 161 switch (drv_data->ssp_type) {
@@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
174 break; 167 break;
175 } 168 }
176 169
177 return (read_SSSR(reg) & mask) == mask; 170 return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
178} 171}
179 172
180static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data, 173static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
253 unsigned offset = 0x400; 246 unsigned offset = 0x400;
254 u32 value, orig; 247 u32 value, orig;
255 248
256 if (!is_lpss_ssp(drv_data))
257 return;
258
259 /* 249 /*
260 * Perform auto-detection of the LPSS SSP private registers. They 250 * Perform auto-detection of the LPSS SSP private registers. They
261 * can be either at 1k or 2k offset from the base address. 251 * can be either at 1k or 2k offset from the base address.
@@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
304{ 294{
305 u32 value; 295 u32 value;
306 296
307 if (!is_lpss_ssp(drv_data))
308 return;
309
310 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); 297 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
311 if (enable) 298 if (enable)
312 value &= ~SPI_CS_CONTROL_CS_HIGH; 299 value &= ~SPI_CS_CONTROL_CS_HIGH;
@@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data)
320 struct chip_data *chip = drv_data->cur_chip; 307 struct chip_data *chip = drv_data->cur_chip;
321 308
322 if (drv_data->ssp_type == CE4100_SSP) { 309 if (drv_data->ssp_type == CE4100_SSP) {
323 write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); 310 pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
324 return; 311 return;
325 } 312 }
326 313
@@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data)
334 return; 321 return;
335 } 322 }
336 323
337 lpss_ssp_cs_control(drv_data, true); 324 if (is_lpss_ssp(drv_data))
325 lpss_ssp_cs_control(drv_data, true);
338} 326}
339 327
340static void cs_deassert(struct driver_data *drv_data) 328static void cs_deassert(struct driver_data *drv_data)
@@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data)
354 return; 342 return;
355 } 343 }
356 344
357 lpss_ssp_cs_control(drv_data, false); 345 if (is_lpss_ssp(drv_data))
346 lpss_ssp_cs_control(drv_data, false);
358} 347}
359 348
360int pxa2xx_spi_flush(struct driver_data *drv_data) 349int pxa2xx_spi_flush(struct driver_data *drv_data)
361{ 350{
362 unsigned long limit = loops_per_jiffy << 1; 351 unsigned long limit = loops_per_jiffy << 1;
363 352
364 void __iomem *reg = drv_data->ioaddr;
365
366 do { 353 do {
367 while (read_SSSR(reg) & SSSR_RNE) { 354 while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
368 read_SSDR(reg); 355 pxa2xx_spi_read(drv_data, SSDR);
369 } 356 } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
370 } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
371 write_SSSR_CS(drv_data, SSSR_ROR); 357 write_SSSR_CS(drv_data, SSSR_ROR);
372 358
373 return limit; 359 return limit;
@@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
375 361
376static int null_writer(struct driver_data *drv_data) 362static int null_writer(struct driver_data *drv_data)
377{ 363{
378 void __iomem *reg = drv_data->ioaddr;
379 u8 n_bytes = drv_data->n_bytes; 364 u8 n_bytes = drv_data->n_bytes;
380 365
381 if (pxa2xx_spi_txfifo_full(drv_data) 366 if (pxa2xx_spi_txfifo_full(drv_data)
382 || (drv_data->tx == drv_data->tx_end)) 367 || (drv_data->tx == drv_data->tx_end))
383 return 0; 368 return 0;
384 369
385 write_SSDR(0, reg); 370 pxa2xx_spi_write(drv_data, SSDR, 0);
386 drv_data->tx += n_bytes; 371 drv_data->tx += n_bytes;
387 372
388 return 1; 373 return 1;
@@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data)
390 375
391static int null_reader(struct driver_data *drv_data) 376static int null_reader(struct driver_data *drv_data)
392{ 377{
393 void __iomem *reg = drv_data->ioaddr;
394 u8 n_bytes = drv_data->n_bytes; 378 u8 n_bytes = drv_data->n_bytes;
395 379
396 while ((read_SSSR(reg) & SSSR_RNE) 380 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
397 && (drv_data->rx < drv_data->rx_end)) { 381 && (drv_data->rx < drv_data->rx_end)) {
398 read_SSDR(reg); 382 pxa2xx_spi_read(drv_data, SSDR);
399 drv_data->rx += n_bytes; 383 drv_data->rx += n_bytes;
400 } 384 }
401 385
@@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data)
404 388
405static int u8_writer(struct driver_data *drv_data) 389static int u8_writer(struct driver_data *drv_data)
406{ 390{
407 void __iomem *reg = drv_data->ioaddr;
408
409 if (pxa2xx_spi_txfifo_full(drv_data) 391 if (pxa2xx_spi_txfifo_full(drv_data)
410 || (drv_data->tx == drv_data->tx_end)) 392 || (drv_data->tx == drv_data->tx_end))
411 return 0; 393 return 0;
412 394
413 write_SSDR(*(u8 *)(drv_data->tx), reg); 395 pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
414 ++drv_data->tx; 396 ++drv_data->tx;
415 397
416 return 1; 398 return 1;
@@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data)
418 400
419static int u8_reader(struct driver_data *drv_data) 401static int u8_reader(struct driver_data *drv_data)
420{ 402{
421 void __iomem *reg = drv_data->ioaddr; 403 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
422 404 && (drv_data->rx < drv_data->rx_end)) {
423 while ((read_SSSR(reg) & SSSR_RNE) 405 *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
424 && (drv_data->rx < drv_data->rx_end)) {
425 *(u8 *)(drv_data->rx) = read_SSDR(reg);
426 ++drv_data->rx; 406 ++drv_data->rx;
427 } 407 }
428 408
@@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data)
431 411
432static int u16_writer(struct driver_data *drv_data) 412static int u16_writer(struct driver_data *drv_data)
433{ 413{
434 void __iomem *reg = drv_data->ioaddr;
435
436 if (pxa2xx_spi_txfifo_full(drv_data) 414 if (pxa2xx_spi_txfifo_full(drv_data)
437 || (drv_data->tx == drv_data->tx_end)) 415 || (drv_data->tx == drv_data->tx_end))
438 return 0; 416 return 0;
439 417
440 write_SSDR(*(u16 *)(drv_data->tx), reg); 418 pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
441 drv_data->tx += 2; 419 drv_data->tx += 2;
442 420
443 return 1; 421 return 1;
@@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data)
445 423
446static int u16_reader(struct driver_data *drv_data) 424static int u16_reader(struct driver_data *drv_data)
447{ 425{
448 void __iomem *reg = drv_data->ioaddr; 426 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
449 427 && (drv_data->rx < drv_data->rx_end)) {
450 while ((read_SSSR(reg) & SSSR_RNE) 428 *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
451 && (drv_data->rx < drv_data->rx_end)) {
452 *(u16 *)(drv_data->rx) = read_SSDR(reg);
453 drv_data->rx += 2; 429 drv_data->rx += 2;
454 } 430 }
455 431
@@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data)
458 434
459static int u32_writer(struct driver_data *drv_data) 435static int u32_writer(struct driver_data *drv_data)
460{ 436{
461 void __iomem *reg = drv_data->ioaddr;
462
463 if (pxa2xx_spi_txfifo_full(drv_data) 437 if (pxa2xx_spi_txfifo_full(drv_data)
464 || (drv_data->tx == drv_data->tx_end)) 438 || (drv_data->tx == drv_data->tx_end))
465 return 0; 439 return 0;
466 440
467 write_SSDR(*(u32 *)(drv_data->tx), reg); 441 pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
468 drv_data->tx += 4; 442 drv_data->tx += 4;
469 443
470 return 1; 444 return 1;
@@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data)
472 446
473static int u32_reader(struct driver_data *drv_data) 447static int u32_reader(struct driver_data *drv_data)
474{ 448{
475 void __iomem *reg = drv_data->ioaddr; 449 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
476 450 && (drv_data->rx < drv_data->rx_end)) {
477 while ((read_SSSR(reg) & SSSR_RNE) 451 *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
478 && (drv_data->rx < drv_data->rx_end)) {
479 *(u32 *)(drv_data->rx) = read_SSDR(reg);
480 drv_data->rx += 4; 452 drv_data->rx += 4;
481 } 453 }
482 454
@@ -552,27 +524,25 @@ static void giveback(struct driver_data *drv_data)
552 524
553static void reset_sccr1(struct driver_data *drv_data) 525static void reset_sccr1(struct driver_data *drv_data)
554{ 526{
555 void __iomem *reg = drv_data->ioaddr;
556 struct chip_data *chip = drv_data->cur_chip; 527 struct chip_data *chip = drv_data->cur_chip;
557 u32 sccr1_reg; 528 u32 sccr1_reg;
558 529
559 sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; 530 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
560 sccr1_reg &= ~SSCR1_RFT; 531 sccr1_reg &= ~SSCR1_RFT;
561 sccr1_reg |= chip->threshold; 532 sccr1_reg |= chip->threshold;
562 write_SSCR1(sccr1_reg, reg); 533 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
563} 534}
564 535
565static void int_error_stop(struct driver_data *drv_data, const char* msg) 536static void int_error_stop(struct driver_data *drv_data, const char* msg)
566{ 537{
567 void __iomem *reg = drv_data->ioaddr;
568
569 /* Stop and reset SSP */ 538 /* Stop and reset SSP */
570 write_SSSR_CS(drv_data, drv_data->clear_sr); 539 write_SSSR_CS(drv_data, drv_data->clear_sr);
571 reset_sccr1(drv_data); 540 reset_sccr1(drv_data);
572 if (!pxa25x_ssp_comp(drv_data)) 541 if (!pxa25x_ssp_comp(drv_data))
573 write_SSTO(0, reg); 542 pxa2xx_spi_write(drv_data, SSTO, 0);
574 pxa2xx_spi_flush(drv_data); 543 pxa2xx_spi_flush(drv_data);
575 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 544 pxa2xx_spi_write(drv_data, SSCR0,
545 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
576 546
577 dev_err(&drv_data->pdev->dev, "%s\n", msg); 547 dev_err(&drv_data->pdev->dev, "%s\n", msg);
578 548
@@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
582 552
583static void int_transfer_complete(struct driver_data *drv_data) 553static void int_transfer_complete(struct driver_data *drv_data)
584{ 554{
585 void __iomem *reg = drv_data->ioaddr;
586
587 /* Stop SSP */ 555 /* Stop SSP */
588 write_SSSR_CS(drv_data, drv_data->clear_sr); 556 write_SSSR_CS(drv_data, drv_data->clear_sr);
589 reset_sccr1(drv_data); 557 reset_sccr1(drv_data);
590 if (!pxa25x_ssp_comp(drv_data)) 558 if (!pxa25x_ssp_comp(drv_data))
591 write_SSTO(0, reg); 559 pxa2xx_spi_write(drv_data, SSTO, 0);
592 560
593 /* Update total byte transferred return count actual bytes read */ 561 /* Update total byte transferred return count actual bytes read */
594 drv_data->cur_msg->actual_length += drv_data->len - 562 drv_data->cur_msg->actual_length += drv_data->len -
@@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data)
607 575
608static irqreturn_t interrupt_transfer(struct driver_data *drv_data) 576static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
609{ 577{
610 void __iomem *reg = drv_data->ioaddr; 578 u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
579 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
611 580
612 u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? 581 u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
613 drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
614
615 u32 irq_status = read_SSSR(reg) & irq_mask;
616 582
617 if (irq_status & SSSR_ROR) { 583 if (irq_status & SSSR_ROR) {
618 int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); 584 int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
@@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
620 } 586 }
621 587
622 if (irq_status & SSSR_TINT) { 588 if (irq_status & SSSR_TINT) {
623 write_SSSR(SSSR_TINT, reg); 589 pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
624 if (drv_data->read(drv_data)) { 590 if (drv_data->read(drv_data)) {
625 int_transfer_complete(drv_data); 591 int_transfer_complete(drv_data);
626 return IRQ_HANDLED; 592 return IRQ_HANDLED;
@@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
644 u32 bytes_left; 610 u32 bytes_left;
645 u32 sccr1_reg; 611 u32 sccr1_reg;
646 612
647 sccr1_reg = read_SSCR1(reg); 613 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
648 sccr1_reg &= ~SSCR1_TIE; 614 sccr1_reg &= ~SSCR1_TIE;
649 615
650 /* 616 /*
@@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
670 636
671 pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre); 637 pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
672 } 638 }
673 write_SSCR1(sccr1_reg, reg); 639 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
674 } 640 }
675 641
676 /* We did something */ 642 /* We did something */
@@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
680static irqreturn_t ssp_int(int irq, void *dev_id) 646static irqreturn_t ssp_int(int irq, void *dev_id)
681{ 647{
682 struct driver_data *drv_data = dev_id; 648 struct driver_data *drv_data = dev_id;
683 void __iomem *reg = drv_data->ioaddr;
684 u32 sccr1_reg; 649 u32 sccr1_reg;
685 u32 mask = drv_data->mask_sr; 650 u32 mask = drv_data->mask_sr;
686 u32 status; 651 u32 status;
@@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
700 * are all set to one. That means that the device is already 665 * are all set to one. That means that the device is already
701 * powered off. 666 * powered off.
702 */ 667 */
703 status = read_SSSR(reg); 668 status = pxa2xx_spi_read(drv_data, SSSR);
704 if (status == ~0) 669 if (status == ~0)
705 return IRQ_NONE; 670 return IRQ_NONE;
706 671
707 sccr1_reg = read_SSCR1(reg); 672 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
708 673
709 /* Ignore possible writes if we don't need to write */ 674 /* Ignore possible writes if we don't need to write */
710 if (!(sccr1_reg & SSCR1_TIE)) 675 if (!(sccr1_reg & SSCR1_TIE))
@@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
715 680
716 if (!drv_data->cur_msg) { 681 if (!drv_data->cur_msg) {
717 682
718 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 683 pxa2xx_spi_write(drv_data, SSCR0,
719 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 684 pxa2xx_spi_read(drv_data, SSCR0)
685 & ~SSCR0_SSE);
686 pxa2xx_spi_write(drv_data, SSCR1,
687 pxa2xx_spi_read(drv_data, SSCR1)
688 & ~drv_data->int_cr1);
720 if (!pxa25x_ssp_comp(drv_data)) 689 if (!pxa25x_ssp_comp(drv_data))
721 write_SSTO(0, reg); 690 pxa2xx_spi_write(drv_data, SSTO, 0);
722 write_SSSR_CS(drv_data, drv_data->clear_sr); 691 write_SSSR_CS(drv_data, drv_data->clear_sr);
723 692
724 dev_err(&drv_data->pdev->dev, 693 dev_err(&drv_data->pdev->dev,
@@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data)
787 struct spi_transfer *transfer = NULL; 756 struct spi_transfer *transfer = NULL;
788 struct spi_transfer *previous = NULL; 757 struct spi_transfer *previous = NULL;
789 struct chip_data *chip = NULL; 758 struct chip_data *chip = NULL;
790 void __iomem *reg = drv_data->ioaddr;
791 u32 clk_div = 0; 759 u32 clk_div = 0;
792 u8 bits = 0; 760 u8 bits = 0;
793 u32 speed = 0; 761 u32 speed = 0;
@@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data)
931 899
932 /* Clear status and start DMA engine */ 900 /* Clear status and start DMA engine */
933 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 901 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
934 write_SSSR(drv_data->clear_sr, reg); 902 pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
935 903
936 pxa2xx_spi_dma_start(drv_data); 904 pxa2xx_spi_dma_start(drv_data);
937 } else { 905 } else {
@@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data)
944 } 912 }
945 913
946 if (is_lpss_ssp(drv_data)) { 914 if (is_lpss_ssp(drv_data)) {
947 if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold) 915 if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
948 write_SSIRF(chip->lpss_rx_threshold, reg); 916 != chip->lpss_rx_threshold)
949 if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold) 917 pxa2xx_spi_write(drv_data, SSIRF,
950 write_SSITF(chip->lpss_tx_threshold, reg); 918 chip->lpss_rx_threshold);
919 if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
920 != chip->lpss_tx_threshold)
921 pxa2xx_spi_write(drv_data, SSITF,
922 chip->lpss_tx_threshold);
951 } 923 }
952 924
953 if (is_quark_x1000_ssp(drv_data) && 925 if (is_quark_x1000_ssp(drv_data) &&
954 (read_DDS_RATE(reg) != chip->dds_rate)) 926 (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
955 write_DDS_RATE(chip->dds_rate, reg); 927 pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
956 928
957 /* see if we need to reload the config registers */ 929 /* see if we need to reload the config registers */
958 if ((read_SSCR0(reg) != cr0) || 930 if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
959 (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) { 931 || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
960 932 != (cr1 & change_mask)) {
961 /* stop the SSP, and update the other bits */ 933 /* stop the SSP, and update the other bits */
962 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 934 pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
963 if (!pxa25x_ssp_comp(drv_data)) 935 if (!pxa25x_ssp_comp(drv_data))
964 write_SSTO(chip->timeout, reg); 936 pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
965 /* first set CR1 without interrupt and service enables */ 937 /* first set CR1 without interrupt and service enables */
966 write_SSCR1(cr1 & change_mask, reg); 938 pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
967 /* restart the SSP */ 939 /* restart the SSP */
968 write_SSCR0(cr0, reg); 940 pxa2xx_spi_write(drv_data, SSCR0, cr0);
969 941
970 } else { 942 } else {
971 if (!pxa25x_ssp_comp(drv_data)) 943 if (!pxa25x_ssp_comp(drv_data))
972 write_SSTO(chip->timeout, reg); 944 pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
973 } 945 }
974 946
975 cs_assert(drv_data); 947 cs_assert(drv_data);
976 948
977 /* after chip select, release the data by enabling service 949 /* after chip select, release the data by enabling service
978 * requests and interrupts, without changing any mode bits */ 950 * requests and interrupts, without changing any mode bits */
979 write_SSCR1(cr1, reg); 951 pxa2xx_spi_write(drv_data, SSCR1, cr1);
980} 952}
981 953
982static int pxa2xx_spi_transfer_one_message(struct spi_master *master, 954static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
@@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
1005 struct driver_data *drv_data = spi_master_get_devdata(master); 977 struct driver_data *drv_data = spi_master_get_devdata(master);
1006 978
1007 /* Disable the SSP now */ 979 /* Disable the SSP now */
1008 write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, 980 pxa2xx_spi_write(drv_data, SSCR0,
1009 drv_data->ioaddr); 981 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
1010 982
1011 return 0; 983 return 0;
1012} 984}
@@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1289 struct driver_data *drv_data; 1261 struct driver_data *drv_data;
1290 struct ssp_device *ssp; 1262 struct ssp_device *ssp;
1291 int status; 1263 int status;
1264 u32 tmp;
1292 1265
1293 platform_info = dev_get_platdata(dev); 1266 platform_info = dev_get_platdata(dev);
1294 if (!platform_info) { 1267 if (!platform_info) {
@@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1386 drv_data->max_clk_rate = clk_get_rate(ssp->clk); 1359 drv_data->max_clk_rate = clk_get_rate(ssp->clk);
1387 1360
1388 /* Load default SSP configuration */ 1361 /* Load default SSP configuration */
1389 write_SSCR0(0, drv_data->ioaddr); 1362 pxa2xx_spi_write(drv_data, SSCR0, 0);
1390 switch (drv_data->ssp_type) { 1363 switch (drv_data->ssp_type) {
1391 case QUARK_X1000_SSP: 1364 case QUARK_X1000_SSP:
1392 write_SSCR1(QUARK_X1000_SSCR1_RxTresh( 1365 tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
1393 RX_THRESH_QUARK_X1000_DFLT) | 1366 | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
1394 QUARK_X1000_SSCR1_TxTresh( 1367 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1395 TX_THRESH_QUARK_X1000_DFLT),
1396 drv_data->ioaddr);
1397 1368
1398 /* using the Motorola SPI protocol and use 8 bit frame */ 1369 /* using the Motorola SPI protocol and use 8 bit frame */
1399 write_SSCR0(QUARK_X1000_SSCR0_Motorola 1370 pxa2xx_spi_write(drv_data, SSCR0,
1400 | QUARK_X1000_SSCR0_DataSize(8), 1371 QUARK_X1000_SSCR0_Motorola
1401 drv_data->ioaddr); 1372 | QUARK_X1000_SSCR0_DataSize(8));
1402 break; 1373 break;
1403 default: 1374 default:
1404 write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | 1375 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
1405 SSCR1_TxTresh(TX_THRESH_DFLT), 1376 SSCR1_TxTresh(TX_THRESH_DFLT);
1406 drv_data->ioaddr); 1377 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1407 write_SSCR0(SSCR0_SCR(2) 1378 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
1408 | SSCR0_Motorola 1379 pxa2xx_spi_write(drv_data, SSCR0, tmp);
1409 | SSCR0_DataSize(8),
1410 drv_data->ioaddr);
1411 break; 1380 break;
1412 } 1381 }
1413 1382
1414 if (!pxa25x_ssp_comp(drv_data)) 1383 if (!pxa25x_ssp_comp(drv_data))
1415 write_SSTO(0, drv_data->ioaddr); 1384 pxa2xx_spi_write(drv_data, SSTO, 0);
1416 1385
1417 if (!is_quark_x1000_ssp(drv_data)) 1386 if (!is_quark_x1000_ssp(drv_data))
1418 write_SSPSP(0, drv_data->ioaddr); 1387 pxa2xx_spi_write(drv_data, SSPSP, 0);
1419 1388
1420 lpss_ssp_setup(drv_data); 1389 if (is_lpss_ssp(drv_data))
1390 lpss_ssp_setup(drv_data);
1421 1391
1422 tasklet_init(&drv_data->pump_transfers, pump_transfers, 1392 tasklet_init(&drv_data->pump_transfers, pump_transfers,
1423 (unsigned long)drv_data); 1393 (unsigned long)drv_data);
@@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1460 pm_runtime_get_sync(&pdev->dev); 1430 pm_runtime_get_sync(&pdev->dev);
1461 1431
1462 /* Disable the SSP at the peripheral and SOC level */ 1432 /* Disable the SSP at the peripheral and SOC level */
1463 write_SSCR0(0, drv_data->ioaddr); 1433 pxa2xx_spi_write(drv_data, SSCR0, 0);
1464 clk_disable_unprepare(ssp->clk); 1434 clk_disable_unprepare(ssp->clk);
1465 1435
1466 /* Release DMA */ 1436 /* Release DMA */
@@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
1497 status = spi_master_suspend(drv_data->master); 1467 status = spi_master_suspend(drv_data->master);
1498 if (status != 0) 1468 if (status != 0)
1499 return status; 1469 return status;
1500 write_SSCR0(0, drv_data->ioaddr); 1470 pxa2xx_spi_write(drv_data, SSCR0, 0);
1501 1471
1502 if (!pm_runtime_suspended(dev)) 1472 if (!pm_runtime_suspended(dev))
1503 clk_disable_unprepare(ssp->clk); 1473 clk_disable_unprepare(ssp->clk);
@@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev)
1518 clk_prepare_enable(ssp->clk); 1488 clk_prepare_enable(ssp->clk);
1519 1489
1520 /* Restore LPSS private register bits */ 1490 /* Restore LPSS private register bits */
1521 lpss_ssp_setup(drv_data); 1491 if (is_lpss_ssp(drv_data))
1492 lpss_ssp_setup(drv_data);
1522 1493
1523 /* Start the queue running */ 1494 /* Start the queue running */
1524 status = spi_master_resume(drv_data->master); 1495 status = spi_master_resume(drv_data->master);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 6bec59c90cd4..85a58c906869 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -115,23 +115,17 @@ struct chip_data {
115 void (*cs_control)(u32 command); 115 void (*cs_control)(u32 command);
116}; 116};
117 117
118#define DEFINE_SSP_REG(reg, off) \ 118static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
119static inline u32 read_##reg(void const __iomem *p) \ 119 unsigned reg)
120{ return __raw_readl(p + (off)); } \ 120{
121\ 121 return __raw_readl(drv_data->ioaddr + reg);
122static inline void write_##reg(u32 v, void __iomem *p) \ 122}
123{ __raw_writel(v, p + (off)); } 123
124 124static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
125DEFINE_SSP_REG(SSCR0, 0x00) 125 unsigned reg, u32 val)
126DEFINE_SSP_REG(SSCR1, 0x04) 126{
127DEFINE_SSP_REG(SSSR, 0x08) 127 __raw_writel(val, drv_data->ioaddr + reg);
128DEFINE_SSP_REG(SSITR, 0x0c) 128}
129DEFINE_SSP_REG(SSDR, 0x10)
130DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */
131DEFINE_SSP_REG(SSTO, 0x28)
132DEFINE_SSP_REG(SSPSP, 0x2c)
133DEFINE_SSP_REG(SSITF, SSITF)
134DEFINE_SSP_REG(SSIRF, SSIRF)
135 129
136#define START_STATE ((void *)0) 130#define START_STATE ((void *)0)
137#define RUNNING_STATE ((void *)1) 131#define RUNNING_STATE ((void *)1)
@@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
155 149
156static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) 150static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
157{ 151{
158 void __iomem *reg = drv_data->ioaddr;
159
160 if (drv_data->ssp_type == CE4100_SSP || 152 if (drv_data->ssp_type == CE4100_SSP ||
161 drv_data->ssp_type == QUARK_X1000_SSP) 153 drv_data->ssp_type == QUARK_X1000_SSP)
162 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; 154 val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
163 155
164 write_SSSR(val, reg); 156 pxa2xx_spi_write(drv_data, SSSR, val);
165} 157}
166 158
167extern int pxa2xx_spi_flush(struct driver_data *drv_data); 159extern int pxa2xx_spi_flush(struct driver_data *drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index e7fb5a0d2e8d..ff9cdbdb6672 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
337static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) 337static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
338{ 338{
339 struct spi_qup *controller = spi_master_get_devdata(spi->master); 339 struct spi_qup *controller = spi_master_get_devdata(spi->master);
340 u32 config, iomode, mode; 340 u32 config, iomode, mode, control;
341 int ret, n_words, w_size; 341 int ret, n_words, w_size;
342 342
343 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { 343 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
@@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
392 392
393 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); 393 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
394 394
395 control = readl_relaxed(controller->base + SPI_IO_CONTROL);
396
397 if (spi->mode & SPI_CPOL)
398 control |= SPI_IO_C_CLK_IDLE_HIGH;
399 else
400 control &= ~SPI_IO_C_CLK_IDLE_HIGH;
401
402 writel_relaxed(control, controller->base + SPI_IO_CONTROL);
403
395 config = readl_relaxed(controller->base + SPI_CONFIG); 404 config = readl_relaxed(controller->base + SPI_CONFIG);
396 405
397 if (spi->mode & SPI_LOOP) 406 if (spi->mode & SPI_LOOP)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index daabbabd26b0..1a777dc261d6 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
437 rs->state &= ~TXBUSY; 437 rs->state &= ~TXBUSY;
438 spin_unlock_irqrestore(&rs->lock, flags); 438 spin_unlock_irqrestore(&rs->lock, flags);
439 439
440 rxdesc = NULL;
440 if (rs->rx) { 441 if (rs->rx) {
441 rxconf.direction = rs->dma_rx.direction; 442 rxconf.direction = rs->dma_rx.direction;
442 rxconf.src_addr = rs->dma_rx.addr; 443 rxconf.src_addr = rs->dma_rx.addr;
@@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
453 rxdesc->callback_param = rs; 454 rxdesc->callback_param = rs;
454 } 455 }
455 456
457 txdesc = NULL;
456 if (rs->tx) { 458 if (rs->tx) {
457 txconf.direction = rs->dma_tx.direction; 459 txconf.direction = rs->dma_tx.direction;
458 txconf.dst_addr = rs->dma_tx.addr; 460 txconf.dst_addr = rs->dma_tx.addr;
@@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
470 } 472 }
471 473
472 /* rx must be started before tx due to spi instinct */ 474 /* rx must be started before tx due to spi instinct */
473 if (rs->rx) { 475 if (rxdesc) {
474 spin_lock_irqsave(&rs->lock, flags); 476 spin_lock_irqsave(&rs->lock, flags);
475 rs->state |= RXBUSY; 477 rs->state |= RXBUSY;
476 spin_unlock_irqrestore(&rs->lock, flags); 478 spin_unlock_irqrestore(&rs->lock, flags);
@@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
478 dma_async_issue_pending(rs->dma_rx.ch); 480 dma_async_issue_pending(rs->dma_rx.ch);
479 } 481 }
480 482
481 if (rs->tx) { 483 if (txdesc) {
482 spin_lock_irqsave(&rs->lock, flags); 484 spin_lock_irqsave(&rs->lock, flags);
483 rs->state |= TXBUSY; 485 rs->state |= TXBUSY;
484 spin_unlock_irqrestore(&rs->lock, flags); 486 spin_unlock_irqrestore(&rs->lock, flags);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2071f788c6fb..46ce47076e63 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -15,11 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */ 18 */
24 19
25#include <linux/module.h> 20#include <linux/module.h>
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 37b19836f5cb..9231c34b5a5c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 14 */
19 15
20#include <linux/init.h> 16#include <linux/init.h>
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 237f2e7a7179..5a56acf8a43e 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index fc29233d0650..20e800e70442 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -16,11 +16,6 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 *
24 */ 19 */
25 20
26#include <linux/clk.h> 21#include <linux/clk.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 3ab7a21445fc..e57eec0b2f46 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,6 +82,8 @@ struct sh_msiof_spi_priv {
82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ 82#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ 83#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ 84#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
85#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
86#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
85#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ 87#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
86#define MDR1_FLD_SHIFT 2 88#define MDR1_FLD_SHIFT 2
87#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ 89#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
@@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
241 243
242static struct { 244static struct {
243 unsigned short div; 245 unsigned short div;
244 unsigned short scr; 246 unsigned short brdv;
245} const sh_msiof_spi_clk_table[] = { 247} const sh_msiof_spi_div_table[] = {
246 { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 }, 248 { 1, SCR_BRDV_DIV_1 },
247 { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 }, 249 { 2, SCR_BRDV_DIV_2 },
248 { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 }, 250 { 4, SCR_BRDV_DIV_4 },
249 { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 }, 251 { 8, SCR_BRDV_DIV_8 },
250 { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 }, 252 { 16, SCR_BRDV_DIV_16 },
251 { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 }, 253 { 32, SCR_BRDV_DIV_32 },
252 { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
253 { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
254 { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
255 { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
256 { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
257}; 254};
258 255
259static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, 256static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
260 unsigned long parent_rate, u32 spi_hz) 257 unsigned long parent_rate, u32 spi_hz)
261{ 258{
262 unsigned long div = 1024; 259 unsigned long div = 1024;
260 u32 brps, scr;
263 size_t k; 261 size_t k;
264 262
265 if (!WARN_ON(!spi_hz || !parent_rate)) 263 if (!WARN_ON(!spi_hz || !parent_rate))
266 div = DIV_ROUND_UP(parent_rate, spi_hz); 264 div = DIV_ROUND_UP(parent_rate, spi_hz);
267 265
268 /* TODO: make more fine grained */ 266 for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
269 267 brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
270 for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { 268 if (brps <= 32) /* max of brdv is 32 */
271 if (sh_msiof_spi_clk_table[k].div >= div)
272 break; 269 break;
273 } 270 }
274 271
275 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); 272 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
276 273
277 sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); 274 scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
275 sh_msiof_write(p, TSCR, scr);
278 if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX)) 276 if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
279 sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); 277 sh_msiof_write(p, RSCR, scr);
278}
279
280static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
281{
282 /*
283 * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl
284 * b'000 : 0
285 * b'001 : 100
286 * b'010 : 200
287 * b'011 (SYNCDL only) : 300
288 * b'101 : 50
289 * b'110 : 150
290 */
291 if (dtdl_or_syncdl % 100)
292 return dtdl_or_syncdl / 100 + 5;
293 else
294 return dtdl_or_syncdl / 100;
295}
296
297static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
298{
299 u32 val;
300
301 if (!p->info)
302 return 0;
303
304 /* check if DTDL and SYNCDL is allowed value */
305 if (p->info->dtdl > 200 || p->info->syncdl > 300) {
306 dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
307 return 0;
308 }
309
310 /* check if the sum of DTDL and SYNCDL becomes an integer value */
311 if ((p->info->dtdl + p->info->syncdl) % 100) {
312 dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n");
313 return 0;
314 }
315
316 val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
317 val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
318
319 return val;
280} 320}
281 321
282static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, 322static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
@@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
296 tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; 336 tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
297 tmp |= !cs_high << MDR1_SYNCAC_SHIFT; 337 tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
298 tmp |= lsb_first << MDR1_BITLSB_SHIFT; 338 tmp |= lsb_first << MDR1_BITLSB_SHIFT;
339 tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
299 sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON); 340 sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
300 if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) { 341 if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
301 /* These bits are reserved if RX needs TX */ 342 /* These bits are reserved if RX needs TX */
@@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
501 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 542 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
502 543
503 544
504 pm_runtime_put_sync(&p->pdev->dev); 545 pm_runtime_put(&p->pdev->dev);
505 546
506 return 0; 547 return 0;
507} 548}
@@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
595 } 636 }
596 637
597 /* wait for tx fifo to be emptied / rx fifo to be filled */ 638 /* wait for tx fifo to be emptied / rx fifo to be filled */
598 ret = wait_for_completion_timeout(&p->done, HZ); 639 if (!wait_for_completion_timeout(&p->done, HZ)) {
599 if (!ret) {
600 dev_err(&p->pdev->dev, "PIO timeout\n"); 640 dev_err(&p->pdev->dev, "PIO timeout\n");
601 ret = -ETIMEDOUT; 641 ret = -ETIMEDOUT;
602 goto stop_reset; 642 goto stop_reset;
@@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
706 } 746 }
707 747
708 /* wait for tx fifo to be emptied / rx fifo to be filled */ 748 /* wait for tx fifo to be emptied / rx fifo to be filled */
709 ret = wait_for_completion_timeout(&p->done, HZ); 749 if (!wait_for_completion_timeout(&p->done, HZ)) {
710 if (!ret) {
711 dev_err(&p->pdev->dev, "DMA timeout\n"); 750 dev_err(&p->pdev->dev, "DMA timeout\n");
712 ret = -ETIMEDOUT; 751 ret = -ETIMEDOUT;
713 goto stop_reset; 752 goto stop_reset;
@@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
957 &info->tx_fifo_override); 996 &info->tx_fifo_override);
958 of_property_read_u32(np, "renesas,rx-fifo-size", 997 of_property_read_u32(np, "renesas,rx-fifo-size",
959 &info->rx_fifo_override); 998 &info->rx_fifo_override);
999 of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
1000 of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
960 1001
961 info->num_chipselect = num_cs; 1002 info->num_chipselect = num_cs;
962 1003
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 1cfc906dd174..502501187c9e 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -14,11 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */ 17 */
23 18
24#include <linux/module.h> 19#include <linux/module.h>
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index d075191476f0..f5715c9f68b0 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
818 818
819static const struct of_device_id spi_sirfsoc_of_match[] = { 819static const struct of_device_id spi_sirfsoc_of_match[] = {
820 { .compatible = "sirf,prima2-spi", }, 820 { .compatible = "sirf,prima2-spi", },
821 { .compatible = "sirf,marco-spi", },
822 {} 821 {}
823}; 822};
824MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); 823MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644
index 000000000000..2faeaa7b57a8
--- /dev/null
+++ b/drivers/spi/spi-st-ssc4.c
@@ -0,0 +1,504 @@
1/*
2 * Copyright (c) 2008-2014 STMicroelectronics Limited
3 *
4 * Author: Angus Clark <Angus.Clark@st.com>
5 * Patrice Chotard <patrice.chotard@st.com>
6 * Lee Jones <lee.jones@linaro.org>
7 *
8 * SPI master mode controller driver, used in STMicroelectronics devices.
9 *
10 * May be copied or modified under the terms of the GNU General Public
11 * License Version 2.0 only. See linux/COPYING for more information.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/pinctrl/consumer.h>
20#include <linux/platform_device.h>
21#include <linux/of.h>
22#include <linux/of_gpio.h>
23#include <linux/of_irq.h>
24#include <linux/pm_runtime.h>
25#include <linux/spi/spi.h>
26#include <linux/spi/spi_bitbang.h>
27
28/* SSC registers */
29#define SSC_BRG 0x000
30#define SSC_TBUF 0x004
31#define SSC_RBUF 0x008
32#define SSC_CTL 0x00C
33#define SSC_IEN 0x010
34#define SSC_I2C 0x018
35
36/* SSC Control */
37#define SSC_CTL_DATA_WIDTH_9 0x8
38#define SSC_CTL_DATA_WIDTH_MSK 0xf
39#define SSC_CTL_BM 0xf
40#define SSC_CTL_HB BIT(4)
41#define SSC_CTL_PH BIT(5)
42#define SSC_CTL_PO BIT(6)
43#define SSC_CTL_SR BIT(7)
44#define SSC_CTL_MS BIT(8)
45#define SSC_CTL_EN BIT(9)
46#define SSC_CTL_LPB BIT(10)
47#define SSC_CTL_EN_TX_FIFO BIT(11)
48#define SSC_CTL_EN_RX_FIFO BIT(12)
49#define SSC_CTL_EN_CLST_RX BIT(13)
50
51/* SSC Interrupt Enable */
52#define SSC_IEN_TEEN BIT(2)
53
54#define FIFO_SIZE 8
55
56struct spi_st {
57 /* SSC SPI Controller */
58 void __iomem *base;
59 struct clk *clk;
60 struct device *dev;
61
62 /* SSC SPI current transaction */
63 const u8 *tx_ptr;
64 u8 *rx_ptr;
65 u16 bytes_per_word;
66 unsigned int words_remaining;
67 unsigned int baud;
68 struct completion done;
69};
70
71static int spi_st_clk_enable(struct spi_st *spi_st)
72{
73 /*
74 * Current platforms use one of the core clocks for SPI and I2C.
75 * If we attempt to disable the clock, the system will hang.
76 *
77 * TODO: Remove this when platform supports power domains.
78 */
79 return 0;
80
81 return clk_prepare_enable(spi_st->clk);
82}
83
84static void spi_st_clk_disable(struct spi_st *spi_st)
85{
86 /*
87 * Current platforms use one of the core clocks for SPI and I2C.
88 * If we attempt to disable the clock, the system will hang.
89 *
90 * TODO: Remove this when platform supports power domains.
91 */
92 return;
93
94 clk_disable_unprepare(spi_st->clk);
95}
96
97/* Load the TX FIFO */
98static void ssc_write_tx_fifo(struct spi_st *spi_st)
99{
100 unsigned int count, i;
101 uint32_t word = 0;
102
103 if (spi_st->words_remaining > FIFO_SIZE)
104 count = FIFO_SIZE;
105 else
106 count = spi_st->words_remaining;
107
108 for (i = 0; i < count; i++) {
109 if (spi_st->tx_ptr) {
110 if (spi_st->bytes_per_word == 1) {
111 word = *spi_st->tx_ptr++;
112 } else {
113 word = *spi_st->tx_ptr++;
114 word = *spi_st->tx_ptr++ | (word << 8);
115 }
116 }
117 writel_relaxed(word, spi_st->base + SSC_TBUF);
118 }
119}
120
121/* Read the RX FIFO */
122static void ssc_read_rx_fifo(struct spi_st *spi_st)
123{
124 unsigned int count, i;
125 uint32_t word = 0;
126
127 if (spi_st->words_remaining > FIFO_SIZE)
128 count = FIFO_SIZE;
129 else
130 count = spi_st->words_remaining;
131
132 for (i = 0; i < count; i++) {
133 word = readl_relaxed(spi_st->base + SSC_RBUF);
134
135 if (spi_st->rx_ptr) {
136 if (spi_st->bytes_per_word == 1) {
137 *spi_st->rx_ptr++ = (uint8_t)word;
138 } else {
139 *spi_st->rx_ptr++ = (word >> 8);
140 *spi_st->rx_ptr++ = word & 0xff;
141 }
142 }
143 }
144 spi_st->words_remaining -= count;
145}
146
147static int spi_st_transfer_one(struct spi_master *master,
148 struct spi_device *spi, struct spi_transfer *t)
149{
150 struct spi_st *spi_st = spi_master_get_devdata(master);
151 uint32_t ctl = 0;
152
153 /* Setup transfer */
154 spi_st->tx_ptr = t->tx_buf;
155 spi_st->rx_ptr = t->rx_buf;
156
157 if (spi->bits_per_word > 8) {
158 /*
159 * Anything greater than 8 bits-per-word requires 2
160 * bytes-per-word in the RX/TX buffers
161 */
162 spi_st->bytes_per_word = 2;
163 spi_st->words_remaining = t->len / 2;
164
165 } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
166 /*
167 * If transfer is even-length, and 8 bits-per-word, then
168 * implement as half-length 16 bits-per-word transfer
169 */
170 spi_st->bytes_per_word = 2;
171 spi_st->words_remaining = t->len / 2;
172
173 /* Set SSC_CTL to 16 bits-per-word */
174 ctl = readl_relaxed(spi_st->base + SSC_CTL);
175 writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
176
177 readl_relaxed(spi_st->base + SSC_RBUF);
178
179 } else {
180 spi_st->bytes_per_word = 1;
181 spi_st->words_remaining = t->len;
182 }
183
184 reinit_completion(&spi_st->done);
185
186 /* Start transfer by writing to the TX FIFO */
187 ssc_write_tx_fifo(spi_st);
188 writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
189
190 /* Wait for transfer to complete */
191 wait_for_completion(&spi_st->done);
192
193 /* Restore SSC_CTL if necessary */
194 if (ctl)
195 writel_relaxed(ctl, spi_st->base + SSC_CTL);
196
197 spi_finalize_current_transfer(spi->master);
198
199 return t->len;
200}
201
202static void spi_st_cleanup(struct spi_device *spi)
203{
204 int cs = spi->cs_gpio;
205
206 if (gpio_is_valid(cs))
207 devm_gpio_free(&spi->dev, cs);
208}
209
210/* the spi->mode bits understood by this driver: */
211#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
212static int spi_st_setup(struct spi_device *spi)
213{
214 struct spi_st *spi_st = spi_master_get_devdata(spi->master);
215 u32 spi_st_clk, sscbrg, var;
216 u32 hz = spi->max_speed_hz;
217 int cs = spi->cs_gpio;
218 int ret;
219
220 if (!hz) {
221 dev_err(&spi->dev, "max_speed_hz unspecified\n");
222 return -EINVAL;
223 }
224
225 if (!gpio_is_valid(cs)) {
226 dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
227 return -EINVAL;
228 }
229
230 if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
231 dev_err(&spi->dev, "could not request gpio:%d\n", cs);
232 return -EINVAL;
233 }
234
235 ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
236 if (ret)
237 return ret;
238
239 spi_st_clk = clk_get_rate(spi_st->clk);
240
241 /* Set SSC_BRF */
242 sscbrg = spi_st_clk / (2 * hz);
243 if (sscbrg < 0x07 || sscbrg > BIT(16)) {
244 dev_err(&spi->dev,
245 "baudrate %d outside valid range %d\n", sscbrg, hz);
246 return -EINVAL;
247 }
248
249 spi_st->baud = spi_st_clk / (2 * sscbrg);
250 if (sscbrg == BIT(16)) /* 16-bit counter wraps */
251 sscbrg = 0x0;
252
253 writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
254
255 dev_dbg(&spi->dev,
256 "setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n",
257 hz, spi_st->baud, sscbrg);
258
259 /* Set SSC_CTL and enable SSC */
260 var = readl_relaxed(spi_st->base + SSC_CTL);
261 var |= SSC_CTL_MS;
262
263 if (spi->mode & SPI_CPOL)
264 var |= SSC_CTL_PO;
265 else
266 var &= ~SSC_CTL_PO;
267
268 if (spi->mode & SPI_CPHA)
269 var |= SSC_CTL_PH;
270 else
271 var &= ~SSC_CTL_PH;
272
273 if ((spi->mode & SPI_LSB_FIRST) == 0)
274 var |= SSC_CTL_HB;
275 else
276 var &= ~SSC_CTL_HB;
277
278 if (spi->mode & SPI_LOOP)
279 var |= SSC_CTL_LPB;
280 else
281 var &= ~SSC_CTL_LPB;
282
283 var &= ~SSC_CTL_DATA_WIDTH_MSK;
284 var |= (spi->bits_per_word - 1);
285
286 var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
287 var |= SSC_CTL_EN;
288
289 writel_relaxed(var, spi_st->base + SSC_CTL);
290
291 /* Clear the status register */
292 readl_relaxed(spi_st->base + SSC_RBUF);
293
294 return 0;
295}
296
297/* Interrupt fired when TX shift register becomes empty */
298static irqreturn_t spi_st_irq(int irq, void *dev_id)
299{
300 struct spi_st *spi_st = (struct spi_st *)dev_id;
301
302 /* Read RX FIFO */
303 ssc_read_rx_fifo(spi_st);
304
305 /* Fill TX FIFO */
306 if (spi_st->words_remaining) {
307 ssc_write_tx_fifo(spi_st);
308 } else {
309 /* TX/RX complete */
310 writel_relaxed(0x0, spi_st->base + SSC_IEN);
311 /*
312 * read SSC_IEN to ensure that this bit is set
313 * before re-enabling interrupt
314 */
315 readl(spi_st->base + SSC_IEN);
316 complete(&spi_st->done);
317 }
318
319 return IRQ_HANDLED;
320}
321
322static int spi_st_probe(struct platform_device *pdev)
323{
324 struct device_node *np = pdev->dev.of_node;
325 struct spi_master *master;
326 struct resource *res;
327 struct spi_st *spi_st;
328 int irq, ret = 0;
329 u32 var;
330
331 master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
332 if (!master)
333 return -ENOMEM;
334
335 master->dev.of_node = np;
336 master->mode_bits = MODEBITS;
337 master->setup = spi_st_setup;
338 master->cleanup = spi_st_cleanup;
339 master->transfer_one = spi_st_transfer_one;
340 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
341 master->auto_runtime_pm = true;
342 master->bus_num = pdev->id;
343 spi_st = spi_master_get_devdata(master);
344
345 spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
346 if (IS_ERR(spi_st->clk)) {
347 dev_err(&pdev->dev, "Unable to request clock\n");
348 return PTR_ERR(spi_st->clk);
349 }
350
351 ret = spi_st_clk_enable(spi_st);
352 if (ret)
353 return ret;
354
355 init_completion(&spi_st->done);
356
357 /* Get resources */
358 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
359 spi_st->base = devm_ioremap_resource(&pdev->dev, res);
360 if (IS_ERR(spi_st->base)) {
361 ret = PTR_ERR(spi_st->base);
362 goto clk_disable;
363 }
364
365 /* Disable I2C and Reset SSC */
366 writel_relaxed(0x0, spi_st->base + SSC_I2C);
367 var = readw_relaxed(spi_st->base + SSC_CTL);
368 var |= SSC_CTL_SR;
369 writel_relaxed(var, spi_st->base + SSC_CTL);
370
371 udelay(1);
372 var = readl_relaxed(spi_st->base + SSC_CTL);
373 var &= ~SSC_CTL_SR;
374 writel_relaxed(var, spi_st->base + SSC_CTL);
375
376 /* Set SSC into slave mode before reconfiguring PIO pins */
377 var = readl_relaxed(spi_st->base + SSC_CTL);
378 var &= ~SSC_CTL_MS;
379 writel_relaxed(var, spi_st->base + SSC_CTL);
380
381 irq = irq_of_parse_and_map(np, 0);
382 if (!irq) {
383 dev_err(&pdev->dev, "IRQ missing or invalid\n");
384 ret = -EINVAL;
385 goto clk_disable;
386 }
387
388 ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
389 pdev->name, spi_st);
390 if (ret) {
391 dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
392 goto clk_disable;
393 }
394
395 /* by default the device is on */
396 pm_runtime_set_active(&pdev->dev);
397 pm_runtime_enable(&pdev->dev);
398
399 platform_set_drvdata(pdev, master);
400
401 ret = devm_spi_register_master(&pdev->dev, master);
402 if (ret) {
403 dev_err(&pdev->dev, "Failed to register master\n");
404 goto clk_disable;
405 }
406
407 return 0;
408
409clk_disable:
410 spi_st_clk_disable(spi_st);
411
412 return ret;
413}
414
415static int spi_st_remove(struct platform_device *pdev)
416{
417 struct spi_master *master = platform_get_drvdata(pdev);
418 struct spi_st *spi_st = spi_master_get_devdata(master);
419
420 spi_st_clk_disable(spi_st);
421
422 pinctrl_pm_select_sleep_state(&pdev->dev);
423
424 return 0;
425}
426
427#ifdef CONFIG_PM
428static int spi_st_runtime_suspend(struct device *dev)
429{
430 struct spi_master *master = dev_get_drvdata(dev);
431 struct spi_st *spi_st = spi_master_get_devdata(master);
432
433 writel_relaxed(0, spi_st->base + SSC_IEN);
434 pinctrl_pm_select_sleep_state(dev);
435
436 spi_st_clk_disable(spi_st);
437
438 return 0;
439}
440
441static int spi_st_runtime_resume(struct device *dev)
442{
443 struct spi_master *master = dev_get_drvdata(dev);
444 struct spi_st *spi_st = spi_master_get_devdata(master);
445 int ret;
446
447 ret = spi_st_clk_enable(spi_st);
448 pinctrl_pm_select_default_state(dev);
449
450 return ret;
451}
452#endif
453
454#ifdef CONFIG_PM_SLEEP
455static int spi_st_suspend(struct device *dev)
456{
457 struct spi_master *master = dev_get_drvdata(dev);
458 int ret;
459
460 ret = spi_master_suspend(master);
461 if (ret)
462 return ret;
463
464 return pm_runtime_force_suspend(dev);
465}
466
467static int spi_st_resume(struct device *dev)
468{
469 struct spi_master *master = dev_get_drvdata(dev);
470 int ret;
471
472 ret = spi_master_resume(master);
473 if (ret)
474 return ret;
475
476 return pm_runtime_force_resume(dev);
477}
478#endif
479
480static const struct dev_pm_ops spi_st_pm = {
481 SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
482 SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
483};
484
485static struct of_device_id stm_spi_match[] = {
486 { .compatible = "st,comms-ssc4-spi", },
487 {},
488};
489MODULE_DEVICE_TABLE(of, stm_spi_match);
490
491static struct platform_driver spi_st_driver = {
492 .driver = {
493 .name = "spi-st",
494 .pm = &spi_st_pm,
495 .of_match_table = of_match_ptr(stm_spi_match),
496 },
497 .probe = spi_st_probe,
498 .remove = spi_st_remove,
499};
500module_platform_driver(spi_st_driver);
501
502MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
503MODULE_DESCRIPTION("STM SSC SPI driver");
504MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 6146c4cd6583..884a716e50cb 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
201 201
202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) 202static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
203{ 203{
204 int wlen, count, ret; 204 int wlen, count;
205 unsigned int cmd; 205 unsigned int cmd;
206 const u8 *txbuf; 206 const u8 *txbuf;
207 207
@@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
230 } 230 }
231 231
232 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 232 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
233 ret = wait_for_completion_timeout(&qspi->transfer_complete, 233 if (!wait_for_completion_timeout(&qspi->transfer_complete,
234 QSPI_COMPLETION_TIMEOUT); 234 QSPI_COMPLETION_TIMEOUT)) {
235 if (ret == 0) {
236 dev_err(qspi->dev, "write timed out\n"); 235 dev_err(qspi->dev, "write timed out\n");
237 return -ETIMEDOUT; 236 return -ETIMEDOUT;
238 } 237 }
@@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
245 244
246static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) 245static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
247{ 246{
248 int wlen, count, ret; 247 int wlen, count;
249 unsigned int cmd; 248 unsigned int cmd;
250 u8 *rxbuf; 249 u8 *rxbuf;
251 250
@@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
268 while (count) { 267 while (count) {
269 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); 268 dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
270 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); 269 ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
271 ret = wait_for_completion_timeout(&qspi->transfer_complete, 270 if (!wait_for_completion_timeout(&qspi->transfer_complete,
272 QSPI_COMPLETION_TIMEOUT); 271 QSPI_COMPLETION_TIMEOUT)) {
273 if (ret == 0) {
274 dev_err(qspi->dev, "read timed out\n"); 272 dev_err(qspi->dev, "read timed out\n");
275 return -ETIMEDOUT; 273 return -ETIMEDOUT;
276 } 274 }
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index be692ad50442..93dfcee0f987 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
18 */ 14 */
19 15
20#include <linux/delay.h> 16#include <linux/delay.h>
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 79bd84f43430..133f53a9c1d4 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -22,6 +22,8 @@
22#include <linux/spi/xilinx_spi.h> 22#include <linux/spi/xilinx_spi.h>
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#define XILINX_SPI_MAX_CS 32
26
25#define XILINX_SPI_NAME "xilinx_spi" 27#define XILINX_SPI_NAME "xilinx_spi"
26 28
27/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) 29/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
@@ -34,7 +36,8 @@
34#define XSPI_CR_MASTER_MODE 0x04 36#define XSPI_CR_MASTER_MODE 0x04
35#define XSPI_CR_CPOL 0x08 37#define XSPI_CR_CPOL 0x08
36#define XSPI_CR_CPHA 0x10 38#define XSPI_CR_CPHA 0x10
37#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL) 39#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \
40 XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
38#define XSPI_CR_TXFIFO_RESET 0x20 41#define XSPI_CR_TXFIFO_RESET 0x20
39#define XSPI_CR_RXFIFO_RESET 0x40 42#define XSPI_CR_RXFIFO_RESET 0x40
40#define XSPI_CR_MANUAL_SSELECT 0x80 43#define XSPI_CR_MANUAL_SSELECT 0x80
@@ -85,12 +88,11 @@ struct xilinx_spi {
85 88
86 u8 *rx_ptr; /* pointer in the Tx buffer */ 89 u8 *rx_ptr; /* pointer in the Tx buffer */
87 const u8 *tx_ptr; /* pointer in the Rx buffer */ 90 const u8 *tx_ptr; /* pointer in the Rx buffer */
88 int remaining_bytes; /* the number of bytes left to transfer */ 91 u8 bytes_per_word;
89 u8 bits_per_word; 92 int buffer_size; /* buffer size in words */
93 u32 cs_inactive; /* Level of the CS pins when inactive*/
90 unsigned int (*read_fn)(void __iomem *); 94 unsigned int (*read_fn)(void __iomem *);
91 void (*write_fn)(u32, void __iomem *); 95 void (*write_fn)(u32, void __iomem *);
92 void (*tx_fn)(struct xilinx_spi *);
93 void (*rx_fn)(struct xilinx_spi *);
94}; 96};
95 97
96static void xspi_write32(u32 val, void __iomem *addr) 98static void xspi_write32(u32 val, void __iomem *addr)
@@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr)
113 return ioread32be(addr); 115 return ioread32be(addr);
114} 116}
115 117
116static void xspi_tx8(struct xilinx_spi *xspi) 118static void xilinx_spi_tx(struct xilinx_spi *xspi)
117{ 119{
118 xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); 120 u32 data = 0;
119 xspi->tx_ptr++;
120}
121
122static void xspi_tx16(struct xilinx_spi *xspi)
123{
124 xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
125 xspi->tx_ptr += 2;
126}
127 121
128static void xspi_tx32(struct xilinx_spi *xspi) 122 if (!xspi->tx_ptr) {
129{ 123 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
130 xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); 124 return;
131 xspi->tx_ptr += 4;
132}
133
134static void xspi_rx8(struct xilinx_spi *xspi)
135{
136 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
137 if (xspi->rx_ptr) {
138 *xspi->rx_ptr = data & 0xff;
139 xspi->rx_ptr++;
140 } 125 }
141}
142 126
143static void xspi_rx16(struct xilinx_spi *xspi) 127 switch (xspi->bytes_per_word) {
144{ 128 case 1:
145 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); 129 data = *(u8 *)(xspi->tx_ptr);
146 if (xspi->rx_ptr) { 130 break;
147 *(u16 *)(xspi->rx_ptr) = data & 0xffff; 131 case 2:
148 xspi->rx_ptr += 2; 132 data = *(u16 *)(xspi->tx_ptr);
133 break;
134 case 4:
135 data = *(u32 *)(xspi->tx_ptr);
136 break;
149 } 137 }
138
139 xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
140 xspi->tx_ptr += xspi->bytes_per_word;
150} 141}
151 142
152static void xspi_rx32(struct xilinx_spi *xspi) 143static void xilinx_spi_rx(struct xilinx_spi *xspi)
153{ 144{
154 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); 145 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
155 if (xspi->rx_ptr) { 146
147 if (!xspi->rx_ptr)
148 return;
149
150 switch (xspi->bytes_per_word) {
151 case 1:
152 *(u8 *)(xspi->rx_ptr) = data;
153 break;
154 case 2:
155 *(u16 *)(xspi->rx_ptr) = data;
156 break;
157 case 4:
156 *(u32 *)(xspi->rx_ptr) = data; 158 *(u32 *)(xspi->rx_ptr) = data;
157 xspi->rx_ptr += 4; 159 break;
158 } 160 }
161
162 xspi->rx_ptr += xspi->bytes_per_word;
159} 163}
160 164
161static void xspi_init_hw(struct xilinx_spi *xspi) 165static void xspi_init_hw(struct xilinx_spi *xspi)
@@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
165 /* Reset the SPI device */ 169 /* Reset the SPI device */
166 xspi->write_fn(XIPIF_V123B_RESET_MASK, 170 xspi->write_fn(XIPIF_V123B_RESET_MASK,
167 regs_base + XIPIF_V123B_RESETR_OFFSET); 171 regs_base + XIPIF_V123B_RESETR_OFFSET);
168 /* Disable all the interrupts just in case */ 172 /* Enable the transmit empty interrupt, which we use to determine
169 xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); 173 * progress on the transmission.
170 /* Enable the global IPIF interrupt */ 174 */
171 xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, 175 xspi->write_fn(XSPI_INTR_TX_EMPTY,
172 regs_base + XIPIF_V123B_DGIER_OFFSET); 176 regs_base + XIPIF_V123B_IIER_OFFSET);
177 /* Disable the global IPIF interrupt */
178 xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
173 /* Deselect the slave on the SPI bus */ 179 /* Deselect the slave on the SPI bus */
174 xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); 180 xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
175 /* Disable the transmitter, enable Manual Slave Select Assertion, 181 /* Disable the transmitter, enable Manual Slave Select Assertion,
176 * put SPI controller into master mode, and enable it */ 182 * put SPI controller into master mode, and enable it */
177 xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | 183 xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
178 XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | 184 XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
179 XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); 185 regs_base + XSPI_CR_OFFSET);
180} 186}
181 187
182static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) 188static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
183{ 189{
184 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 190 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
191 u16 cr;
192 u32 cs;
185 193
186 if (is_on == BITBANG_CS_INACTIVE) { 194 if (is_on == BITBANG_CS_INACTIVE) {
187 /* Deselect the slave on the SPI bus */ 195 /* Deselect the slave on the SPI bus */
188 xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); 196 xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
189 } else if (is_on == BITBANG_CS_ACTIVE) { 197 return;
190 /* Set the SPI clock phase and polarity */
191 u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
192 & ~XSPI_CR_MODE_MASK;
193 if (spi->mode & SPI_CPHA)
194 cr |= XSPI_CR_CPHA;
195 if (spi->mode & SPI_CPOL)
196 cr |= XSPI_CR_CPOL;
197 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
198
199 /* We do not check spi->max_speed_hz here as the SPI clock
200 * frequency is not software programmable (the IP block design
201 * parameter)
202 */
203
204 /* Activate the chip select */
205 xspi->write_fn(~(0x0001 << spi->chip_select),
206 xspi->regs + XSPI_SSR_OFFSET);
207 } 198 }
199
200 /* Set the SPI clock phase and polarity */
201 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
202 if (spi->mode & SPI_CPHA)
203 cr |= XSPI_CR_CPHA;
204 if (spi->mode & SPI_CPOL)
205 cr |= XSPI_CR_CPOL;
206 if (spi->mode & SPI_LSB_FIRST)
207 cr |= XSPI_CR_LSB_FIRST;
208 if (spi->mode & SPI_LOOP)
209 cr |= XSPI_CR_LOOP;
210 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
211
212 /* We do not check spi->max_speed_hz here as the SPI clock
213 * frequency is not software programmable (the IP block design
214 * parameter)
215 */
216
217 cs = xspi->cs_inactive;
218 cs ^= BIT(spi->chip_select);
219
220 /* Activate the chip select */
221 xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
208} 222}
209 223
210/* spi_bitbang requires custom setup_transfer() to be defined if there is a 224/* spi_bitbang requires custom setup_transfer() to be defined if there is a
@@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
213static int xilinx_spi_setup_transfer(struct spi_device *spi, 227static int xilinx_spi_setup_transfer(struct spi_device *spi,
214 struct spi_transfer *t) 228 struct spi_transfer *t)
215{ 229{
216 return 0; 230 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
217}
218 231
219static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) 232 if (spi->mode & SPI_CS_HIGH)
220{ 233 xspi->cs_inactive &= ~BIT(spi->chip_select);
221 u8 sr; 234 else
235 xspi->cs_inactive |= BIT(spi->chip_select);
222 236
223 /* Fill the Tx FIFO with as many bytes as possible */ 237 return 0;
224 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
225 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
226 if (xspi->tx_ptr)
227 xspi->tx_fn(xspi);
228 else
229 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
230 xspi->remaining_bytes -= xspi->bits_per_word / 8;
231 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
232 }
233} 238}
234 239
235static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) 240static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
236{ 241{
237 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); 242 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
238 u32 ipif_ier; 243 int remaining_words; /* the number of words left to transfer */
244 bool use_irq = false;
245 u16 cr = 0;
239 246
240 /* We get here with transmitter inhibited */ 247 /* We get here with transmitter inhibited */
241 248
242 xspi->tx_ptr = t->tx_buf; 249 xspi->tx_ptr = t->tx_buf;
243 xspi->rx_ptr = t->rx_buf; 250 xspi->rx_ptr = t->rx_buf;
244 xspi->remaining_bytes = t->len; 251 remaining_words = t->len / xspi->bytes_per_word;
245 reinit_completion(&xspi->done); 252 reinit_completion(&xspi->done);
246 253
254 if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
255 use_irq = true;
256 xspi->write_fn(XSPI_INTR_TX_EMPTY,
257 xspi->regs + XIPIF_V123B_IISR_OFFSET);
258 /* Enable the global IPIF interrupt */
259 xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
260 xspi->regs + XIPIF_V123B_DGIER_OFFSET);
261 /* Inhibit irq to avoid spurious irqs on tx_empty*/
262 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
263 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
264 xspi->regs + XSPI_CR_OFFSET);
265 }
247 266
248 /* Enable the transmit empty interrupt, which we use to determine 267 while (remaining_words) {
249 * progress on the transmission. 268 int n_words, tx_words, rx_words;
250 */
251 ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
252 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
253 xspi->regs + XIPIF_V123B_IIER_OFFSET);
254 269
255 for (;;) { 270 n_words = min(remaining_words, xspi->buffer_size);
256 u16 cr;
257 u8 sr;
258 271
259 xilinx_spi_fill_tx_fifo(xspi); 272 tx_words = n_words;
273 while (tx_words--)
274 xilinx_spi_tx(xspi);
260 275
261 /* Start the transfer by not inhibiting the transmitter any 276 /* Start the transfer by not inhibiting the transmitter any
262 * longer 277 * longer
263 */ 278 */
264 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
265 ~XSPI_CR_TRANS_INHIBIT;
266 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
267 279
268 wait_for_completion(&xspi->done); 280 if (use_irq) {
281 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
282 wait_for_completion(&xspi->done);
283 } else
284 while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
285 XSPI_SR_TX_EMPTY_MASK))
286 ;
269 287
270 /* A transmit has just completed. Process received data and 288 /* A transmit has just completed. Process received data and
271 * check for more data to transmit. Always inhibit the 289 * check for more data to transmit. Always inhibit the
272 * transmitter while the Isr refills the transmit register/FIFO, 290 * transmitter while the Isr refills the transmit register/FIFO,
273 * or make sure it is stopped if we're done. 291 * or make sure it is stopped if we're done.
274 */ 292 */
275 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); 293 if (use_irq)
276 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, 294 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
277 xspi->regs + XSPI_CR_OFFSET); 295 xspi->regs + XSPI_CR_OFFSET);
278 296
279 /* Read out all the data from the Rx FIFO */ 297 /* Read out all the data from the Rx FIFO */
280 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); 298 rx_words = n_words;
281 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { 299 while (rx_words--)
282 xspi->rx_fn(xspi); 300 xilinx_spi_rx(xspi);
283 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); 301
284 } 302 remaining_words -= n_words;
285
286 /* See if there is more data to send */
287 if (xspi->remaining_bytes <= 0)
288 break;
289 } 303 }
290 304
291 /* Disable the transmit empty interrupt */ 305 if (use_irq)
292 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); 306 xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
293 307
294 return t->len - xspi->remaining_bytes; 308 return t->len;
295} 309}
296 310
297 311
@@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
316 return IRQ_HANDLED; 330 return IRQ_HANDLED;
317} 331}
318 332
333static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
334{
335 u8 sr;
336 int n_words = 0;
337
338 /*
339 * Before the buffer_size detection we reset the core
340 * to make sure we start with a clean state.
341 */
342 xspi->write_fn(XIPIF_V123B_RESET_MASK,
343 xspi->regs + XIPIF_V123B_RESETR_OFFSET);
344
345 /* Fill the Tx FIFO with as many words as possible */
346 do {
347 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
348 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
349 n_words++;
350 } while (!(sr & XSPI_SR_TX_FULL_MASK));
351
352 return n_words;
353}
354
319static const struct of_device_id xilinx_spi_of_match[] = { 355static const struct of_device_id xilinx_spi_of_match[] = {
320 { .compatible = "xlnx,xps-spi-2.00.a", }, 356 { .compatible = "xlnx,xps-spi-2.00.a", },
321 { .compatible = "xlnx,xps-spi-2.00.b", }, 357 { .compatible = "xlnx,xps-spi-2.00.b", },
@@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev)
348 return -EINVAL; 384 return -EINVAL;
349 } 385 }
350 386
387 if (num_cs > XILINX_SPI_MAX_CS) {
388 dev_err(&pdev->dev, "Invalid number of spi slaves\n");
389 return -EINVAL;
390 }
391
351 master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); 392 master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
352 if (!master) 393 if (!master)
353 return -ENODEV; 394 return -ENODEV;
354 395
355 /* the spi->mode bits understood by this driver: */ 396 /* the spi->mode bits understood by this driver: */
356 master->mode_bits = SPI_CPOL | SPI_CPHA; 397 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
398 SPI_CS_HIGH;
357 399
358 xspi = spi_master_get_devdata(master); 400 xspi = spi_master_get_devdata(master);
401 xspi->cs_inactive = 0xffffffff;
359 xspi->bitbang.master = master; 402 xspi->bitbang.master = master;
360 xspi->bitbang.chipselect = xilinx_spi_chipselect; 403 xspi->bitbang.chipselect = xilinx_spi_chipselect;
361 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; 404 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
@@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev)
392 } 435 }
393 436
394 master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); 437 master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
395 xspi->bits_per_word = bits_per_word; 438 xspi->bytes_per_word = bits_per_word / 8;
396 if (xspi->bits_per_word == 8) { 439 xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
397 xspi->tx_fn = xspi_tx8;
398 xspi->rx_fn = xspi_rx8;
399 } else if (xspi->bits_per_word == 16) {
400 xspi->tx_fn = xspi_tx16;
401 xspi->rx_fn = xspi_rx16;
402 } else if (xspi->bits_per_word == 32) {
403 xspi->tx_fn = xspi_tx32;
404 xspi->rx_fn = xspi_rx32;
405 } else {
406 ret = -EINVAL;
407 goto put_master;
408 }
409
410 /* SPI controller initializations */
411 xspi_init_hw(xspi);
412 440
413 xspi->irq = platform_get_irq(pdev, 0); 441 xspi->irq = platform_get_irq(pdev, 0);
414 if (xspi->irq < 0) { 442 if (xspi->irq >= 0) {
415 ret = xspi->irq; 443 /* Register for SPI Interrupt */
416 goto put_master; 444 ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
445 dev_name(&pdev->dev), xspi);
446 if (ret)
447 goto put_master;
417 } 448 }
418 449
419 /* Register for SPI Interrupt */ 450 /* SPI controller initializations */
420 ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, 451 xspi_init_hw(xspi);
421 dev_name(&pdev->dev), xspi);
422 if (ret)
423 goto put_master;
424 452
425 ret = spi_bitbang_start(&xspi->bitbang); 453 ret = spi_bitbang_start(&xspi->bitbang);
426 if (ret) { 454 if (ret) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 66a70e9bc743..c64a3e59fce3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 16 */
21 17
22#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master,
788 struct spi_transfer *xfer; 784 struct spi_transfer *xfer;
789 bool keep_cs = false; 785 bool keep_cs = false;
790 int ret = 0; 786 int ret = 0;
791 int ms = 1; 787 unsigned long ms = 1;
792 788
793 spi_set_cs(msg->spi, true); 789 spi_set_cs(msg->spi, true);
794 790
@@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master)
875EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 871EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
876 872
877/** 873/**
878 * spi_pump_messages - kthread work function which processes spi message queue 874 * __spi_pump_messages - function which processes spi message queue
879 * @work: pointer to kthread work struct contained in the master struct 875 * @master: master to process queue for
876 * @in_kthread: true if we are in the context of the message pump thread
880 * 877 *
881 * This function checks if there is any spi message in the queue that 878 * This function checks if there is any spi message in the queue that
882 * needs processing and if so call out to the driver to initialize hardware 879 * needs processing and if so call out to the driver to initialize hardware
883 * and transfer each message. 880 * and transfer each message.
884 * 881 *
882 * Note that it is called both from the kthread itself and also from
883 * inside spi_sync(); the queue extraction handling at the top of the
884 * function should deal with this safely.
885 */ 885 */
886static void spi_pump_messages(struct kthread_work *work) 886static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
887{ 887{
888 struct spi_master *master =
889 container_of(work, struct spi_master, pump_messages);
890 unsigned long flags; 888 unsigned long flags;
891 bool was_busy = false; 889 bool was_busy = false;
892 int ret; 890 int ret;
893 891
894 /* Lock queue and check for queue work */ 892 /* Lock queue */
895 spin_lock_irqsave(&master->queue_lock, flags); 893 spin_lock_irqsave(&master->queue_lock, flags);
894
895 /* Make sure we are not already running a message */
896 if (master->cur_msg) {
897 spin_unlock_irqrestore(&master->queue_lock, flags);
898 return;
899 }
900
901 /* If another context is idling the device then defer */
902 if (master->idling) {
903 queue_kthread_work(&master->kworker, &master->pump_messages);
904 spin_unlock_irqrestore(&master->queue_lock, flags);
905 return;
906 }
907
908 /* Check if the queue is idle */
896 if (list_empty(&master->queue) || !master->running) { 909 if (list_empty(&master->queue) || !master->running) {
897 if (!master->busy) { 910 if (!master->busy) {
898 spin_unlock_irqrestore(&master->queue_lock, flags); 911 spin_unlock_irqrestore(&master->queue_lock, flags);
899 return; 912 return;
900 } 913 }
914
915 /* Only do teardown in the thread */
916 if (!in_kthread) {
917 queue_kthread_work(&master->kworker,
918 &master->pump_messages);
919 spin_unlock_irqrestore(&master->queue_lock, flags);
920 return;
921 }
922
901 master->busy = false; 923 master->busy = false;
924 master->idling = true;
902 spin_unlock_irqrestore(&master->queue_lock, flags); 925 spin_unlock_irqrestore(&master->queue_lock, flags);
926
903 kfree(master->dummy_rx); 927 kfree(master->dummy_rx);
904 master->dummy_rx = NULL; 928 master->dummy_rx = NULL;
905 kfree(master->dummy_tx); 929 kfree(master->dummy_tx);
@@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work)
913 pm_runtime_put_autosuspend(master->dev.parent); 937 pm_runtime_put_autosuspend(master->dev.parent);
914 } 938 }
915 trace_spi_master_idle(master); 939 trace_spi_master_idle(master);
916 return;
917 }
918 940
919 /* Make sure we are not already running a message */ 941 spin_lock_irqsave(&master->queue_lock, flags);
920 if (master->cur_msg) { 942 master->idling = false;
921 spin_unlock_irqrestore(&master->queue_lock, flags); 943 spin_unlock_irqrestore(&master->queue_lock, flags);
922 return; 944 return;
923 } 945 }
946
924 /* Extract head of queue */ 947 /* Extract head of queue */
925 master->cur_msg = 948 master->cur_msg =
926 list_first_entry(&master->queue, struct spi_message, queue); 949 list_first_entry(&master->queue, struct spi_message, queue);
@@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work)
985 } 1008 }
986} 1009}
987 1010
1011/**
1012 * spi_pump_messages - kthread work function which processes spi message queue
1013 * @work: pointer to kthread work struct contained in the master struct
1014 */
1015static void spi_pump_messages(struct kthread_work *work)
1016{
1017 struct spi_master *master =
1018 container_of(work, struct spi_master, pump_messages);
1019
1020 __spi_pump_messages(master, true);
1021}
1022
988static int spi_init_queue(struct spi_master *master) 1023static int spi_init_queue(struct spi_master *master)
989{ 1024{
990 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1025 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
991 1026
992 INIT_LIST_HEAD(&master->queue);
993 spin_lock_init(&master->queue_lock);
994
995 master->running = false; 1027 master->running = false;
996 master->busy = false; 1028 master->busy = false;
997 1029
@@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master)
1161 return 0; 1193 return 0;
1162} 1194}
1163 1195
1164/** 1196static int __spi_queued_transfer(struct spi_device *spi,
1165 * spi_queued_transfer - transfer function for queued transfers 1197 struct spi_message *msg,
1166 * @spi: spi device which is requesting transfer 1198 bool need_pump)
1167 * @msg: spi message which is to handled is queued to driver queue
1168 */
1169static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1170{ 1199{
1171 struct spi_master *master = spi->master; 1200 struct spi_master *master = spi->master;
1172 unsigned long flags; 1201 unsigned long flags;
@@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1181 msg->status = -EINPROGRESS; 1210 msg->status = -EINPROGRESS;
1182 1211
1183 list_add_tail(&msg->queue, &master->queue); 1212 list_add_tail(&msg->queue, &master->queue);
1184 if (!master->busy) 1213 if (!master->busy && need_pump)
1185 queue_kthread_work(&master->kworker, &master->pump_messages); 1214 queue_kthread_work(&master->kworker, &master->pump_messages);
1186 1215
1187 spin_unlock_irqrestore(&master->queue_lock, flags); 1216 spin_unlock_irqrestore(&master->queue_lock, flags);
1188 return 0; 1217 return 0;
1189} 1218}
1190 1219
1220/**
1221 * spi_queued_transfer - transfer function for queued transfers
1222 * @spi: spi device which is requesting transfer
1223 * @msg: spi message which is to handled is queued to driver queue
1224 */
1225static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1226{
1227 return __spi_queued_transfer(spi, msg, true);
1228}
1229
1191static int spi_master_initialize_queue(struct spi_master *master) 1230static int spi_master_initialize_queue(struct spi_master *master)
1192{ 1231{
1193 int ret; 1232 int ret;
@@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master)
1609 dynamic = 1; 1648 dynamic = 1;
1610 } 1649 }
1611 1650
1651 INIT_LIST_HEAD(&master->queue);
1652 spin_lock_init(&master->queue_lock);
1612 spin_lock_init(&master->bus_lock_spinlock); 1653 spin_lock_init(&master->bus_lock_spinlock);
1613 mutex_init(&master->bus_lock_mutex); 1654 mutex_init(&master->bus_lock_mutex);
1614 master->bus_lock_flag = 0; 1655 master->bus_lock_flag = 0;
@@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2114 DECLARE_COMPLETION_ONSTACK(done); 2155 DECLARE_COMPLETION_ONSTACK(done);
2115 int status; 2156 int status;
2116 struct spi_master *master = spi->master; 2157 struct spi_master *master = spi->master;
2158 unsigned long flags;
2159
2160 status = __spi_validate(spi, message);
2161 if (status != 0)
2162 return status;
2117 2163
2118 message->complete = spi_complete; 2164 message->complete = spi_complete;
2119 message->context = &done; 2165 message->context = &done;
2166 message->spi = spi;
2120 2167
2121 if (!bus_locked) 2168 if (!bus_locked)
2122 mutex_lock(&master->bus_lock_mutex); 2169 mutex_lock(&master->bus_lock_mutex);
2123 2170
2124 status = spi_async_locked(spi, message); 2171 /* If we're not using the legacy transfer method then we will
2172 * try to transfer in the calling context so special case.
2173 * This code would be less tricky if we could remove the
2174 * support for driver implemented message queues.
2175 */
2176 if (master->transfer == spi_queued_transfer) {
2177 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2178
2179 trace_spi_message_submit(message);
2180
2181 status = __spi_queued_transfer(spi, message, false);
2182
2183 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2184 } else {
2185 status = spi_async_locked(spi, message);
2186 }
2125 2187
2126 if (!bus_locked) 2188 if (!bus_locked)
2127 mutex_unlock(&master->bus_lock_mutex); 2189 mutex_unlock(&master->bus_lock_mutex);
2128 2190
2129 if (status == 0) { 2191 if (status == 0) {
2192 /* Push out the messages in the calling context if we
2193 * can.
2194 */
2195 if (master->transfer == spi_queued_transfer)
2196 __spi_pump_messages(master, false);
2197
2130 wait_for_completion(&done); 2198 wait_for_completion(&done);
2131 status = message->status; 2199 status = message->status;
2132 } 2200 }
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 6941e04afb8c..4eb7a980e670 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 17 */
22 18
23#include <linux/init.h> 19#include <linux/init.h>
@@ -317,6 +313,37 @@ done:
317 return status; 313 return status;
318} 314}
319 315
316static struct spi_ioc_transfer *
317spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
318 unsigned *n_ioc)
319{
320 struct spi_ioc_transfer *ioc;
321 u32 tmp;
322
323 /* Check type, command number and direction */
324 if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
325 || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
326 || _IOC_DIR(cmd) != _IOC_WRITE)
327 return ERR_PTR(-ENOTTY);
328
329 tmp = _IOC_SIZE(cmd);
330 if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
331 return ERR_PTR(-EINVAL);
332 *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
333 if (*n_ioc == 0)
334 return NULL;
335
336 /* copy into scratch area */
337 ioc = kmalloc(tmp, GFP_KERNEL);
338 if (!ioc)
339 return ERR_PTR(-ENOMEM);
340 if (__copy_from_user(ioc, u_ioc, tmp)) {
341 kfree(ioc);
342 return ERR_PTR(-EFAULT);
343 }
344 return ioc;
345}
346
320static long 347static long
321spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 348spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
322{ 349{
@@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
456 483
457 default: 484 default:
458 /* segmented and/or full-duplex I/O request */ 485 /* segmented and/or full-duplex I/O request */
459 if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) 486 /* Check message and copy into scratch area */
460 || _IOC_DIR(cmd) != _IOC_WRITE) { 487 ioc = spidev_get_ioc_message(cmd,
461 retval = -ENOTTY; 488 (struct spi_ioc_transfer __user *)arg, &n_ioc);
462 break; 489 if (IS_ERR(ioc)) {
463 } 490 retval = PTR_ERR(ioc);
464
465 tmp = _IOC_SIZE(cmd);
466 if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
467 retval = -EINVAL;
468 break;
469 }
470 n_ioc = tmp / sizeof(struct spi_ioc_transfer);
471 if (n_ioc == 0)
472 break;
473
474 /* copy into scratch area */
475 ioc = kmalloc(tmp, GFP_KERNEL);
476 if (!ioc) {
477 retval = -ENOMEM;
478 break;
479 }
480 if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
481 kfree(ioc);
482 retval = -EFAULT;
483 break; 491 break;
484 } 492 }
493 if (!ioc)
494 break; /* n_ioc is also 0 */
485 495
486 /* translate to spi_message, execute */ 496 /* translate to spi_message, execute */
487 retval = spidev_message(spidev, ioc, n_ioc); 497 retval = spidev_message(spidev, ioc, n_ioc);
@@ -496,8 +506,67 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
496 506
497#ifdef CONFIG_COMPAT 507#ifdef CONFIG_COMPAT
498static long 508static long
509spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
510 unsigned long arg)
511{
512 struct spi_ioc_transfer __user *u_ioc;
513 int retval = 0;
514 struct spidev_data *spidev;
515 struct spi_device *spi;
516 unsigned n_ioc, n;
517 struct spi_ioc_transfer *ioc;
518
519 u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
520 if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
521 return -EFAULT;
522
523 /* guard against device removal before, or while,
524 * we issue this ioctl.
525 */
526 spidev = filp->private_data;
527 spin_lock_irq(&spidev->spi_lock);
528 spi = spi_dev_get(spidev->spi);
529 spin_unlock_irq(&spidev->spi_lock);
530
531 if (spi == NULL)
532 return -ESHUTDOWN;
533
534 /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
535 mutex_lock(&spidev->buf_lock);
536
537 /* Check message and copy into scratch area */
538 ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
539 if (IS_ERR(ioc)) {
540 retval = PTR_ERR(ioc);
541 goto done;
542 }
543 if (!ioc)
544 goto done; /* n_ioc is also 0 */
545
546 /* Convert buffer pointers */
547 for (n = 0; n < n_ioc; n++) {
548 ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
549 ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
550 }
551
552 /* translate to spi_message, execute */
553 retval = spidev_message(spidev, ioc, n_ioc);
554 kfree(ioc);
555
556done:
557 mutex_unlock(&spidev->buf_lock);
558 spi_dev_put(spi);
559 return retval;
560}
561
562static long
499spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 563spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
500{ 564{
565 if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
566 && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
567 && _IOC_DIR(cmd) == _IOC_WRITE)
568 return spidev_compat_ioc_message(filp, cmd, arg);
569
501 return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 570 return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
502} 571}
503#else 572#else
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index aeb50bb6ba9c..eaffb0248de1 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3452,8 +3452,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
3452 return status; 3452 return status;
3453} 3453}
3454 3454
3455#ifdef CONFIG_PM
3456
3457int usb_remote_wakeup(struct usb_device *udev) 3455int usb_remote_wakeup(struct usb_device *udev)
3458{ 3456{
3459 int status = 0; 3457 int status = 0;
@@ -3512,16 +3510,6 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
3512 return connect_change; 3510 return connect_change;
3513} 3511}
3514 3512
3515#else
3516
3517static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
3518 u16 portstatus, u16 portchange)
3519{
3520 return 0;
3521}
3522
3523#endif
3524
3525static int check_ports_changed(struct usb_hub *hub) 3513static int check_ports_changed(struct usb_hub *hub)
3526{ 3514{
3527 int port1; 3515 int port1;
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 4953b657635e..cb9ee2556850 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -3118,8 +3118,7 @@ int __init atafb_init(void)
3118 printk("atafb_init: initializing Falcon hw\n"); 3118 printk("atafb_init: initializing Falcon hw\n");
3119 fbhw = &falcon_switch; 3119 fbhw = &falcon_switch;
3120 atafb_ops.fb_setcolreg = &falcon_setcolreg; 3120 atafb_ops.fb_setcolreg = &falcon_setcolreg;
3121 error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 3121 error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 0,
3122 IRQ_TYPE_PRIO,
3123 "framebuffer:modeswitch", 3122 "framebuffer:modeswitch",
3124 falcon_vbl_switcher); 3123 falcon_vbl_switcher);
3125 if (error) 3124 if (error)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 3860d02729dc..0b52d92cb2e5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
92 92
93/* We increase/decrease in batches which fit in a page */ 93/* We increase/decrease in batches which fit in a page */
94static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; 94static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
95static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
96 95
97 96
98/* List of ballooned pages, threaded through the mem_map array. */ 97/* List of ballooned pages, threaded through the mem_map array. */
@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
423 page = pfn_to_page(pfn); 422 page = pfn_to_page(pfn);
424 423
425#ifdef CONFIG_XEN_HAVE_PVMMU 424#ifdef CONFIG_XEN_HAVE_PVMMU
426 /*
427 * Ballooned out frames are effectively replaced with
428 * a scratch frame. Ensure direct mappings and the
429 * p2m are consistent.
430 */
431 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 425 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
432 if (!PageHighMem(page)) { 426 if (!PageHighMem(page)) {
433 struct page *scratch_page = get_balloon_scratch_page();
434
435 ret = HYPERVISOR_update_va_mapping( 427 ret = HYPERVISOR_update_va_mapping(
436 (unsigned long)__va(pfn << PAGE_SHIFT), 428 (unsigned long)__va(pfn << PAGE_SHIFT),
437 pfn_pte(page_to_pfn(scratch_page), 429 __pte_ma(0), 0);
438 PAGE_KERNEL_RO), 0);
439 BUG_ON(ret); 430 BUG_ON(ret);
440
441 put_balloon_scratch_page();
442 } 431 }
443 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 432 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
444 } 433 }
@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work)
500 mutex_unlock(&balloon_mutex); 489 mutex_unlock(&balloon_mutex);
501} 490}
502 491
503struct page *get_balloon_scratch_page(void)
504{
505 struct page *ret = get_cpu_var(balloon_scratch_page);
506 BUG_ON(ret == NULL);
507 return ret;
508}
509
510void put_balloon_scratch_page(void)
511{
512 put_cpu_var(balloon_scratch_page);
513}
514
515/* Resets the Xen limit, sets new target, and kicks off processing. */ 492/* Resets the Xen limit, sets new target, and kicks off processing. */
516void balloon_set_new_target(unsigned long target) 493void balloon_set_new_target(unsigned long target)
517{ 494{
@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn,
605 } 582 }
606} 583}
607 584
608static int alloc_balloon_scratch_page(int cpu)
609{
610 if (per_cpu(balloon_scratch_page, cpu) != NULL)
611 return 0;
612
613 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
614 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
615 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
616 return -ENOMEM;
617 }
618
619 return 0;
620}
621
622
623static int balloon_cpu_notify(struct notifier_block *self,
624 unsigned long action, void *hcpu)
625{
626 int cpu = (long)hcpu;
627 switch (action) {
628 case CPU_UP_PREPARE:
629 if (alloc_balloon_scratch_page(cpu))
630 return NOTIFY_BAD;
631 break;
632 default:
633 break;
634 }
635 return NOTIFY_OK;
636}
637
638static struct notifier_block balloon_cpu_notifier = {
639 .notifier_call = balloon_cpu_notify,
640};
641
642static int __init balloon_init(void) 585static int __init balloon_init(void)
643{ 586{
644 int i, cpu; 587 int i;
645 588
646 if (!xen_domain()) 589 if (!xen_domain())
647 return -ENODEV; 590 return -ENODEV;
648 591
649 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
650 register_cpu_notifier(&balloon_cpu_notifier);
651
652 get_online_cpus();
653 for_each_online_cpu(cpu) {
654 if (alloc_balloon_scratch_page(cpu)) {
655 put_online_cpus();
656 unregister_cpu_notifier(&balloon_cpu_notifier);
657 return -ENOMEM;
658 }
659 }
660 put_online_cpus();
661 }
662
663 pr_info("Initialising balloon driver\n"); 592 pr_info("Initialising balloon driver\n");
664 593
665 balloon_stats.current_pages = xen_pv_domain() 594 balloon_stats.current_pages = xen_pv_domain()
@@ -696,15 +625,4 @@ static int __init balloon_init(void)
696 625
697subsys_initcall(balloon_init); 626subsys_initcall(balloon_init);
698 627
699static int __init balloon_clear(void)
700{
701 int cpu;
702
703 for_each_possible_cpu(cpu)
704 per_cpu(balloon_scratch_page, cpu) = NULL;
705
706 return 0;
707}
708early_initcall(balloon_clear);
709
710MODULE_LICENSE("GPL"); 628MODULE_LICENSE("GPL");
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 073b4a19a8b0..d5bb1a33d0a3 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -67,7 +67,7 @@ struct gntdev_priv {
67 * Only populated if populate_freeable_maps == 1 */ 67 * Only populated if populate_freeable_maps == 1 */
68 struct list_head freeable_maps; 68 struct list_head freeable_maps;
69 /* lock protects maps and freeable_maps */ 69 /* lock protects maps and freeable_maps */
70 spinlock_t lock; 70 struct mutex lock;
71 struct mm_struct *mm; 71 struct mm_struct *mm;
72 struct mmu_notifier mn; 72 struct mmu_notifier mn;
73}; 73};
@@ -91,7 +91,9 @@ struct grant_map {
91 struct gnttab_map_grant_ref *map_ops; 91 struct gnttab_map_grant_ref *map_ops;
92 struct gnttab_unmap_grant_ref *unmap_ops; 92 struct gnttab_unmap_grant_ref *unmap_ops;
93 struct gnttab_map_grant_ref *kmap_ops; 93 struct gnttab_map_grant_ref *kmap_ops;
94 struct gnttab_unmap_grant_ref *kunmap_ops;
94 struct page **pages; 95 struct page **pages;
96 unsigned long pages_vm_start;
95}; 97};
96 98
97static int unmap_grant_pages(struct grant_map *map, int offset, int pages); 99static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
@@ -118,12 +120,13 @@ static void gntdev_free_map(struct grant_map *map)
118 return; 120 return;
119 121
120 if (map->pages) 122 if (map->pages)
121 free_xenballooned_pages(map->count, map->pages); 123 gnttab_free_pages(map->count, map->pages);
122 kfree(map->pages); 124 kfree(map->pages);
123 kfree(map->grants); 125 kfree(map->grants);
124 kfree(map->map_ops); 126 kfree(map->map_ops);
125 kfree(map->unmap_ops); 127 kfree(map->unmap_ops);
126 kfree(map->kmap_ops); 128 kfree(map->kmap_ops);
129 kfree(map->kunmap_ops);
127 kfree(map); 130 kfree(map);
128} 131}
129 132
@@ -140,21 +143,24 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
140 add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL); 143 add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
141 add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); 144 add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
142 add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); 145 add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
146 add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
143 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); 147 add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
144 if (NULL == add->grants || 148 if (NULL == add->grants ||
145 NULL == add->map_ops || 149 NULL == add->map_ops ||
146 NULL == add->unmap_ops || 150 NULL == add->unmap_ops ||
147 NULL == add->kmap_ops || 151 NULL == add->kmap_ops ||
152 NULL == add->kunmap_ops ||
148 NULL == add->pages) 153 NULL == add->pages)
149 goto err; 154 goto err;
150 155
151 if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */)) 156 if (gnttab_alloc_pages(count, add->pages))
152 goto err; 157 goto err;
153 158
154 for (i = 0; i < count; i++) { 159 for (i = 0; i < count; i++) {
155 add->map_ops[i].handle = -1; 160 add->map_ops[i].handle = -1;
156 add->unmap_ops[i].handle = -1; 161 add->unmap_ops[i].handle = -1;
157 add->kmap_ops[i].handle = -1; 162 add->kmap_ops[i].handle = -1;
163 add->kunmap_ops[i].handle = -1;
158 } 164 }
159 165
160 add->index = 0; 166 add->index = 0;
@@ -216,9 +222,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
216 } 222 }
217 223
218 if (populate_freeable_maps && priv) { 224 if (populate_freeable_maps && priv) {
219 spin_lock(&priv->lock); 225 mutex_lock(&priv->lock);
220 list_del(&map->next); 226 list_del(&map->next);
221 spin_unlock(&priv->lock); 227 mutex_unlock(&priv->lock);
222 } 228 }
223 229
224 if (map->pages && !use_ptemod) 230 if (map->pages && !use_ptemod)
@@ -239,6 +245,14 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
239 BUG_ON(pgnr >= map->count); 245 BUG_ON(pgnr >= map->count);
240 pte_maddr = arbitrary_virt_to_machine(pte).maddr; 246 pte_maddr = arbitrary_virt_to_machine(pte).maddr;
241 247
248 /*
249 * Set the PTE as special to force get_user_pages_fast() fall
250 * back to the slow path. If this is not supported as part of
251 * the grant map, it will be done afterwards.
252 */
253 if (xen_feature(XENFEAT_gnttab_map_avail_bits))
254 flags |= (1 << _GNTMAP_guest_avail0);
255
242 gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, 256 gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
243 map->grants[pgnr].ref, 257 map->grants[pgnr].ref,
244 map->grants[pgnr].domid); 258 map->grants[pgnr].domid);
@@ -247,6 +261,15 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
247 return 0; 261 return 0;
248} 262}
249 263
264#ifdef CONFIG_X86
265static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
266 unsigned long addr, void *data)
267{
268 set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
269 return 0;
270}
271#endif
272
250static int map_grant_pages(struct grant_map *map) 273static int map_grant_pages(struct grant_map *map)
251{ 274{
252 int i, err = 0; 275 int i, err = 0;
@@ -280,6 +303,8 @@ static int map_grant_pages(struct grant_map *map)
280 map->flags | GNTMAP_host_map, 303 map->flags | GNTMAP_host_map,
281 map->grants[i].ref, 304 map->grants[i].ref,
282 map->grants[i].domid); 305 map->grants[i].domid);
306 gnttab_set_unmap_op(&map->kunmap_ops[i], address,
307 map->flags | GNTMAP_host_map, -1);
283 } 308 }
284 } 309 }
285 310
@@ -290,20 +315,42 @@ static int map_grant_pages(struct grant_map *map)
290 return err; 315 return err;
291 316
292 for (i = 0; i < map->count; i++) { 317 for (i = 0; i < map->count; i++) {
293 if (map->map_ops[i].status) 318 if (map->map_ops[i].status) {
294 err = -EINVAL; 319 err = -EINVAL;
295 else { 320 continue;
296 BUG_ON(map->map_ops[i].handle == -1);
297 map->unmap_ops[i].handle = map->map_ops[i].handle;
298 pr_debug("map handle=%d\n", map->map_ops[i].handle);
299 } 321 }
322
323 map->unmap_ops[i].handle = map->map_ops[i].handle;
324 if (use_ptemod)
325 map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
300 } 326 }
301 return err; 327 return err;
302} 328}
303 329
330struct unmap_grant_pages_callback_data
331{
332 struct completion completion;
333 int result;
334};
335
336static void unmap_grant_callback(int result,
337 struct gntab_unmap_queue_data *data)
338{
339 struct unmap_grant_pages_callback_data* d = data->data;
340
341 d->result = result;
342 complete(&d->completion);
343}
344
304static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) 345static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
305{ 346{
306 int i, err = 0; 347 int i, err = 0;
348 struct gntab_unmap_queue_data unmap_data;
349 struct unmap_grant_pages_callback_data data;
350
351 init_completion(&data.completion);
352 unmap_data.data = &data;
353 unmap_data.done= &unmap_grant_callback;
307 354
308 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { 355 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
309 int pgno = (map->notify.addr >> PAGE_SHIFT); 356 int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -315,11 +362,16 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
315 } 362 }
316 } 363 }
317 364
318 err = gnttab_unmap_refs(map->unmap_ops + offset, 365 unmap_data.unmap_ops = map->unmap_ops + offset;
319 use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset, 366 unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
320 pages); 367 unmap_data.pages = map->pages + offset;
321 if (err) 368 unmap_data.count = pages;
322 return err; 369
370 gnttab_unmap_refs_async(&unmap_data);
371
372 wait_for_completion(&data.completion);
373 if (data.result)
374 return data.result;
323 375
324 for (i = 0; i < pages; i++) { 376 for (i = 0; i < pages; i++) {
325 if (map->unmap_ops[offset+i].status) 377 if (map->unmap_ops[offset+i].status)
@@ -387,17 +439,26 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
387 * not do any unmapping, since that has been done prior to 439 * not do any unmapping, since that has been done prior to
388 * closing the vma, but it may still iterate the unmap_ops list. 440 * closing the vma, but it may still iterate the unmap_ops list.
389 */ 441 */
390 spin_lock(&priv->lock); 442 mutex_lock(&priv->lock);
391 map->vma = NULL; 443 map->vma = NULL;
392 spin_unlock(&priv->lock); 444 mutex_unlock(&priv->lock);
393 } 445 }
394 vma->vm_private_data = NULL; 446 vma->vm_private_data = NULL;
395 gntdev_put_map(priv, map); 447 gntdev_put_map(priv, map);
396} 448}
397 449
450static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
451 unsigned long addr)
452{
453 struct grant_map *map = vma->vm_private_data;
454
455 return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
456}
457
398static struct vm_operations_struct gntdev_vmops = { 458static struct vm_operations_struct gntdev_vmops = {
399 .open = gntdev_vma_open, 459 .open = gntdev_vma_open,
400 .close = gntdev_vma_close, 460 .close = gntdev_vma_close,
461 .find_special_page = gntdev_vma_find_special_page,
401}; 462};
402 463
403/* ------------------------------------------------------------------ */ 464/* ------------------------------------------------------------------ */
@@ -433,14 +494,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
433 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); 494 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
434 struct grant_map *map; 495 struct grant_map *map;
435 496
436 spin_lock(&priv->lock); 497 mutex_lock(&priv->lock);
437 list_for_each_entry(map, &priv->maps, next) { 498 list_for_each_entry(map, &priv->maps, next) {
438 unmap_if_in_range(map, start, end); 499 unmap_if_in_range(map, start, end);
439 } 500 }
440 list_for_each_entry(map, &priv->freeable_maps, next) { 501 list_for_each_entry(map, &priv->freeable_maps, next) {
441 unmap_if_in_range(map, start, end); 502 unmap_if_in_range(map, start, end);
442 } 503 }
443 spin_unlock(&priv->lock); 504 mutex_unlock(&priv->lock);
444} 505}
445 506
446static void mn_invl_page(struct mmu_notifier *mn, 507static void mn_invl_page(struct mmu_notifier *mn,
@@ -457,7 +518,7 @@ static void mn_release(struct mmu_notifier *mn,
457 struct grant_map *map; 518 struct grant_map *map;
458 int err; 519 int err;
459 520
460 spin_lock(&priv->lock); 521 mutex_lock(&priv->lock);
461 list_for_each_entry(map, &priv->maps, next) { 522 list_for_each_entry(map, &priv->maps, next) {
462 if (!map->vma) 523 if (!map->vma)
463 continue; 524 continue;
@@ -476,7 +537,7 @@ static void mn_release(struct mmu_notifier *mn,
476 err = unmap_grant_pages(map, /* offset */ 0, map->count); 537 err = unmap_grant_pages(map, /* offset */ 0, map->count);
477 WARN_ON(err); 538 WARN_ON(err);
478 } 539 }
479 spin_unlock(&priv->lock); 540 mutex_unlock(&priv->lock);
480} 541}
481 542
482static struct mmu_notifier_ops gntdev_mmu_ops = { 543static struct mmu_notifier_ops gntdev_mmu_ops = {
@@ -498,7 +559,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
498 559
499 INIT_LIST_HEAD(&priv->maps); 560 INIT_LIST_HEAD(&priv->maps);
500 INIT_LIST_HEAD(&priv->freeable_maps); 561 INIT_LIST_HEAD(&priv->freeable_maps);
501 spin_lock_init(&priv->lock); 562 mutex_init(&priv->lock);
502 563
503 if (use_ptemod) { 564 if (use_ptemod) {
504 priv->mm = get_task_mm(current); 565 priv->mm = get_task_mm(current);
@@ -572,10 +633,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
572 return -EFAULT; 633 return -EFAULT;
573 } 634 }
574 635
575 spin_lock(&priv->lock); 636 mutex_lock(&priv->lock);
576 gntdev_add_map(priv, map); 637 gntdev_add_map(priv, map);
577 op.index = map->index << PAGE_SHIFT; 638 op.index = map->index << PAGE_SHIFT;
578 spin_unlock(&priv->lock); 639 mutex_unlock(&priv->lock);
579 640
580 if (copy_to_user(u, &op, sizeof(op)) != 0) 641 if (copy_to_user(u, &op, sizeof(op)) != 0)
581 return -EFAULT; 642 return -EFAULT;
@@ -594,7 +655,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
594 return -EFAULT; 655 return -EFAULT;
595 pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count); 656 pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
596 657
597 spin_lock(&priv->lock); 658 mutex_lock(&priv->lock);
598 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); 659 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
599 if (map) { 660 if (map) {
600 list_del(&map->next); 661 list_del(&map->next);
@@ -602,7 +663,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
602 list_add_tail(&map->next, &priv->freeable_maps); 663 list_add_tail(&map->next, &priv->freeable_maps);
603 err = 0; 664 err = 0;
604 } 665 }
605 spin_unlock(&priv->lock); 666 mutex_unlock(&priv->lock);
606 if (map) 667 if (map)
607 gntdev_put_map(priv, map); 668 gntdev_put_map(priv, map);
608 return err; 669 return err;
@@ -670,7 +731,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
670 out_flags = op.action; 731 out_flags = op.action;
671 out_event = op.event_channel_port; 732 out_event = op.event_channel_port;
672 733
673 spin_lock(&priv->lock); 734 mutex_lock(&priv->lock);
674 735
675 list_for_each_entry(map, &priv->maps, next) { 736 list_for_each_entry(map, &priv->maps, next) {
676 uint64_t begin = map->index << PAGE_SHIFT; 737 uint64_t begin = map->index << PAGE_SHIFT;
@@ -698,7 +759,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
698 rc = 0; 759 rc = 0;
699 760
700 unlock_out: 761 unlock_out:
701 spin_unlock(&priv->lock); 762 mutex_unlock(&priv->lock);
702 763
703 /* Drop the reference to the event channel we did not save in the map */ 764 /* Drop the reference to the event channel we did not save in the map */
704 if (out_flags & UNMAP_NOTIFY_SEND_EVENT) 765 if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
@@ -748,7 +809,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
748 pr_debug("map %d+%d at %lx (pgoff %lx)\n", 809 pr_debug("map %d+%d at %lx (pgoff %lx)\n",
749 index, count, vma->vm_start, vma->vm_pgoff); 810 index, count, vma->vm_start, vma->vm_pgoff);
750 811
751 spin_lock(&priv->lock); 812 mutex_lock(&priv->lock);
752 map = gntdev_find_map_index(priv, index, count); 813 map = gntdev_find_map_index(priv, index, count);
753 if (!map) 814 if (!map)
754 goto unlock_out; 815 goto unlock_out;
@@ -783,7 +844,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
783 map->flags |= GNTMAP_readonly; 844 map->flags |= GNTMAP_readonly;
784 } 845 }
785 846
786 spin_unlock(&priv->lock); 847 mutex_unlock(&priv->lock);
787 848
788 if (use_ptemod) { 849 if (use_ptemod) {
789 err = apply_to_page_range(vma->vm_mm, vma->vm_start, 850 err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -806,16 +867,34 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
806 if (err) 867 if (err)
807 goto out_put_map; 868 goto out_put_map;
808 } 869 }
870 } else {
871#ifdef CONFIG_X86
872 /*
873 * If the PTEs were not made special by the grant map
874 * hypercall, do so here.
875 *
876 * This is racy since the mapping is already visible
877 * to userspace but userspace should be well-behaved
878 * enough to not touch it until the mmap() call
879 * returns.
880 */
881 if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
882 apply_to_page_range(vma->vm_mm, vma->vm_start,
883 vma->vm_end - vma->vm_start,
884 set_grant_ptes_as_special, NULL);
885 }
886#endif
887 map->pages_vm_start = vma->vm_start;
809 } 888 }
810 889
811 return 0; 890 return 0;
812 891
813unlock_out: 892unlock_out:
814 spin_unlock(&priv->lock); 893 mutex_unlock(&priv->lock);
815 return err; 894 return err;
816 895
817out_unlock_put: 896out_unlock_put:
818 spin_unlock(&priv->lock); 897 mutex_unlock(&priv->lock);
819out_put_map: 898out_put_map:
820 if (use_ptemod) 899 if (use_ptemod)
821 map->vma = NULL; 900 map->vma = NULL;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7786291ba229..17972fbacddc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
42#include <linux/io.h> 42#include <linux/io.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/hardirq.h> 44#include <linux/hardirq.h>
45#include <linux/workqueue.h>
45 46
46#include <xen/xen.h> 47#include <xen/xen.h>
47#include <xen/interface/xen.h> 48#include <xen/interface/xen.h>
@@ -50,6 +51,7 @@
50#include <xen/interface/memory.h> 51#include <xen/interface/memory.h>
51#include <xen/hvc-console.h> 52#include <xen/hvc-console.h>
52#include <xen/swiotlb-xen.h> 53#include <xen/swiotlb-xen.h>
54#include <xen/balloon.h>
53#include <asm/xen/hypercall.h> 55#include <asm/xen/hypercall.h>
54#include <asm/xen/interface.h> 56#include <asm/xen/interface.h>
55 57
@@ -671,6 +673,59 @@ void gnttab_free_auto_xlat_frames(void)
671} 673}
672EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames); 674EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
673 675
676/**
677 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
678 * @nr_pages: number of pages to alloc
679 * @pages: returns the pages
680 */
681int gnttab_alloc_pages(int nr_pages, struct page **pages)
682{
683 int i;
684 int ret;
685
686 ret = alloc_xenballooned_pages(nr_pages, pages, false);
687 if (ret < 0)
688 return ret;
689
690 for (i = 0; i < nr_pages; i++) {
691#if BITS_PER_LONG < 64
692 struct xen_page_foreign *foreign;
693
694 foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
695 if (!foreign) {
696 gnttab_free_pages(nr_pages, pages);
697 return -ENOMEM;
698 }
699 set_page_private(pages[i], (unsigned long)foreign);
700#endif
701 SetPagePrivate(pages[i]);
702 }
703
704 return 0;
705}
706EXPORT_SYMBOL(gnttab_alloc_pages);
707
708/**
709 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
710 * @nr_pages; number of pages to free
711 * @pages: the pages
712 */
713void gnttab_free_pages(int nr_pages, struct page **pages)
714{
715 int i;
716
717 for (i = 0; i < nr_pages; i++) {
718 if (PagePrivate(pages[i])) {
719#if BITS_PER_LONG < 64
720 kfree((void *)page_private(pages[i]));
721#endif
722 ClearPagePrivate(pages[i]);
723 }
724 }
725 free_xenballooned_pages(nr_pages, pages);
726}
727EXPORT_SYMBOL(gnttab_free_pages);
728
674/* Handling of paged out grant targets (GNTST_eagain) */ 729/* Handling of paged out grant targets (GNTST_eagain) */
675#define MAX_DELAY 256 730#define MAX_DELAY 256
676static inline void 731static inline void
@@ -727,30 +782,87 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
727 if (ret) 782 if (ret)
728 return ret; 783 return ret;
729 784
730 /* Retry eagain maps */ 785 for (i = 0; i < count; i++) {
731 for (i = 0; i < count; i++) 786 /* Retry eagain maps */
732 if (map_ops[i].status == GNTST_eagain) 787 if (map_ops[i].status == GNTST_eagain)
733 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, 788 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
734 &map_ops[i].status, __func__); 789 &map_ops[i].status, __func__);
735 790
791 if (map_ops[i].status == GNTST_okay) {
792 struct xen_page_foreign *foreign;
793
794 SetPageForeign(pages[i]);
795 foreign = xen_page_foreign(pages[i]);
796 foreign->domid = map_ops[i].dom;
797 foreign->gref = map_ops[i].ref;
798 }
799 }
800
736 return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count); 801 return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
737} 802}
738EXPORT_SYMBOL_GPL(gnttab_map_refs); 803EXPORT_SYMBOL_GPL(gnttab_map_refs);
739 804
740int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 805int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
741 struct gnttab_map_grant_ref *kmap_ops, 806 struct gnttab_unmap_grant_ref *kunmap_ops,
742 struct page **pages, unsigned int count) 807 struct page **pages, unsigned int count)
743{ 808{
809 unsigned int i;
744 int ret; 810 int ret;
745 811
746 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); 812 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
747 if (ret) 813 if (ret)
748 return ret; 814 return ret;
749 815
750 return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count); 816 for (i = 0; i < count; i++)
817 ClearPageForeign(pages[i]);
818
819 return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
751} 820}
752EXPORT_SYMBOL_GPL(gnttab_unmap_refs); 821EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
753 822
823#define GNTTAB_UNMAP_REFS_DELAY 5
824
825static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
826
827static void gnttab_unmap_work(struct work_struct *work)
828{
829 struct gntab_unmap_queue_data
830 *unmap_data = container_of(work,
831 struct gntab_unmap_queue_data,
832 gnttab_work.work);
833 if (unmap_data->age != UINT_MAX)
834 unmap_data->age++;
835 __gnttab_unmap_refs_async(unmap_data);
836}
837
838static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
839{
840 int ret;
841 int pc;
842
843 for (pc = 0; pc < item->count; pc++) {
844 if (page_count(item->pages[pc]) > 1) {
845 unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
846 schedule_delayed_work(&item->gnttab_work,
847 msecs_to_jiffies(delay));
848 return;
849 }
850 }
851
852 ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
853 item->pages, item->count);
854 item->done(ret, item);
855}
856
857void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
858{
859 INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
860 item->age = 0;
861
862 __gnttab_unmap_refs_async(item);
863}
864EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
865
754static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) 866static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
755{ 867{
756 int rc; 868 int rc;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index f8bb36f9d9ce..bf1940706422 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -105,10 +105,16 @@ static void do_suspend(void)
105 105
106 err = freeze_processes(); 106 err = freeze_processes();
107 if (err) { 107 if (err) {
108 pr_err("%s: freeze failed %d\n", __func__, err); 108 pr_err("%s: freeze processes failed %d\n", __func__, err);
109 goto out; 109 goto out;
110 } 110 }
111 111
112 err = freeze_kernel_threads();
113 if (err) {
114 pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
115 goto out_thaw;
116 }
117
112 err = dpm_suspend_start(PMSG_FREEZE); 118 err = dpm_suspend_start(PMSG_FREEZE);
113 if (err) { 119 if (err) {
114 pr_err("%s: dpm_suspend_start %d\n", __func__, err); 120 pr_err("%s: dpm_suspend_start %d\n", __func__, err);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 83b5c53bec6b..8a65423bc696 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -374,7 +374,7 @@ static struct frontswap_ops tmem_frontswap_ops = {
374}; 374};
375#endif 375#endif
376 376
377static int xen_tmem_init(void) 377static int __init xen_tmem_init(void)
378{ 378{
379 if (!xen_domain()) 379 if (!xen_domain())
380 return 0; 380 return 0;
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
index 34e40b733f9a..4fc886cd5586 100644
--- a/drivers/xen/xen-acpi-memhotplug.c
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -117,8 +117,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
117 list_for_each_entry(info, &mem_device->res_list, list) { 117 list_for_each_entry(info, &mem_device->res_list, list) {
118 if ((info->caching == address64.info.mem.caching) && 118 if ((info->caching == address64.info.mem.caching) &&
119 (info->write_protect == address64.info.mem.write_protect) && 119 (info->write_protect == address64.info.mem.write_protect) &&
120 (info->start_addr + info->length == address64.minimum)) { 120 (info->start_addr + info->length == address64.address.minimum)) {
121 info->length += address64.address_length; 121 info->length += address64.address.address_length;
122 return AE_OK; 122 return AE_OK;
123 } 123 }
124 } 124 }
@@ -130,8 +130,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
130 INIT_LIST_HEAD(&new->list); 130 INIT_LIST_HEAD(&new->list);
131 new->caching = address64.info.mem.caching; 131 new->caching = address64.info.mem.caching;
132 new->write_protect = address64.info.mem.write_protect; 132 new->write_protect = address64.info.mem.write_protect;
133 new->start_addr = address64.minimum; 133 new->start_addr = address64.address.minimum;
134 new->length = address64.address_length; 134 new->length = address64.address.address_length;
135 list_add_tail(&new->list, &mem_device->res_list); 135 list_add_tail(&new->list, &mem_device->res_list);
136 136
137 return AE_OK; 137 return AE_OK;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index e999496eda3e..ecd540a7a562 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -227,7 +227,7 @@ static void put_free_pages(struct page **page, int num)
227 return; 227 return;
228 if (i > scsiback_max_buffer_pages) { 228 if (i > scsiback_max_buffer_pages) {
229 n = min(num, i - scsiback_max_buffer_pages); 229 n = min(num, i - scsiback_max_buffer_pages);
230 free_xenballooned_pages(n, page + num - n); 230 gnttab_free_pages(n, page + num - n);
231 n = num - n; 231 n = num - n;
232 } 232 }
233 spin_lock_irqsave(&free_pages_lock, flags); 233 spin_lock_irqsave(&free_pages_lock, flags);
@@ -244,7 +244,7 @@ static int get_free_page(struct page **page)
244 spin_lock_irqsave(&free_pages_lock, flags); 244 spin_lock_irqsave(&free_pages_lock, flags);
245 if (list_empty(&scsiback_free_pages)) { 245 if (list_empty(&scsiback_free_pages)) {
246 spin_unlock_irqrestore(&free_pages_lock, flags); 246 spin_unlock_irqrestore(&free_pages_lock, flags);
247 return alloc_xenballooned_pages(1, page, false); 247 return gnttab_alloc_pages(1, page);
248 } 248 }
249 page[0] = list_first_entry(&scsiback_free_pages, struct page, lru); 249 page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
250 list_del(&page[0]->lru); 250 list_del(&page[0]->lru);
@@ -2106,7 +2106,7 @@ static void __exit scsiback_exit(void)
2106 while (free_pages_num) { 2106 while (free_pages_num) {
2107 if (get_free_page(&page)) 2107 if (get_free_page(&page))
2108 BUG(); 2108 BUG();
2109 free_xenballooned_pages(1, &page); 2109 gnttab_free_pages(1, &page);
2110 } 2110 }
2111 scsiback_deregister_configfs(); 2111 scsiback_deregister_configfs();
2112 xenbus_unregister_driver(&scsiback_driver); 2112 xenbus_unregister_driver(&scsiback_driver);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 85534ea63555..9433e46518c8 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -326,10 +326,13 @@ static int xenbus_write_transaction(unsigned msg_type,
326 } 326 }
327 327
328 if (msg_type == XS_TRANSACTION_START) { 328 if (msg_type == XS_TRANSACTION_START) {
329 trans->handle.id = simple_strtoul(reply, NULL, 0); 329 if (u->u.msg.type == XS_ERROR)
330 330 kfree(trans);
331 list_add(&trans->list, &u->transactions); 331 else {
332 } else if (msg_type == XS_TRANSACTION_END) { 332 trans->handle.id = simple_strtoul(reply, NULL, 0);
333 list_add(&trans->list, &u->transactions);
334 }
335 } else if (u->u.msg.type == XS_TRANSACTION_END) {
333 list_for_each_entry(trans, &u->transactions, list) 336 list_for_each_entry(trans, &u->transactions, list)
334 if (trans->handle.id == u->u.msg.tx_id) 337 if (trans->handle.id == u->u.msg.tx_id)
335 break; 338 break;
diff --git a/fs/Kconfig b/fs/Kconfig
index 664991afe0c0..a6bb530b1ec5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -165,6 +165,7 @@ config HUGETLB_PAGE
165 def_bool HUGETLBFS 165 def_bool HUGETLBFS
166 166
167source "fs/configfs/Kconfig" 167source "fs/configfs/Kconfig"
168source "fs/efivarfs/Kconfig"
168 169
169endmenu 170endmenu
170 171
@@ -209,7 +210,6 @@ source "fs/sysv/Kconfig"
209source "fs/ufs/Kconfig" 210source "fs/ufs/Kconfig"
210source "fs/exofs/Kconfig" 211source "fs/exofs/Kconfig"
211source "fs/f2fs/Kconfig" 212source "fs/f2fs/Kconfig"
212source "fs/efivarfs/Kconfig"
213 213
214endif # MISC_FILESYSTEMS 214endif # MISC_FILESYSTEMS
215 215
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a66768ebc8d1..80e9c18ea64f 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -8,6 +8,7 @@ config BTRFS_FS
8 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
9 select RAID6_PQ 9 select RAID6_PQ
10 select XOR_BLOCKS 10 select XOR_BLOCKS
11 select SRCU
11 12
12 help 13 help
13 Btrfs is a general purpose copy-on-write filesystem with extents, 14 Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/efivarfs/Kconfig b/fs/efivarfs/Kconfig
index 367bbb10c543..c2499ef174a2 100644
--- a/fs/efivarfs/Kconfig
+++ b/fs/efivarfs/Kconfig
@@ -1,6 +1,7 @@
1config EFIVAR_FS 1config EFIVAR_FS
2 tristate "EFI Variable filesystem" 2 tristate "EFI Variable filesystem"
3 depends on EFI 3 depends on EFI
4 default m
4 help 5 help
5 efivarfs is a replacement filesystem for the old EFI 6 efivarfs is a replacement filesystem for the old EFI
6 variable support via sysfs, as it doesn't suffer from the 7 variable support via sysfs, as it doesn't suffer from the
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 6dad1176ec52..ddbce42548c9 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -140,7 +140,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
140 140
141 name[len] = '-'; 141 name[len] = '-';
142 142
143 efi_guid_unparse(&entry->var.VendorGuid, name + len + 1); 143 efi_guid_to_str(&entry->var.VendorGuid, name + len + 1);
144 144
145 name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; 145 name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
146 146
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index 22c629eedd82..2a24249b30af 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -1,5 +1,6 @@
1config FSNOTIFY 1config FSNOTIFY
2 def_bool n 2 def_bool n
3 select SRCU
3 4
4source "fs/notify/dnotify/Kconfig" 5source "fs/notify/dnotify/Kconfig"
5source "fs/notify/inotify/Kconfig" 6source "fs/notify/inotify/Kconfig"
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index c51df1dd237e..4a09975aac90 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -5,6 +5,7 @@
5config QUOTA 5config QUOTA
6 bool "Quota support" 6 bool "Quota support"
7 select QUOTACTL 7 select QUOTACTL
8 select SRCU
8 help 9 help
9 If you say Y here, you will be able to set per user limits for disk 10 If you say Y here, you will be able to set per user limits for disk
10 usage (also called disk quotas). Currently, it works for the 11 usage (also called disk quotas). Currently, it works for the
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index d5ec6c87810f..6b040f4ddfab 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 5a0a3e5daf85..03aacfb3e98b 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 8b06e4c1dd5d..11c3a011dcbf 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 7461327e14e4..273de709495c 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 1baae6edda89..9318a87ee39a 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index a08e55a263c9..b0bb30ebb807 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 03b3e6d405ff..0bc78df66d4b 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2014, Intel Corp. 10 * Copyright (C) 2000 - 2015, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 5ba78464c1b1..d56f5d722138 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20141107 49#define ACPI_CA_VERSION 0x20150204
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -569,6 +569,14 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
569 address, 569 address,
570 void *context)) 570 void *context))
571ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status 571ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
572 acpi_install_gpe_raw_handler(acpi_handle
573 gpe_device,
574 u32 gpe_number,
575 u32 type,
576 acpi_gpe_handler
577 address,
578 void *context))
579ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
572 acpi_remove_gpe_handler(acpi_handle gpe_device, 580 acpi_remove_gpe_handler(acpi_handle gpe_device,
573 u32 gpe_number, 581 u32 gpe_number,
574 acpi_gpe_handler 582 acpi_gpe_handler
@@ -891,12 +899,6 @@ ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
891ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap); 899ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
892 900
893ACPI_EXTERNAL_RETURN_STATUS(acpi_status 901ACPI_EXTERNAL_RETURN_STATUS(acpi_status
894 acpi_get_id(acpi_handle object,
895 acpi_owner_id * out_type))
896
897ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_table_id(acpi_owner_id id))
898
899ACPI_EXTERNAL_RETURN_STATUS(acpi_status
900 acpi_get_table_with_size(acpi_string signature, 902 acpi_get_table_with_size(acpi_string signature,
901 u32 instance, 903 u32 instance,
902 struct acpi_table_header 904 struct acpi_table_header
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index eb760ca0b2e0..ebe242638591 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -305,43 +305,51 @@ struct acpi_resource_source {
305 u8 max_address_fixed; \ 305 u8 max_address_fixed; \
306 union acpi_resource_attribute info; 306 union acpi_resource_attribute info;
307 307
308struct acpi_resource_address { 308struct acpi_address16_attribute {
309ACPI_RESOURCE_ADDRESS_COMMON}; 309 u16 granularity;
310
311struct acpi_resource_address16 {
312 ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
313 u16 minimum; 310 u16 minimum;
314 u16 maximum; 311 u16 maximum;
315 u16 translation_offset; 312 u16 translation_offset;
316 u16 address_length; 313 u16 address_length;
317 struct acpi_resource_source resource_source;
318}; 314};
319 315
320struct acpi_resource_address32 { 316struct acpi_address32_attribute {
321 ACPI_RESOURCE_ADDRESS_COMMON u32 granularity; 317 u32 granularity;
322 u32 minimum; 318 u32 minimum;
323 u32 maximum; 319 u32 maximum;
324 u32 translation_offset; 320 u32 translation_offset;
325 u32 address_length; 321 u32 address_length;
326 struct acpi_resource_source resource_source;
327}; 322};
328 323
329struct acpi_resource_address64 { 324struct acpi_address64_attribute {
330 ACPI_RESOURCE_ADDRESS_COMMON u64 granularity; 325 u64 granularity;
331 u64 minimum; 326 u64 minimum;
332 u64 maximum; 327 u64 maximum;
333 u64 translation_offset; 328 u64 translation_offset;
334 u64 address_length; 329 u64 address_length;
330};
331
332struct acpi_resource_address {
333ACPI_RESOURCE_ADDRESS_COMMON};
334
335struct acpi_resource_address16 {
336 ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address16_attribute address;
337 struct acpi_resource_source resource_source;
338};
339
340struct acpi_resource_address32 {
341 ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address32_attribute address;
342 struct acpi_resource_source resource_source;
343};
344
345struct acpi_resource_address64 {
346 ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address64_attribute address;
335 struct acpi_resource_source resource_source; 347 struct acpi_resource_source resource_source;
336}; 348};
337 349
338struct acpi_resource_extended_address64 { 350struct acpi_resource_extended_address64 {
339 ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID; 351 ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID;
340 u64 granularity; 352 struct acpi_address64_attribute address;
341 u64 minimum;
342 u64 maximum;
343 u64 translation_offset;
344 u64 address_length;
345 u64 type_specific; 353 u64 type_specific;
346}; 354};
347 355
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bee19d8170c5..d4081fef1095 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 29e79370641d..b80b0e6dabc5 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ecff62405f17..f06d75e5fa54 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 5480cb2236bf..440ca8104b43 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index bbef17368e49..b034f1068dfe 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -744,7 +744,7 @@ typedef u32 acpi_event_status;
744/* 744/*
745 * GPE info flags - Per GPE 745 * GPE info flags - Per GPE
746 * +-------+-+-+---+ 746 * +-------+-+-+---+
747 * | 7:4 |3|2|1:0| 747 * | 7:5 |4|3|2:0|
748 * +-------+-+-+---+ 748 * +-------+-+-+---+
749 * | | | | 749 * | | | |
750 * | | | +-- Type of dispatch:to method, handler, notify, or none 750 * | | | +-- Type of dispatch:to method, handler, notify, or none
@@ -756,13 +756,15 @@ typedef u32 acpi_event_status;
756#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 756#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01
757#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 757#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02
758#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 758#define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03
759#define ACPI_GPE_DISPATCH_MASK (u8) 0x03 759#define ACPI_GPE_DISPATCH_RAW_HANDLER (u8) 0x04
760#define ACPI_GPE_DISPATCH_MASK (u8) 0x07
761#define ACPI_GPE_DISPATCH_TYPE(flags) ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK))
760 762
761#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04 763#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x08
762#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 764#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00
763#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04 765#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08
764 766
765#define ACPI_GPE_CAN_WAKE (u8) 0x08 767#define ACPI_GPE_CAN_WAKE (u8) 0x10
766 768
767/* 769/*
768 * Flags for GPE and Lock interfaces 770 * Flags for GPE and Lock interfaces
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 5f8cc1fa3278..ad74dc51d5b7 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 2b612384c994..71e5ec5b07a3 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 384875da3713..f54de0a63558 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1ba7c190c2cc..74ba46c8157a 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 568d4b886712..acedc3f026de 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d459cd17b477..24c7aa8b1d20 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -27,6 +27,7 @@
27 27
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/ioport.h> /* for struct resource */ 29#include <linux/ioport.h> /* for struct resource */
30#include <linux/resource_ext.h>
30#include <linux/device.h> 31#include <linux/device.h>
31#include <linux/property.h> 32#include <linux/property.h>
32 33
@@ -151,6 +152,10 @@ int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
151int acpi_unmap_cpu(int cpu); 152int acpi_unmap_cpu(int cpu);
152#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 153#endif /* CONFIG_ACPI_HOTPLUG_CPU */
153 154
155#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
156int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
157#endif
158
154int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); 159int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
155int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); 160int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
156int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); 161int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
@@ -288,22 +293,25 @@ extern int pnpacpi_disabled;
288bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); 293bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res);
289bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); 294bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res);
290bool acpi_dev_resource_address_space(struct acpi_resource *ares, 295bool acpi_dev_resource_address_space(struct acpi_resource *ares,
291 struct resource *res); 296 struct resource_win *win);
292bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, 297bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
293 struct resource *res); 298 struct resource_win *win);
294unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); 299unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
295bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, 300bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
296 struct resource *res); 301 struct resource *res);
297 302
298struct resource_list_entry {
299 struct list_head node;
300 struct resource res;
301};
302
303void acpi_dev_free_resource_list(struct list_head *list); 303void acpi_dev_free_resource_list(struct list_head *list);
304int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, 304int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
305 int (*preproc)(struct acpi_resource *, void *), 305 int (*preproc)(struct acpi_resource *, void *),
306 void *preproc_data); 306 void *preproc_data);
307int acpi_dev_filter_resource_type(struct acpi_resource *ares,
308 unsigned long types);
309
310static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
311 void *arg)
312{
313 return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
314}
307 315
308int acpi_check_resource_conflict(const struct resource *res); 316int acpi_check_resource_conflict(const struct resource *res);
309 317
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 642d6ae4030c..a270f25ee7c7 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -21,16 +21,20 @@ struct device;
21struct ata_port_info; 21struct ata_port_info;
22struct ahci_host_priv; 22struct ahci_host_priv;
23struct platform_device; 23struct platform_device;
24struct scsi_host_template;
24 25
25int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); 26int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
26void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); 27void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
28int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
29void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
27int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); 30int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
28void ahci_platform_disable_resources(struct ahci_host_priv *hpriv); 31void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
29struct ahci_host_priv *ahci_platform_get_resources( 32struct ahci_host_priv *ahci_platform_get_resources(
30 struct platform_device *pdev); 33 struct platform_device *pdev);
31int ahci_platform_init_host(struct platform_device *pdev, 34int ahci_platform_init_host(struct platform_device *pdev,
32 struct ahci_host_priv *hpriv, 35 struct ahci_host_priv *hpriv,
33 const struct ata_port_info *pi_template); 36 const struct ata_port_info *pi_template,
37 struct scsi_host_template *sht);
34 38
35int ahci_platform_suspend_host(struct device *dev); 39int ahci_platform_suspend_host(struct device *dev);
36int ahci_platform_resume_host(struct device *dev); 40int ahci_platform_resume_host(struct device *dev);
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f2f4d8da97c0..1648026e06b4 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -503,7 +503,7 @@ struct ata_bmdma_prd {
503#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) 503#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8))
504#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8)) 504#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
505#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) 505#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
506#define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) 506#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
507#define ata_id_has_atapi_AN(id) \ 507#define ata_id_has_atapi_AN(id) \
508 ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \ 508 ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
509 ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \ 509 ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 5c618a084225..619d9e78e644 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -10,12 +10,15 @@ struct pata_platform_info {
10 unsigned int ioport_shift; 10 unsigned int ioport_shift;
11}; 11};
12 12
13struct scsi_host_template;
14
13extern int __pata_platform_probe(struct device *dev, 15extern int __pata_platform_probe(struct device *dev,
14 struct resource *io_res, 16 struct resource *io_res,
15 struct resource *ctl_res, 17 struct resource *ctl_res,
16 struct resource *irq_res, 18 struct resource *irq_res,
17 unsigned int ioport_shift, 19 unsigned int ioport_shift,
18 int __pio_mask); 20 int __pio_mask,
21 struct scsi_host_template *sht);
19 22
20/* 23/*
21 * Marvell SATA private data 24 * Marvell SATA private data
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index da0dae0600e6..b9cb94c3102a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -943,6 +943,8 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
943 943
944#else /* !CONFIG_CGROUPS */ 944#else /* !CONFIG_CGROUPS */
945 945
946struct cgroup_subsys_state;
947
946static inline int cgroup_init_early(void) { return 0; } 948static inline int cgroup_init_early(void) { return 0; }
947static inline int cgroup_init(void) { return 0; } 949static inline int cgroup_init(void) { return 0; }
948static inline void cgroup_fork(struct task_struct *p) {} 950static inline void cgroup_fork(struct task_struct *p) {}
@@ -955,6 +957,8 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
955 return -EINVAL; 957 return -EINVAL;
956} 958}
957 959
960static inline void css_put(struct cgroup_subsys_state *css) {}
961
958/* No cgroups - nothing to do */ 962/* No cgroups - nothing to do */
959static inline int cgroup_attach_task_all(struct task_struct *from, 963static inline int cgroup_attach_task_all(struct task_struct *from,
960 struct task_struct *t) 964 struct task_struct *t)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 98c4f9b12b03..e4a96fb14403 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -15,6 +15,10 @@ SUBSYS(cpu)
15SUBSYS(cpuacct) 15SUBSYS(cpuacct)
16#endif 16#endif
17 17
18#if IS_ENABLED(CONFIG_BLK_CGROUP)
19SUBSYS(blkio)
20#endif
21
18#if IS_ENABLED(CONFIG_MEMCG) 22#if IS_ENABLED(CONFIG_MEMCG)
19SUBSYS(memory) 23SUBSYS(memory)
20#endif 24#endif
@@ -31,10 +35,6 @@ SUBSYS(freezer)
31SUBSYS(net_cls) 35SUBSYS(net_cls)
32#endif 36#endif
33 37
34#if IS_ENABLED(CONFIG_BLK_CGROUP)
35SUBSYS(blkio)
36#endif
37
38#if IS_ENABLED(CONFIG_CGROUP_PERF) 38#if IS_ENABLED(CONFIG_CGROUP_PERF)
39SUBSYS(perf_event) 39SUBSYS(perf_event)
40#endif 40#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 33063f872ee3..176bf816875e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -385,7 +385,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
385 385
386/* Is this type a native word size -- useful for atomic operations */ 386/* Is this type a native word size -- useful for atomic operations */
387#ifndef __native_word 387#ifndef __native_word
388# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) 388# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
389#endif 389#endif
390 390
391/* Compile time object size, -1 for unknown */ 391/* Compile time object size, -1 for unknown */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4d078cebafd2..2ee4888c1f47 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -66,8 +66,6 @@ struct cpufreq_policy {
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 67 should set cpufreq */
68 unsigned int cpu; /* cpu nr of CPU managing this policy */ 68 unsigned int cpu; /* cpu nr of CPU managing this policy */
69 unsigned int last_cpu; /* cpu nr of previous CPU that managed
70 * this policy */
71 struct clk *clk; 69 struct clk *clk;
72 struct cpufreq_cpuinfo cpuinfo;/* see above */ 70 struct cpufreq_cpuinfo cpuinfo;/* see above */
73 71
@@ -113,6 +111,9 @@ struct cpufreq_policy {
113 wait_queue_head_t transition_wait; 111 wait_queue_head_t transition_wait;
114 struct task_struct *transition_task; /* Task which is doing the transition */ 112 struct task_struct *transition_task; /* Task which is doing the transition */
115 113
114 /* cpufreq-stats */
115 struct cpufreq_stats *stats;
116
116 /* For cpufreq driver's internal use */ 117 /* For cpufreq driver's internal use */
117 void *driver_data; 118 void *driver_data;
118}; 119};
@@ -367,9 +368,8 @@ static inline void cpufreq_resume(void) {}
367#define CPUFREQ_INCOMPATIBLE (1) 368#define CPUFREQ_INCOMPATIBLE (1)
368#define CPUFREQ_NOTIFY (2) 369#define CPUFREQ_NOTIFY (2)
369#define CPUFREQ_START (3) 370#define CPUFREQ_START (3)
370#define CPUFREQ_UPDATE_POLICY_CPU (4) 371#define CPUFREQ_CREATE_POLICY (4)
371#define CPUFREQ_CREATE_POLICY (5) 372#define CPUFREQ_REMOVE_POLICY (5)
372#define CPUFREQ_REMOVE_POLICY (6)
373 373
374#ifdef CONFIG_CPU_FREQ 374#ifdef CONFIG_CPU_FREQ
375int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 375int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
new file mode 100644
index 000000000000..602fbbfcfeed
--- /dev/null
+++ b/include/linux/devfreq-event.h
@@ -0,0 +1,196 @@
1/*
2 * devfreq-event: a framework to provide raw data and events of devfreq devices
3 *
4 * Copyright (C) 2014 Samsung Electronics
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __LINUX_DEVFREQ_EVENT_H__
13#define __LINUX_DEVFREQ_EVENT_H__
14
15#include <linux/device.h>
16
17/**
18 * struct devfreq_event_dev - the devfreq-event device
19 *
20 * @node : Contain the devfreq-event device that have been registered.
21 * @dev : the device registered by devfreq-event class. dev.parent is
22 * the device using devfreq-event.
23 * @lock : a mutex to protect accessing devfreq-event.
24 * @enable_count: the number of enable function have been called.
25 * @desc : the description for devfreq-event device.
26 *
27 * This structure contains devfreq-event device information.
28 */
29struct devfreq_event_dev {
30 struct list_head node;
31
32 struct device dev;
33 struct mutex lock;
34 u32 enable_count;
35
36 const struct devfreq_event_desc *desc;
37};
38
39/**
40 * struct devfreq_event_data - the devfreq-event data
41 *
42 * @load_count : load count of devfreq-event device for the given period.
43 * @total_count : total count of devfreq-event device for the given period.
44 * each count may represent a clock cycle, a time unit
45 * (ns/us/...), or anything the device driver wants.
46 * Generally, utilization is load_count / total_count.
47 *
48 * This structure contains the data of devfreq-event device for polling period.
49 */
50struct devfreq_event_data {
51 unsigned long load_count;
52 unsigned long total_count;
53};
54
55/**
56 * struct devfreq_event_ops - the operations of devfreq-event device
57 *
58 * @enable : Enable the devfreq-event device.
59 * @disable : Disable the devfreq-event device.
60 * @reset : Reset all setting of the devfreq-event device.
61 * @set_event : Set the specific event type for the devfreq-event device.
62 * @get_event : Get the result of the devfreq-event devie with specific
63 * event type.
64 *
65 * This structure contains devfreq-event device operations which can be
66 * implemented by devfreq-event device drivers.
67 */
68struct devfreq_event_ops {
69 /* Optional functions */
70 int (*enable)(struct devfreq_event_dev *edev);
71 int (*disable)(struct devfreq_event_dev *edev);
72 int (*reset)(struct devfreq_event_dev *edev);
73
74 /* Mandatory functions */
75 int (*set_event)(struct devfreq_event_dev *edev);
76 int (*get_event)(struct devfreq_event_dev *edev,
77 struct devfreq_event_data *edata);
78};
79
80/**
81 * struct devfreq_event_desc - the descriptor of devfreq-event device
82 *
83 * @name : the name of devfreq-event device.
84 * @driver_data : the private data for devfreq-event driver.
85 * @ops : the operation to control devfreq-event device.
86 *
87 * Each devfreq-event device is described with a this structure.
88 * This structure contains the various data for devfreq-event device.
89 */
90struct devfreq_event_desc {
91 const char *name;
92 void *driver_data;
93
94 struct devfreq_event_ops *ops;
95};
96
97#if defined(CONFIG_PM_DEVFREQ_EVENT)
98extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
99extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
100extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
101extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
102extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
103 struct devfreq_event_data *edata);
104extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
105extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
106 struct device *dev, int index);
107extern int devfreq_event_get_edev_count(struct device *dev);
108extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
109 struct devfreq_event_desc *desc);
110extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
111extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
112 struct devfreq_event_desc *desc);
113extern void devm_devfreq_event_remove_edev(struct device *dev,
114 struct devfreq_event_dev *edev);
115static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
116{
117 return edev->desc->driver_data;
118}
119#else
120static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
121{
122 return -EINVAL;
123}
124
125static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
126{
127 return -EINVAL;
128}
129
130static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
131{
132 return false;
133}
134
135static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
136{
137 return -EINVAL;
138}
139
140static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
141 struct devfreq_event_data *edata)
142{
143 return -EINVAL;
144}
145
146static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
147{
148 return -EINVAL;
149}
150
151static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
152{
153 return ERR_PTR(-EINVAL);
154}
155
156static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
157 struct device *dev, int index)
158{
159 return ERR_PTR(-EINVAL);
160}
161
162static inline int devfreq_event_get_edev_count(struct device *dev)
163{
164 return -EINVAL;
165}
166
167static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
168 struct devfreq_event_desc *desc)
169{
170 return ERR_PTR(-EINVAL);
171}
172
173static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
174{
175 return -EINVAL;
176}
177
178static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
179 struct device *dev,
180 struct devfreq_event_desc *desc)
181{
182 return ERR_PTR(-EINVAL);
183}
184
185static inline void devm_devfreq_event_remove_edev(struct device *dev,
186 struct devfreq_event_dev *edev)
187{
188}
189
190static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
191{
192 return NULL;
193}
194#endif /* CONFIG_PM_DEVFREQ_EVENT */
195
196#endif /* __LINUX_DEVFREQ_EVENT_H__ */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0238d612750e..b674837e2b98 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -848,7 +848,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
848} 848}
849 849
850static inline char * 850static inline char *
851efi_guid_unparse(efi_guid_t *guid, char *out) 851efi_guid_to_str(efi_guid_t *guid, char *out)
852{ 852{
853 sprintf(out, "%pUl", guid->b); 853 sprintf(out, "%pUl", guid->b);
854 return out; 854 return out;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 0bebb5c348b8..d36f68b08acc 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -595,7 +595,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
595 char *filter_str); 595 char *filter_str);
596extern void ftrace_profile_free_filter(struct perf_event *event); 596extern void ftrace_profile_free_filter(struct perf_event *event);
597extern void *perf_trace_buf_prepare(int size, unsigned short type, 597extern void *perf_trace_buf_prepare(int size, unsigned short type,
598 struct pt_regs *regs, int *rctxp); 598 struct pt_regs **regs, int *rctxp);
599 599
600static inline void 600static inline void
601perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, 601perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index a036d058a249..05f6df1fdf5b 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -170,6 +170,7 @@ enum hrtimer_base_type {
170 * @clock_was_set: Indicates that clock was set from irq context. 170 * @clock_was_set: Indicates that clock was set from irq context.
171 * @expires_next: absolute time of the next event which was scheduled 171 * @expires_next: absolute time of the next event which was scheduled
172 * via clock_set_next_event() 172 * via clock_set_next_event()
173 * @in_hrtirq: hrtimer_interrupt() is currently executing
173 * @hres_active: State of high resolution mode 174 * @hres_active: State of high resolution mode
174 * @hang_detected: The last hrtimer interrupt detected a hang 175 * @hang_detected: The last hrtimer interrupt detected a hang
175 * @nr_events: Total number of hrtimer interrupt events 176 * @nr_events: Total number of hrtimer interrupt events
@@ -185,6 +186,7 @@ struct hrtimer_cpu_base {
185 unsigned int clock_was_set; 186 unsigned int clock_was_set;
186#ifdef CONFIG_HIGH_RES_TIMERS 187#ifdef CONFIG_HIGH_RES_TIMERS
187 ktime_t expires_next; 188 ktime_t expires_next;
189 int in_hrtirq;
188 int hres_active; 190 int hres_active;
189 int hang_detected; 191 int hang_detected;
190 unsigned long nr_events; 192 unsigned long nr_events;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index c9d645ad98ff..5fc3d1083071 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -166,7 +166,17 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
166} 166}
167 167
168#if BITS_PER_LONG < 64 168#if BITS_PER_LONG < 64
169extern u64 ktime_divns(const ktime_t kt, s64 div); 169extern u64 __ktime_divns(const ktime_t kt, s64 div);
170static inline u64 ktime_divns(const ktime_t kt, s64 div)
171{
172 if (__builtin_constant_p(div) && !(div >> 32)) {
173 u64 ns = kt.tv64;
174 do_div(ns, div);
175 return ns;
176 } else {
177 return __ktime_divns(kt, div);
178 }
179}
170#else /* BITS_PER_LONG < 64 */ 180#else /* BITS_PER_LONG < 64 */
171# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) 181# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
172#endif 182#endif
@@ -186,6 +196,11 @@ static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
186 return ktime_to_us(ktime_sub(later, earlier)); 196 return ktime_to_us(ktime_sub(later, earlier));
187} 197}
188 198
199static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
200{
201 return ktime_to_ms(ktime_sub(later, earlier));
202}
203
189static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) 204static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
190{ 205{
191 return ktime_add_ns(kt, usec * NSEC_PER_USEC); 206 return ktime_add_ns(kt, usec * NSEC_PER_USEC);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 91f705de2c0b..61df823ac86a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1340,6 +1340,12 @@ extern const struct ata_port_operations ata_base_port_ops;
1340extern const struct ata_port_operations sata_port_ops; 1340extern const struct ata_port_operations sata_port_ops;
1341extern struct device_attribute *ata_common_sdev_attrs[]; 1341extern struct device_attribute *ata_common_sdev_attrs[];
1342 1342
1343/*
1344 * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
1345 * by the edge drivers. Because the 'module' field of sht must be the
1346 * edge driver's module reference, otherwise the driver can be unloaded
1347 * even if the scsi_device is being accessed.
1348 */
1343#define ATA_BASE_SHT(drv_name) \ 1349#define ATA_BASE_SHT(drv_name) \
1344 .module = THIS_MODULE, \ 1350 .module = THIS_MODULE, \
1345 .name = drv_name, \ 1351 .name = drv_name, \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dd5ea3016fc4..237b3ba29225 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -290,6 +290,14 @@ struct vm_operations_struct {
290 /* called by sys_remap_file_pages() to populate non-linear mapping */ 290 /* called by sys_remap_file_pages() to populate non-linear mapping */
291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, 291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
292 unsigned long size, pgoff_t pgoff); 292 unsigned long size, pgoff_t pgoff);
293
294 /*
295 * Called by vm_normal_page() for special PTEs to find the
296 * page for @addr. This is useful if the default behavior
297 * (using pte_page()) would not find the correct page.
298 */
299 struct page *(*find_special_page)(struct vm_area_struct *vma,
300 unsigned long addr);
293}; 301};
294 302
295struct mmu_gather; 303struct mmu_gather;
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 90230d5811c5..3a6490e81b28 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -5,8 +5,11 @@
5 * An MCS like lock especially tailored for optimistic spinning for sleeping 5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc). 6 * lock implementations (mutex, rwsem, etc).
7 */ 7 */
8 8struct optimistic_spin_node {
9#define OSQ_UNLOCKED_VAL (0) 9 struct optimistic_spin_node *next, *prev;
10 int locked; /* 1 if lock acquired */
11 int cpu; /* encoded CPU # + 1 value */
12};
10 13
11struct optimistic_spin_queue { 14struct optimistic_spin_queue {
12 /* 15 /*
@@ -16,6 +19,8 @@ struct optimistic_spin_queue {
16 atomic_t tail; 19 atomic_t tail;
17}; 20};
18 21
22#define OSQ_UNLOCKED_VAL (0)
23
19/* Init macro and function. */ 24/* Init macro and function. */
20#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } 25#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
21 26
@@ -24,4 +29,7 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
24 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); 29 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
25} 30}
26 31
32extern bool osq_lock(struct optimistic_spin_queue *lock);
33extern void osq_unlock(struct optimistic_spin_queue *lock);
34
27#endif 35#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e1f5fcd79792..5ed7bdaf22d5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -121,8 +121,12 @@ enum pageflags {
121 PG_fscache = PG_private_2, /* page backed by cache */ 121 PG_fscache = PG_private_2, /* page backed by cache */
122 122
123 /* XEN */ 123 /* XEN */
124 /* Pinned in Xen as a read-only pagetable page. */
124 PG_pinned = PG_owner_priv_1, 125 PG_pinned = PG_owner_priv_1,
126 /* Pinned as part of domain save (see xen_mm_pin_all()). */
125 PG_savepinned = PG_dirty, 127 PG_savepinned = PG_dirty,
128 /* Has a grant mapping of another (foreign) domain's page. */
129 PG_foreign = PG_owner_priv_1,
126 130
127 /* SLOB */ 131 /* SLOB */
128 PG_slob_free = PG_private, 132 PG_slob_free = PG_private,
@@ -215,6 +219,7 @@ __PAGEFLAG(Slab, slab)
215PAGEFLAG(Checked, checked) /* Used by some filesystems */ 219PAGEFLAG(Checked, checked) /* Used by some filesystems */
216PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 220PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
217PAGEFLAG(SavePinned, savepinned); /* Xen */ 221PAGEFLAG(SavePinned, savepinned); /* Xen */
222PAGEFLAG(Foreign, foreign); /* Xen */
218PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 223PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
219PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) 224PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
220 __SETPAGEFLAG(SwapBacked, swapbacked) 225 __SETPAGEFLAG(SwapBacked, swapbacked)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9603094ed59b..211e9da8a7d7 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -29,6 +29,7 @@
29#include <linux/atomic.h> 29#include <linux/atomic.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/resource_ext.h>
32#include <uapi/linux/pci.h> 33#include <uapi/linux/pci.h>
33 34
34#include <linux/pci_ids.h> 35#include <linux/pci_ids.h>
@@ -177,6 +178,8 @@ enum pci_dev_flags {
177 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), 178 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
178 /* Do not use bus resets for device */ 179 /* Do not use bus resets for device */
179 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6), 180 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
181 /* Do not use PM reset even if device advertises NoSoftRst- */
182 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
180}; 183};
181 184
182enum pci_irq_reroute_variant { 185enum pci_irq_reroute_variant {
@@ -397,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
397 return (pdev->error_state != pci_channel_io_normal); 400 return (pdev->error_state != pci_channel_io_normal);
398} 401}
399 402
400struct pci_host_bridge_window {
401 struct list_head list;
402 struct resource *res; /* host bridge aperture (CPU address) */
403 resource_size_t offset; /* bus address + offset = CPU address */
404};
405
406struct pci_host_bridge { 403struct pci_host_bridge {
407 struct device dev; 404 struct device dev;
408 struct pci_bus *bus; /* root bus */ 405 struct pci_bus *bus; /* root bus */
409 struct list_head windows; /* pci_host_bridge_windows */ 406 struct list_head windows; /* resource_entry */
410 void (*release_fn)(struct pci_host_bridge *); 407 void (*release_fn)(struct pci_host_bridge *);
411 void *release_data; 408 void *release_data;
412}; 409};
@@ -562,6 +559,7 @@ static inline int pcibios_err_to_errno(int err)
562/* Low-level architecture-dependent routines */ 559/* Low-level architecture-dependent routines */
563 560
564struct pci_ops { 561struct pci_ops {
562 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
565 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); 563 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
566 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); 564 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
567}; 565};
@@ -859,6 +857,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
859 int where, u16 val); 857 int where, u16 val);
860int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, 858int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
861 int where, u32 val); 859 int where, u32 val);
860
861int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
862 int where, int size, u32 *val);
863int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
864 int where, int size, u32 val);
865int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
866 int where, int size, u32 *val);
867int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
868 int where, int size, u32 val);
869
862struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); 870struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
863 871
864static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) 872static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
@@ -1850,6 +1858,8 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
1850static inline void pci_release_of_node(struct pci_dev *dev) { } 1858static inline void pci_release_of_node(struct pci_dev *dev) { }
1851static inline void pci_set_bus_of_node(struct pci_bus *bus) { } 1859static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
1852static inline void pci_release_bus_of_node(struct pci_bus *bus) { } 1860static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
1861static inline struct device_node *
1862pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
1853#endif /* CONFIG_OF */ 1863#endif /* CONFIG_OF */
1854 1864
1855#ifdef CONFIG_EEH 1865#ifdef CONFIG_EEH
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b4337646388b..12c9b485beb7 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
128static inline bool __ref_is_percpu(struct percpu_ref *ref, 128static inline bool __ref_is_percpu(struct percpu_ref *ref,
129 unsigned long __percpu **percpu_countp) 129 unsigned long __percpu **percpu_countp)
130{ 130{
131 /* paired with smp_store_release() in percpu_ref_reinit() */ 131 unsigned long percpu_ptr;
132 unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); 132
133 /*
134 * The value of @ref->percpu_count_ptr is tested for
135 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
136 * used as a pointer. If the compiler generates a separate fetch
137 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
138 * between contaminating the pointer value, meaning that
139 * ACCESS_ONCE() is required when fetching it.
140 *
141 * Also, we need a data dependency barrier to be paired with
142 * smp_store_release() in __percpu_ref_switch_to_percpu().
143 *
144 * Use lockless deref which contains both.
145 */
146 percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
133 147
134 /* 148 /*
135 * Theoretically, the following could test just ATOMIC; however, 149 * Theoretically, the following could test just ATOMIC; however,
@@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
233 if (__ref_is_percpu(ref, &percpu_count)) { 247 if (__ref_is_percpu(ref, &percpu_count)) {
234 this_cpu_inc(*percpu_count); 248 this_cpu_inc(*percpu_count);
235 ret = true; 249 ret = true;
236 } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) { 250 } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
237 ret = atomic_long_inc_not_zero(&ref->count); 251 ret = atomic_long_inc_not_zero(&ref->count);
238 } 252 }
239 253
@@ -281,6 +295,20 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
281} 295}
282 296
283/** 297/**
298 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
299 * @ref: percpu_ref to test
300 *
301 * Returns %true if @ref is dying or dead.
302 *
303 * This function is safe to call as long as @ref is between init and exit
304 * and the caller is responsible for synchronizing against state changes.
305 */
306static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
307{
308 return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
309}
310
311/**
284 * percpu_ref_is_zero - test whether a percpu refcount reached zero 312 * percpu_ref_is_zero - test whether a percpu refcount reached zero
285 * @ref: percpu_ref to test 313 * @ref: percpu_ref to test
286 * 314 *
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 664de5a4ec46..5cad0e6f3552 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -469,6 +469,7 @@ struct perf_event_context {
469 */ 469 */
470 struct mutex mutex; 470 struct mutex mutex;
471 471
472 struct list_head active_ctx_list;
472 struct list_head pinned_groups; 473 struct list_head pinned_groups;
473 struct list_head flexible_groups; 474 struct list_head flexible_groups;
474 struct list_head event_list; 475 struct list_head event_list;
@@ -519,7 +520,6 @@ struct perf_cpu_context {
519 int exclusive; 520 int exclusive;
520 struct hrtimer hrtimer; 521 struct hrtimer hrtimer;
521 ktime_t hrtimer_interval; 522 ktime_t hrtimer_interval;
522 struct list_head rotation_list;
523 struct pmu *unique_pmu; 523 struct pmu *unique_pmu;
524 struct perf_cgroup *cgrp; 524 struct perf_cgroup *cgrp;
525}; 525};
@@ -659,6 +659,7 @@ static inline int is_software_event(struct perf_event *event)
659 659
660extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; 660extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
661 661
662extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
662extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); 663extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
663 664
664#ifndef perf_arch_fetch_caller_regs 665#ifndef perf_arch_fetch_caller_regs
@@ -683,14 +684,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
683static __always_inline void 684static __always_inline void
684perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 685perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
685{ 686{
686 struct pt_regs hot_regs; 687 if (static_key_false(&perf_swevent_enabled[event_id]))
688 __perf_sw_event(event_id, nr, regs, addr);
689}
690
691DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
687 692
693/*
694 * 'Special' version for the scheduler, it hard assumes no recursion,
695 * which is guaranteed by us not actually scheduling inside other swevents
696 * because those disable preemption.
697 */
698static __always_inline void
699perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
700{
688 if (static_key_false(&perf_swevent_enabled[event_id])) { 701 if (static_key_false(&perf_swevent_enabled[event_id])) {
689 if (!regs) { 702 struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
690 perf_fetch_caller_regs(&hot_regs); 703
691 regs = &hot_regs; 704 perf_fetch_caller_regs(regs);
692 } 705 ___perf_sw_event(event_id, nr, regs, addr);
693 __perf_sw_event(event_id, nr, regs, addr);
694 } 706 }
695} 707}
696 708
@@ -706,7 +718,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
706static inline void perf_event_task_sched_out(struct task_struct *prev, 718static inline void perf_event_task_sched_out(struct task_struct *prev,
707 struct task_struct *next) 719 struct task_struct *next)
708{ 720{
709 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 721 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
710 722
711 if (static_key_false(&perf_sched_events.key)) 723 if (static_key_false(&perf_sched_events.key))
712 __perf_event_task_sched_out(prev, next); 724 __perf_event_task_sched_out(prev, next);
@@ -817,6 +829,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
817static inline void 829static inline void
818perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } 830perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
819static inline void 831static inline void
832perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
833static inline void
820perf_bp_event(struct perf_event *event, void *data) { } 834perf_bp_event(struct perf_event *event, void *data) { }
821 835
822static inline int perf_register_guest_info_callbacks 836static inline int perf_register_guest_info_callbacks
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 8b5976364619..e2f1be6dd9dd 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -597,7 +597,7 @@ struct dev_pm_info {
597 597
598extern void update_pm_runtime_accounting(struct device *dev); 598extern void update_pm_runtime_accounting(struct device *dev);
599extern int dev_pm_get_subsys_data(struct device *dev); 599extern int dev_pm_get_subsys_data(struct device *dev);
600extern int dev_pm_put_subsys_data(struct device *dev); 600extern void dev_pm_put_subsys_data(struct device *dev);
601 601
602/* 602/*
603 * Power domains provide callbacks that are executed during system suspend, 603 * Power domains provide callbacks that are executed during system suspend,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a9edab2c787a..080e778118ba 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -113,8 +113,6 @@ struct generic_pm_domain_data {
113 struct pm_domain_data base; 113 struct pm_domain_data base;
114 struct gpd_timing_data td; 114 struct gpd_timing_data td;
115 struct notifier_block nb; 115 struct notifier_block nb;
116 struct mutex lock;
117 unsigned int refcount;
118 int need_restore; 116 int need_restore;
119}; 117};
120 118
@@ -140,7 +138,6 @@ extern int __pm_genpd_name_add_device(const char *domain_name,
140 138
141extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, 139extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
142 struct device *dev); 140 struct device *dev);
143extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
144extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 141extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
145 struct generic_pm_domain *new_subdomain); 142 struct generic_pm_domain *new_subdomain);
146extern int pm_genpd_add_subdomain_names(const char *master_name, 143extern int pm_genpd_add_subdomain_names(const char *master_name,
@@ -187,7 +184,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
187{ 184{
188 return -ENOSYS; 185 return -ENOSYS;
189} 186}
190static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
191static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 187static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
192 struct generic_pm_domain *new_sd) 188 struct generic_pm_domain *new_sd)
193{ 189{
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 77aed9ea1d26..dab545bb66b3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -37,6 +37,7 @@
37#define SSDR (0x10) /* SSP Data Write/Data Read Register */ 37#define SSDR (0x10) /* SSP Data Write/Data Read Register */
38 38
39#define SSTO (0x28) /* SSP Time Out Register */ 39#define SSTO (0x28) /* SSP Time Out Register */
40#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
40#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */ 41#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
41#define SSTSA (0x30) /* SSP Tx Timeslot Active */ 42#define SSTSA (0x30) /* SSP Tx Timeslot Active */
42#define SSRSA (0x34) /* SSP Rx Timeslot Active */ 43#define SSRSA (0x34) /* SSP Rx Timeslot Active */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 529bc946f450..a18b16f1dc0e 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
524 * @member: the name of the hlist_node within the struct. 524 * @member: the name of the hlist_node within the struct.
525 */ 525 */
526#define hlist_for_each_entry_continue_rcu(pos, member) \ 526#define hlist_for_each_entry_continue_rcu(pos, member) \
527 for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 527 for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
528 typeof(*(pos)), member); \ 528 &(pos)->member)), typeof(*(pos)), member); \
529 pos; \ 529 pos; \
530 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ 530 pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
531 typeof(*(pos)), member)) 531 &(pos)->member)), typeof(*(pos)), member))
532 532
533/** 533/**
534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point 534 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
536 * @member: the name of the hlist_node within the struct. 536 * @member: the name of the hlist_node within the struct.
537 */ 537 */
538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \ 538#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
539 for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 539 for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
540 typeof(*(pos)), member); \ 540 &(pos)->member)), typeof(*(pos)), member); \
541 pos; \ 541 pos; \
542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 542 pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
543 typeof(*(pos)), member)) 543 &(pos)->member)), typeof(*(pos)), member))
544 544
545/** 545/**
546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point 546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ed4f5939a452..78097491cd99 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
331extern struct srcu_struct tasks_rcu_exit_srcu; 331extern struct srcu_struct tasks_rcu_exit_srcu;
332#define rcu_note_voluntary_context_switch(t) \ 332#define rcu_note_voluntary_context_switch(t) \
333 do { \ 333 do { \
334 rcu_all_qs(); \
334 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ 335 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
335 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ 336 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
336 } while (0) 337 } while (0)
337#else /* #ifdef CONFIG_TASKS_RCU */ 338#else /* #ifdef CONFIG_TASKS_RCU */
338#define TASKS_RCU(x) do { } while (0) 339#define TASKS_RCU(x) do { } while (0)
339#define rcu_note_voluntary_context_switch(t) do { } while (0) 340#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
340#endif /* #else #ifdef CONFIG_TASKS_RCU */ 341#endif /* #else #ifdef CONFIG_TASKS_RCU */
341 342
342/** 343/**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
582}) 583})
583#define __rcu_dereference_check(p, c, space) \ 584#define __rcu_dereference_check(p, c, space) \
584({ \ 585({ \
585 typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ 586 /* Dependency order vs. p above. */ \
587 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
586 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ 588 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
587 rcu_dereference_sparse(p, space); \ 589 rcu_dereference_sparse(p, space); \
588 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ 590 ((typeof(*p) __force __kernel *)(________p1)); \
589 ((typeof(*p) __force __kernel *)(_________p1)); \
590}) 591})
591#define __rcu_dereference_protected(p, c, space) \ 592#define __rcu_dereference_protected(p, c, space) \
592({ \ 593({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
603}) 604})
604#define __rcu_dereference_index_check(p, c) \ 605#define __rcu_dereference_index_check(p, c) \
605({ \ 606({ \
606 typeof(p) _________p1 = ACCESS_ONCE(p); \ 607 /* Dependency order vs. p above. */ \
608 typeof(p) _________p1 = lockless_dereference(p); \
607 rcu_lockdep_assert(c, \ 609 rcu_lockdep_assert(c, \
608 "suspicious rcu_dereference_index_check() usage"); \ 610 "suspicious rcu_dereference_index_check() usage"); \
609 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
610 (_________p1); \ 611 (_________p1); \
611}) 612})
612 613
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 0e5366200154..937edaeb150d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
92} 92}
93 93
94/* 94/*
95 * Return the number of grace periods. 95 * Return the number of grace periods started.
96 */ 96 */
97static inline long rcu_batches_completed(void) 97static inline unsigned long rcu_batches_started(void)
98{ 98{
99 return 0; 99 return 0;
100} 100}
101 101
102/* 102/*
103 * Return the number of bottom-half grace periods. 103 * Return the number of bottom-half grace periods started.
104 */ 104 */
105static inline long rcu_batches_completed_bh(void) 105static inline unsigned long rcu_batches_started_bh(void)
106{
107 return 0;
108}
109
110/*
111 * Return the number of sched grace periods started.
112 */
113static inline unsigned long rcu_batches_started_sched(void)
114{
115 return 0;
116}
117
118/*
119 * Return the number of grace periods completed.
120 */
121static inline unsigned long rcu_batches_completed(void)
122{
123 return 0;
124}
125
126/*
127 * Return the number of bottom-half grace periods completed.
128 */
129static inline unsigned long rcu_batches_completed_bh(void)
130{
131 return 0;
132}
133
134/*
135 * Return the number of sched grace periods completed.
136 */
137static inline unsigned long rcu_batches_completed_sched(void)
106{ 138{
107 return 0; 139 return 0;
108} 140}
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
154 return true; 186 return true;
155} 187}
156 188
157
158#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 189#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
159 190
191static inline void rcu_all_qs(void)
192{
193}
194
160#endif /* __LINUX_RCUTINY_H */ 195#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 52953790dcca..d2e583a6aaca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
81 81
82extern unsigned long rcutorture_testseq; 82extern unsigned long rcutorture_testseq;
83extern unsigned long rcutorture_vernum; 83extern unsigned long rcutorture_vernum;
84long rcu_batches_completed(void); 84unsigned long rcu_batches_started(void);
85long rcu_batches_completed_bh(void); 85unsigned long rcu_batches_started_bh(void);
86long rcu_batches_completed_sched(void); 86unsigned long rcu_batches_started_sched(void);
87unsigned long rcu_batches_completed(void);
88unsigned long rcu_batches_completed_bh(void);
89unsigned long rcu_batches_completed_sched(void);
87void show_rcu_gp_kthreads(void); 90void show_rcu_gp_kthreads(void);
88 91
89void rcu_force_quiescent_state(void); 92void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
97 100
98bool rcu_is_watching(void); 101bool rcu_is_watching(void);
99 102
103void rcu_all_qs(void);
104
100#endif /* __LINUX_RCUTREE_H */ 105#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4419b99d8d6e..116655d92269 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
468 * 468 *
469 * @reg: Offset of the register within the regmap bank 469 * @reg: Offset of the register within the regmap bank
470 * @lsb: lsb of the register field. 470 * @lsb: lsb of the register field.
471 * @reg: msb of the register field. 471 * @msb: msb of the register field.
472 * @id_size: port size if it has some ports 472 * @id_size: port size if it has some ports
473 * @id_offset: address offset for each ports 473 * @id_offset: address offset for each ports
474 */ 474 */
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5479394fefce..5dd65acc2a69 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -32,6 +32,8 @@ struct da9211_pdata {
32 * 2 : 2 phase 2 buck 32 * 2 : 2 phase 2 buck
33 */ 33 */
34 int num_buck; 34 int num_buck;
35 int gpio_ren[DA9211_MAX_REGULATORS];
36 struct device_node *reg_node[DA9211_MAX_REGULATORS];
35 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS]; 37 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
36}; 38};
37#endif 39#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 5f1e9ca47417..d4ad5b5a02bb 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -21,6 +21,7 @@
21 21
22struct regmap; 22struct regmap;
23struct regulator_dev; 23struct regulator_dev;
24struct regulator_config;
24struct regulator_init_data; 25struct regulator_init_data;
25struct regulator_enable_gpio; 26struct regulator_enable_gpio;
26 27
@@ -205,6 +206,15 @@ enum regulator_type {
205 * @supply_name: Identifying the regulator supply 206 * @supply_name: Identifying the regulator supply
206 * @of_match: Name used to identify regulator in DT. 207 * @of_match: Name used to identify regulator in DT.
207 * @regulators_node: Name of node containing regulator definitions in DT. 208 * @regulators_node: Name of node containing regulator definitions in DT.
209 * @of_parse_cb: Optional callback called only if of_match is present.
210 * Will be called for each regulator parsed from DT, during
211 * init_data parsing.
212 * The regulator_config passed as argument to the callback will
213 * be a copy of config passed to regulator_register, valid only
214 * for this particular call. Callback may freely change the
215 * config but it cannot store it for later usage.
216 * Callback should return 0 on success or negative ERRNO
217 * indicating failure.
208 * @id: Numerical identifier for the regulator. 218 * @id: Numerical identifier for the regulator.
209 * @ops: Regulator operations table. 219 * @ops: Regulator operations table.
210 * @irq: Interrupt number for the regulator. 220 * @irq: Interrupt number for the regulator.
@@ -251,6 +261,9 @@ struct regulator_desc {
251 const char *supply_name; 261 const char *supply_name;
252 const char *of_match; 262 const char *of_match;
253 const char *regulators_node; 263 const char *regulators_node;
264 int (*of_parse_cb)(struct device_node *,
265 const struct regulator_desc *,
266 struct regulator_config *);
254 int id; 267 int id;
255 bool continuous_voltage_range; 268 bool continuous_voltage_range;
256 unsigned n_voltages; 269 unsigned n_voltages;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0b08d05d470b..b07562e082c4 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -191,15 +191,22 @@ struct regulator_init_data {
191 void *driver_data; /* core does not touch this */ 191 void *driver_data; /* core does not touch this */
192}; 192};
193 193
194int regulator_suspend_prepare(suspend_state_t state);
195int regulator_suspend_finish(void);
196
197#ifdef CONFIG_REGULATOR 194#ifdef CONFIG_REGULATOR
198void regulator_has_full_constraints(void); 195void regulator_has_full_constraints(void);
196int regulator_suspend_prepare(suspend_state_t state);
197int regulator_suspend_finish(void);
199#else 198#else
200static inline void regulator_has_full_constraints(void) 199static inline void regulator_has_full_constraints(void)
201{ 200{
202} 201}
202static inline int regulator_suspend_prepare(suspend_state_t state)
203{
204 return 0;
205}
206static inline int regulator_suspend_finish(void)
207{
208 return 0;
209}
203#endif 210#endif
204 211
205#endif 212#endif
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 000000000000..30cc5963e265
--- /dev/null
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Flora Fu <flora.fu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __LINUX_REGULATOR_MT6397_H
16#define __LINUX_REGULATOR_MT6397_H
17
18enum {
19 MT6397_ID_VPCA15 = 0,
20 MT6397_ID_VPCA7,
21 MT6397_ID_VSRAMCA15,
22 MT6397_ID_VSRAMCA7,
23 MT6397_ID_VCORE,
24 MT6397_ID_VGPU,
25 MT6397_ID_VDRM,
26 MT6397_ID_VIO18 = 7,
27 MT6397_ID_VTCXO,
28 MT6397_ID_VA28,
29 MT6397_ID_VCAMA,
30 MT6397_ID_VIO28,
31 MT6397_ID_VUSB,
32 MT6397_ID_VMC,
33 MT6397_ID_VMCH,
34 MT6397_ID_VEMC3V3,
35 MT6397_ID_VGP1,
36 MT6397_ID_VGP2,
37 MT6397_ID_VGP3,
38 MT6397_ID_VGP4,
39 MT6397_ID_VGP5,
40 MT6397_ID_VGP6,
41 MT6397_ID_VIBR,
42 MT6397_ID_RG_MAX,
43};
44
45#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX
46#define MT6397_REGULATOR_ID97 0x97
47#define MT6397_REGULATOR_ID91 0x91
48
49#endif /* __LINUX_REGULATOR_MT6397_H */
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 364f7a7c43db..70c6c66c5bcf 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -49,6 +49,20 @@
49#define PFUZE200_VGEN5 11 49#define PFUZE200_VGEN5 11
50#define PFUZE200_VGEN6 12 50#define PFUZE200_VGEN6 12
51 51
52#define PFUZE3000_SW1A 0
53#define PFUZE3000_SW1B 1
54#define PFUZE3000_SW2 2
55#define PFUZE3000_SW3 3
56#define PFUZE3000_SWBST 4
57#define PFUZE3000_VSNVS 5
58#define PFUZE3000_VREFDDR 6
59#define PFUZE3000_VLDO1 7
60#define PFUZE3000_VLDO2 8
61#define PFUZE3000_VCCSD 9
62#define PFUZE3000_V33 10
63#define PFUZE3000_VLDO3 11
64#define PFUZE3000_VLDO4 12
65
52struct regulator_init_data; 66struct regulator_init_data;
53 67
54struct pfuze_regulator_platform_data { 68struct pfuze_regulator_platform_data {
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
new file mode 100644
index 000000000000..e2bf63d881d4
--- /dev/null
+++ b/include/linux/resource_ext.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2015, Intel Corporation
3 * Author: Jiang Liu <jiang.liu@linux.intel.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#ifndef _LINUX_RESOURCE_EXT_H
15#define _LINUX_RESOURCE_EXT_H
16#include <linux/types.h>
17#include <linux/list.h>
18#include <linux/ioport.h>
19#include <linux/slab.h>
20
21/* Represent resource window for bridge devices */
22struct resource_win {
23 struct resource res; /* In master (CPU) address space */
24 resource_size_t offset; /* Translation offset for bridge */
25};
26
27/*
28 * Common resource list management data structure and interfaces to support
29 * ACPI, PNP and PCI host bridge etc.
30 */
31struct resource_entry {
32 struct list_head node;
33 struct resource *res; /* In master (CPU) address space */
34 resource_size_t offset; /* Translation offset for bridge */
35 struct resource __res; /* Default storage for res */
36};
37
38extern struct resource_entry *
39resource_list_create_entry(struct resource *res, size_t extra_size);
40extern void resource_list_free(struct list_head *head);
41
42static inline void resource_list_add(struct resource_entry *entry,
43 struct list_head *head)
44{
45 list_add(&entry->node, head);
46}
47
48static inline void resource_list_add_tail(struct resource_entry *entry,
49 struct list_head *head)
50{
51 list_add_tail(&entry->node, head);
52}
53
54static inline void resource_list_del(struct resource_entry *entry)
55{
56 list_del(&entry->node);
57}
58
59static inline void resource_list_free_entry(struct resource_entry *entry)
60{
61 kfree(entry);
62}
63
64static inline void
65resource_list_destroy_entry(struct resource_entry *entry)
66{
67 resource_list_del(entry);
68 resource_list_free_entry(entry);
69}
70
71#define resource_list_for_each_entry(entry, list) \
72 list_for_each_entry((entry), (list), node)
73
74#define resource_list_for_each_entry_safe(entry, tmp, list) \
75 list_for_each_entry_safe((entry), (tmp), (list), node)
76
77#endif /* _LINUX_RESOURCE_EXT_H */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 6d6be09a2fe5..dcad7ee0d746 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -161,7 +161,7 @@ extern void devm_rtc_device_unregister(struct device *dev,
161extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); 161extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
162extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); 162extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
163extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); 163extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
164extern int rtc_set_ntp_time(struct timespec now); 164extern int rtc_set_ntp_time(struct timespec64 now);
165int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); 165int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
166extern int rtc_read_alarm(struct rtc_device *rtc, 166extern int rtc_read_alarm(struct rtc_device *rtc,
167 struct rtc_wkalrm *alrm); 167 struct rtc_wkalrm *alrm);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 93dff5fff524..be91db2a7017 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -151,6 +151,13 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
151static inline void kick_all_cpus_sync(void) { } 151static inline void kick_all_cpus_sync(void) { }
152static inline void wake_up_all_idle_cpus(void) { } 152static inline void wake_up_all_idle_cpus(void) { }
153 153
154#ifdef CONFIG_UP_LATE_INIT
155extern void __init up_late_init(void);
156static inline void smp_init(void) { up_late_init(); }
157#else
158static inline void smp_init(void) { }
159#endif
160
154#endif /* !SMP */ 161#endif /* !SMP */
155 162
156/* 163/*
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
index b2b1afbb3202..cd519a11c2c6 100644
--- a/include/linux/spi/at86rf230.h
+++ b/include/linux/spi/at86rf230.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Written by: 15 * Written by:
20 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com> 16 * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
21 */ 17 */
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
index bc8677c8eba9..e69e9b51b21a 100644
--- a/include/linux/spi/l4f00242t03.h
+++ b/include/linux/spi/l4f00242t03.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ 17#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
index 555d254e6606..fdd1d1d51da5 100644
--- a/include/linux/spi/lms283gf05.h
+++ b/include/linux/spi/lms283gf05.h
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18*/ 14*/
19 15
20#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ 16#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
index 4835486f58e5..381d368b91b4 100644
--- a/include/linux/spi/mxs-spi.h
+++ b/include/linux/spi/mxs-spi.h
@@ -15,10 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */ 18 */
23 19
24#ifndef __LINUX_SPI_MXS_SPI_H__ 20#ifndef __LINUX_SPI_MXS_SPI_H__
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d5a316550177..6d36dacec4ba 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18#ifndef __linux_pxa2xx_spi_h 14#ifndef __linux_pxa2xx_spi_h
19#define __linux_pxa2xx_spi_h 15#define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
57#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) 53#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
58 54
59#include <linux/clk.h> 55#include <linux/clk.h>
60#include <mach/dma.h>
61 56
62extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 57extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
63 58
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index e546b2ceb623..a693188cc08b 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -11,11 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 *
19 */ 14 */
20 15
21#ifndef __LINUX_SPI_RENESAS_SPI_H__ 16#ifndef __LINUX_SPI_RENESAS_SPI_H__
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
index a1121f872ac1..aa0d440ab4f0 100644
--- a/include/linux/spi/sh_hspi.h
+++ b/include/linux/spi/sh_hspi.h
@@ -9,10 +9,6 @@
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details. 11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */ 12 */
17#ifndef SH_HSPI_H 13#ifndef SH_HSPI_H
18#define SH_HSPI_H 14#define SH_HSPI_H
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index 88a14d81c49e..b087a85f5f72 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
7 u16 num_chipselect; 7 u16 num_chipselect;
8 unsigned int dma_tx_id; 8 unsigned int dma_tx_id;
9 unsigned int dma_rx_id; 9 unsigned int dma_rx_id;
10 u32 dtdl;
11 u32 syncdl;
10}; 12};
11 13
12#endif /* __SPI_SH_MSIOF_H__ */ 14#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a6ef2a8e6de4..ed9489d893a4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */ 13 */
18 14
19#ifndef __LINUX_SPI_H 15#ifndef __LINUX_SPI_H
@@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
260 * @pump_messages: work struct for scheduling work to the message pump 256 * @pump_messages: work struct for scheduling work to the message pump
261 * @queue_lock: spinlock to syncronise access to message queue 257 * @queue_lock: spinlock to syncronise access to message queue
262 * @queue: message queue 258 * @queue: message queue
259 * @idling: the device is entering idle state
263 * @cur_msg: the currently in-flight message 260 * @cur_msg: the currently in-flight message
264 * @cur_msg_prepared: spi_prepare_message was called for the currently 261 * @cur_msg_prepared: spi_prepare_message was called for the currently
265 * in-flight message 262 * in-flight message
@@ -425,6 +422,7 @@ struct spi_master {
425 spinlock_t queue_lock; 422 spinlock_t queue_lock;
426 struct list_head queue; 423 struct list_head queue;
427 struct spi_message *cur_msg; 424 struct spi_message *cur_msg;
425 bool idling;
428 bool busy; 426 bool busy;
429 bool running; 427 bool running;
430 bool rt; 428 bool rt;
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
index 60b59187e590..414c6fddfcf0 100644
--- a/include/linux/spi/tle62x0.h
+++ b/include/linux/spi/tle62x0.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21struct tle62x0_pdata { 17struct tle62x0_pdata {
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
index 8f721e465e05..563b3b1799a8 100644
--- a/include/linux/spi/tsc2005.h
+++ b/include/linux/spi/tsc2005.h
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */ 15 */
21 16
22#ifndef _LINUX_SPI_TSC2005_H 17#ifndef _LINUX_SPI_TSC2005_H
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a2783cb5d275..9cfd9623fb03 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -45,7 +45,7 @@ struct rcu_batch {
45#define RCU_BATCH_INIT(name) { NULL, &(name.head) } 45#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
46 46
47struct srcu_struct { 47struct srcu_struct {
48 unsigned completed; 48 unsigned long completed;
49 struct srcu_struct_array __percpu *per_cpu_ref; 49 struct srcu_struct_array __percpu *per_cpu_ref;
50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */ 50 spinlock_t queue_lock; /* protect ->batch_queue, ->running */
51 bool running; 51 bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
102 * define and init a srcu struct at build time. 102 * define and init a srcu struct at build time.
103 * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. 103 * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
104 */ 104 */
105#define DEFINE_SRCU(name) \ 105#define __DEFINE_SRCU(name, is_static) \
106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ 106 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
107 struct srcu_struct name = __SRCU_STRUCT_INIT(name); 107 is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
108 108#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
109#define DEFINE_STATIC_SRCU(name) \ 109#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
110 static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
111 static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
112 110
113/** 111/**
114 * call_srcu() - Queue a callback for invocation after an SRCU grace period 112 * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
135void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 133void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
136void synchronize_srcu(struct srcu_struct *sp); 134void synchronize_srcu(struct srcu_struct *sp);
137void synchronize_srcu_expedited(struct srcu_struct *sp); 135void synchronize_srcu_expedited(struct srcu_struct *sp);
138long srcu_batches_completed(struct srcu_struct *sp); 136unsigned long srcu_batches_completed(struct srcu_struct *sp);
139void srcu_barrier(struct srcu_struct *sp); 137void srcu_barrier(struct srcu_struct *sp);
140 138
141#ifdef CONFIG_DEBUG_LOCK_ALLOC 139#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 9b63d13ba82b..3eaae4754275 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -33,6 +33,7 @@ extern time64_t ktime_get_real_seconds(void);
33 33
34extern int __getnstimeofday64(struct timespec64 *tv); 34extern int __getnstimeofday64(struct timespec64 *tv);
35extern void getnstimeofday64(struct timespec64 *tv); 35extern void getnstimeofday64(struct timespec64 *tv);
36extern void getboottime64(struct timespec64 *ts);
36 37
37#if BITS_PER_LONG == 64 38#if BITS_PER_LONG == 64
38/** 39/**
@@ -72,6 +73,11 @@ static inline struct timespec get_monotonic_coarse(void)
72{ 73{
73 return get_monotonic_coarse64(); 74 return get_monotonic_coarse64();
74} 75}
76
77static inline void getboottime(struct timespec *ts)
78{
79 return getboottime64(ts);
80}
75#else 81#else
76/** 82/**
77 * Deprecated. Use do_settimeofday64(). 83 * Deprecated. Use do_settimeofday64().
@@ -129,9 +135,15 @@ static inline struct timespec get_monotonic_coarse(void)
129{ 135{
130 return timespec64_to_timespec(get_monotonic_coarse64()); 136 return timespec64_to_timespec(get_monotonic_coarse64());
131} 137}
132#endif
133 138
134extern void getboottime(struct timespec *ts); 139static inline void getboottime(struct timespec *ts)
140{
141 struct timespec64 ts64;
142
143 getboottime64(&ts64);
144 *ts = timespec64_to_timespec(ts64);
145}
146#endif
135 147
136#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) 148#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
137#define ktime_get_real_ts64(ts) getnstimeofday64(ts) 149#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
@@ -217,6 +229,11 @@ static inline void get_monotonic_boottime(struct timespec *ts)
217 *ts = ktime_to_timespec(ktime_get_boottime()); 229 *ts = ktime_to_timespec(ktime_get_boottime());
218} 230}
219 231
232static inline void get_monotonic_boottime64(struct timespec64 *ts)
233{
234 *ts = ktime_to_timespec64(ktime_get_boottime());
235}
236
220static inline void timekeeping_clocktai(struct timespec *ts) 237static inline void timekeeping_clocktai(struct timespec *ts)
221{ 238{
222 *ts = ktime_to_timespec(ktime_get_clocktai()); 239 *ts = ktime_to_timespec(ktime_get_clocktai());
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 37423e0e1379..537d58eea8a0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -990,6 +990,32 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
990} 990}
991 991
992/** 992/**
993 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
994 * @word: the word being waited on, a kernel virtual address
995 * @bit: the bit of the word being waited on
996 * @mode: the task state to sleep in
997 * @timeout: timeout, in jiffies
998 *
999 * Use the standard hashed waitqueue table to wait for a bit
1000 * to be cleared. This is similar to wait_on_bit(), except also takes a
1001 * timeout parameter.
1002 *
1003 * Returned value will be zero if the bit was cleared before the
1004 * @timeout elapsed, or non-zero if the @timeout elapsed or process
1005 * received a signal and the mode permitted wakeup on that signal.
1006 */
1007static inline int
1008wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
1009{
1010 might_sleep();
1011 if (!test_bit(bit, word))
1012 return 0;
1013 return out_of_line_wait_on_bit_timeout(word, bit,
1014 bit_wait_timeout,
1015 mode, timeout);
1016}
1017
1018/**
993 * wait_on_bit_action - wait for a bit to be cleared 1019 * wait_on_bit_action - wait for a bit to be cleared
994 * @word: the word being waited on, a kernel virtual address 1020 * @word: the word being waited on, a kernel virtual address
995 * @bit: the bit of the word being waited on 1021 * @bit: the bit of the word being waited on
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b996e6cde6bb..74db135f9957 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -220,14 +220,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
220#endif 220#endif
221 221
222#define INIT_WORK(_work, _func) \ 222#define INIT_WORK(_work, _func) \
223 do { \ 223 __INIT_WORK((_work), (_func), 0)
224 __INIT_WORK((_work), (_func), 0); \
225 } while (0)
226 224
227#define INIT_WORK_ONSTACK(_work, _func) \ 225#define INIT_WORK_ONSTACK(_work, _func) \
228 do { \ 226 __INIT_WORK((_work), (_func), 1)
229 __INIT_WORK((_work), (_func), 1); \
230 } while (0)
231 227
232#define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 228#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
233 do { \ 229 do { \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 139b5067345b..27609dfcce25 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -763,7 +763,7 @@ perf_trace_##call(void *__data, proto) \
763 struct ftrace_event_call *event_call = __data; \ 763 struct ftrace_event_call *event_call = __data; \
764 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 764 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
765 struct ftrace_raw_##call *entry; \ 765 struct ftrace_raw_##call *entry; \
766 struct pt_regs __regs; \ 766 struct pt_regs *__regs; \
767 u64 __addr = 0, __count = 1; \ 767 u64 __addr = 0, __count = 1; \
768 struct task_struct *__task = NULL; \ 768 struct task_struct *__task = NULL; \
769 struct hlist_head *head; \ 769 struct hlist_head *head; \
@@ -782,18 +782,19 @@ perf_trace_##call(void *__data, proto) \
782 sizeof(u64)); \ 782 sizeof(u64)); \
783 __entry_size -= sizeof(u32); \ 783 __entry_size -= sizeof(u32); \
784 \ 784 \
785 perf_fetch_caller_regs(&__regs); \
786 entry = perf_trace_buf_prepare(__entry_size, \ 785 entry = perf_trace_buf_prepare(__entry_size, \
787 event_call->event.type, &__regs, &rctx); \ 786 event_call->event.type, &__regs, &rctx); \
788 if (!entry) \ 787 if (!entry) \
789 return; \ 788 return; \
790 \ 789 \
790 perf_fetch_caller_regs(__regs); \
791 \
791 tstruct \ 792 tstruct \
792 \ 793 \
793 { assign; } \ 794 { assign; } \
794 \ 795 \
795 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ 796 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
796 __count, &__regs, head, __task); \ 797 __count, __regs, head, __task); \
797} 798}
798 799
799/* 800/*
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4a1d0cc38ff2..efe3443572ba 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -451,6 +451,10 @@
451#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ 451#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
452#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */ 452#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
453#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ 453#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
454#define PCI_EXP_DEVCTL_READRQ_128B 0x0000 /* 128 Bytes */
455#define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */
456#define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */
457#define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
454#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ 458#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
455#define PCI_EXP_DEVSTA 10 /* Device Status */ 459#define PCI_EXP_DEVSTA 10 /* Device Status */
456#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */ 460#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 3387465b9caa..143ca5ffab7a 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -45,6 +45,8 @@
45#include <asm/xen/hypervisor.h> 45#include <asm/xen/hypervisor.h>
46 46
47#include <xen/features.h> 47#include <xen/features.h>
48#include <linux/mm_types.h>
49#include <linux/page-flags.h>
48 50
49#define GNTTAB_RESERVED_XENSTORE 1 51#define GNTTAB_RESERVED_XENSTORE 1
50 52
@@ -58,6 +60,22 @@ struct gnttab_free_callback {
58 u16 count; 60 u16 count;
59}; 61};
60 62
63struct gntab_unmap_queue_data;
64
65typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
66
67struct gntab_unmap_queue_data
68{
69 struct delayed_work gnttab_work;
70 void *data;
71 gnttab_unmap_refs_done done;
72 struct gnttab_unmap_grant_ref *unmap_ops;
73 struct gnttab_unmap_grant_ref *kunmap_ops;
74 struct page **pages;
75 unsigned int count;
76 unsigned int age;
77};
78
61int gnttab_init(void); 79int gnttab_init(void);
62int gnttab_suspend(void); 80int gnttab_suspend(void);
63int gnttab_resume(void); 81int gnttab_resume(void);
@@ -163,12 +181,17 @@ void gnttab_free_auto_xlat_frames(void);
163 181
164#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) 182#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
165 183
184int gnttab_alloc_pages(int nr_pages, struct page **pages);
185void gnttab_free_pages(int nr_pages, struct page **pages);
186
166int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 187int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
167 struct gnttab_map_grant_ref *kmap_ops, 188 struct gnttab_map_grant_ref *kmap_ops,
168 struct page **pages, unsigned int count); 189 struct page **pages, unsigned int count);
169int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 190int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
170 struct gnttab_map_grant_ref *kunmap_ops, 191 struct gnttab_unmap_grant_ref *kunmap_ops,
171 struct page **pages, unsigned int count); 192 struct page **pages, unsigned int count);
193void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
194
172 195
173/* Perform a batch of grant map/copy operations. Retry every batch slot 196/* Perform a batch of grant map/copy operations. Retry every batch slot
174 * for which the hypervisor returns GNTST_eagain. This is typically due 197 * for which the hypervisor returns GNTST_eagain. This is typically due
@@ -182,4 +205,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
182void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count); 205void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
183void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count); 206void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
184 207
208
209struct xen_page_foreign {
210 domid_t domid;
211 grant_ref_t gref;
212};
213
214static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
215{
216 if (!PageForeign(page))
217 return NULL;
218#if BITS_PER_LONG < 64
219 return (struct xen_page_foreign *)page->private;
220#else
221 BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
222 return (struct xen_page_foreign *)&page->private;
223#endif
224}
225
185#endif /* __ASM_GNTTAB_H__ */ 226#endif /* __ASM_GNTTAB_H__ */
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 131a6ccdba25..6ad3d110bb81 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -41,6 +41,12 @@
41/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ 41/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
42#define XENFEAT_mmu_pt_update_preserve_ad 5 42#define XENFEAT_mmu_pt_update_preserve_ad 5
43 43
44/*
45 * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
46 * available pte bits.
47 */
48#define XENFEAT_gnttab_map_avail_bits 7
49
44/* x86: Does this Xen host support the HVM callback vector type? */ 50/* x86: Does this Xen host support the HVM callback vector type? */
45#define XENFEAT_hvm_callback_vector 8 51#define XENFEAT_hvm_callback_vector 8
46 52
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index bcce56439d64..56806bc90c2f 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -526,6 +526,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
526#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) 526#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
527 527
528/* 528/*
529 * Bits to be placed in guest kernel available PTE bits (architecture
530 * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
531 */
532#define _GNTMAP_guest_avail0 (16)
533#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
534
535/*
529 * Values for error status returns. All errors are -ve. 536 * Values for error status returns. All errors are -ve.
530 */ 537 */
531#define GNTST_okay (0) /* Normal return. */ 538#define GNTST_okay (0) /* Normal return. */
diff --git a/init/Kconfig b/init/Kconfig
index 9afb971497f4..1354ac09b516 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -470,7 +470,6 @@ choice
470config TREE_RCU 470config TREE_RCU
471 bool "Tree-based hierarchical RCU" 471 bool "Tree-based hierarchical RCU"
472 depends on !PREEMPT && SMP 472 depends on !PREEMPT && SMP
473 select IRQ_WORK
474 help 473 help
475 This option selects the RCU implementation that is 474 This option selects the RCU implementation that is
476 designed for very large SMP system with hundreds or 475 designed for very large SMP system with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
480config PREEMPT_RCU 479config PREEMPT_RCU
481 bool "Preemptible tree-based hierarchical RCU" 480 bool "Preemptible tree-based hierarchical RCU"
482 depends on PREEMPT 481 depends on PREEMPT
483 select IRQ_WORK
484 help 482 help
485 This option selects the RCU implementation that is 483 This option selects the RCU implementation that is
486 designed for very large SMP systems with hundreds or 484 designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
501 499
502endchoice 500endchoice
503 501
502config SRCU
503 bool
504 help
505 This option selects the sleepable version of RCU. This version
506 permits arbitrary sleeping or blocking within RCU read-side critical
507 sections.
508
504config TASKS_RCU 509config TASKS_RCU
505 bool "Task_based RCU implementation using voluntary context switch" 510 bool "Task_based RCU implementation using voluntary context switch"
506 default n 511 default n
512 select SRCU
507 help 513 help
508 This option enables a task-based RCU implementation that uses 514 This option enables a task-based RCU implementation that uses
509 only voluntary context switch (not preemption!), idle, and 515 only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
668 674
669config RCU_KTHREAD_PRIO 675config RCU_KTHREAD_PRIO
670 int "Real-time priority to use for RCU worker threads" 676 int "Real-time priority to use for RCU worker threads"
671 range 1 99 677 range 1 99 if RCU_BOOST
672 depends on RCU_BOOST 678 range 0 99 if !RCU_BOOST
673 default 1 679 default 1 if RCU_BOOST
680 default 0 if !RCU_BOOST
674 help 681 help
675 This option specifies the SCHED_FIFO priority value that will be 682 This option specifies the SCHED_FIFO priority value that will be
676 assigned to the rcuc/n and rcub/n threads and is also the value 683 assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1595,6 +1602,7 @@ config PERF_EVENTS
1595 depends on HAVE_PERF_EVENTS 1602 depends on HAVE_PERF_EVENTS
1596 select ANON_INODES 1603 select ANON_INODES
1597 select IRQ_WORK 1604 select IRQ_WORK
1605 select SRCU
1598 help 1606 help
1599 Enable kernel support for various performance events provided 1607 Enable kernel support for various performance events provided
1600 by software and hardware. 1608 by software and hardware.
diff --git a/init/main.c b/init/main.c
index 61b993767db5..179ada15d08a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -87,10 +87,6 @@
87#include <asm/sections.h> 87#include <asm/sections.h>
88#include <asm/cacheflush.h> 88#include <asm/cacheflush.h>
89 89
90#ifdef CONFIG_X86_LOCAL_APIC
91#include <asm/smp.h>
92#endif
93
94static int kernel_init(void *); 90static int kernel_init(void *);
95 91
96extern void init_IRQ(void); 92extern void init_IRQ(void);
@@ -351,15 +347,6 @@ __setup("rdinit=", rdinit_setup);
351 347
352#ifndef CONFIG_SMP 348#ifndef CONFIG_SMP
353static const unsigned int setup_max_cpus = NR_CPUS; 349static const unsigned int setup_max_cpus = NR_CPUS;
354#ifdef CONFIG_X86_LOCAL_APIC
355static void __init smp_init(void)
356{
357 APIC_init_uniprocessor();
358}
359#else
360#define smp_init() do { } while (0)
361#endif
362
363static inline void setup_nr_cpu_ids(void) { } 350static inline void setup_nr_cpu_ids(void) { }
364static inline void smp_prepare_cpus(unsigned int maxcpus) { } 351static inline void smp_prepare_cpus(unsigned int maxcpus) { }
365#endif 352#endif
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 76768ee812b2..08561f1acd13 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -231,6 +231,10 @@ config RWSEM_SPIN_ON_OWNER
231 def_bool y 231 def_bool y
232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW 232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
233 233
234config LOCK_SPIN_ON_OWNER
235 def_bool y
236 depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
237
234config ARCH_USE_QUEUE_RWLOCK 238config ARCH_USE_QUEUE_RWLOCK
235 bool 239 bool
236 240
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5d220234b3ca..1972b161c61e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
58 58
59static struct { 59static struct {
60 struct task_struct *active_writer; 60 struct task_struct *active_writer;
61 struct mutex lock; /* Synchronizes accesses to refcount, */ 61 /* wait queue to wake up the active_writer */
62 wait_queue_head_t wq;
63 /* verifies that no writer will get active while readers are active */
64 struct mutex lock;
62 /* 65 /*
63 * Also blocks the new readers during 66 * Also blocks the new readers during
64 * an ongoing cpu hotplug operation. 67 * an ongoing cpu hotplug operation.
65 */ 68 */
66 int refcount; 69 atomic_t refcount;
67 /* And allows lockless put_online_cpus(). */
68 atomic_t puts_pending;
69 70
70#ifdef CONFIG_DEBUG_LOCK_ALLOC 71#ifdef CONFIG_DEBUG_LOCK_ALLOC
71 struct lockdep_map dep_map; 72 struct lockdep_map dep_map;
72#endif 73#endif
73} cpu_hotplug = { 74} cpu_hotplug = {
74 .active_writer = NULL, 75 .active_writer = NULL,
76 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
75 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), 77 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
76 .refcount = 0,
77#ifdef CONFIG_DEBUG_LOCK_ALLOC 78#ifdef CONFIG_DEBUG_LOCK_ALLOC
78 .dep_map = {.name = "cpu_hotplug.lock" }, 79 .dep_map = {.name = "cpu_hotplug.lock" },
79#endif 80#endif
@@ -86,15 +87,6 @@ static struct {
86#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) 87#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
87#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) 88#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
88 89
89static void apply_puts_pending(int max)
90{
91 int delta;
92
93 if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
94 delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
95 cpu_hotplug.refcount -= delta;
96 }
97}
98 90
99void get_online_cpus(void) 91void get_online_cpus(void)
100{ 92{
@@ -103,8 +95,7 @@ void get_online_cpus(void)
103 return; 95 return;
104 cpuhp_lock_acquire_read(); 96 cpuhp_lock_acquire_read();
105 mutex_lock(&cpu_hotplug.lock); 97 mutex_lock(&cpu_hotplug.lock);
106 apply_puts_pending(65536); 98 atomic_inc(&cpu_hotplug.refcount);
107 cpu_hotplug.refcount++;
108 mutex_unlock(&cpu_hotplug.lock); 99 mutex_unlock(&cpu_hotplug.lock);
109} 100}
110EXPORT_SYMBOL_GPL(get_online_cpus); 101EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
116 if (!mutex_trylock(&cpu_hotplug.lock)) 107 if (!mutex_trylock(&cpu_hotplug.lock))
117 return false; 108 return false;
118 cpuhp_lock_acquire_tryread(); 109 cpuhp_lock_acquire_tryread();
119 apply_puts_pending(65536); 110 atomic_inc(&cpu_hotplug.refcount);
120 cpu_hotplug.refcount++;
121 mutex_unlock(&cpu_hotplug.lock); 111 mutex_unlock(&cpu_hotplug.lock);
122 return true; 112 return true;
123} 113}
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
125 115
126void put_online_cpus(void) 116void put_online_cpus(void)
127{ 117{
118 int refcount;
119
128 if (cpu_hotplug.active_writer == current) 120 if (cpu_hotplug.active_writer == current)
129 return; 121 return;
130 if (!mutex_trylock(&cpu_hotplug.lock)) {
131 atomic_inc(&cpu_hotplug.puts_pending);
132 cpuhp_lock_release();
133 return;
134 }
135 122
136 if (WARN_ON(!cpu_hotplug.refcount)) 123 refcount = atomic_dec_return(&cpu_hotplug.refcount);
137 cpu_hotplug.refcount++; /* try to fix things up */ 124 if (WARN_ON(refcount < 0)) /* try to fix things up */
125 atomic_inc(&cpu_hotplug.refcount);
126
127 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
128 wake_up(&cpu_hotplug.wq);
138 129
139 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
140 wake_up_process(cpu_hotplug.active_writer);
141 mutex_unlock(&cpu_hotplug.lock);
142 cpuhp_lock_release(); 130 cpuhp_lock_release();
143 131
144} 132}
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
168 */ 156 */
169void cpu_hotplug_begin(void) 157void cpu_hotplug_begin(void)
170{ 158{
171 cpu_hotplug.active_writer = current; 159 DEFINE_WAIT(wait);
172 160
161 cpu_hotplug.active_writer = current;
173 cpuhp_lock_acquire(); 162 cpuhp_lock_acquire();
163
174 for (;;) { 164 for (;;) {
175 mutex_lock(&cpu_hotplug.lock); 165 mutex_lock(&cpu_hotplug.lock);
176 apply_puts_pending(1); 166 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
177 if (likely(!cpu_hotplug.refcount)) 167 if (likely(!atomic_read(&cpu_hotplug.refcount)))
178 break; 168 break;
179 __set_current_state(TASK_UNINTERRUPTIBLE);
180 mutex_unlock(&cpu_hotplug.lock); 169 mutex_unlock(&cpu_hotplug.lock);
181 schedule(); 170 schedule();
182 } 171 }
172 finish_wait(&cpu_hotplug.wq, &wait);
183} 173}
184 174
185void cpu_hotplug_done(void) 175void cpu_hotplug_done(void)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 19efcf13375a..7f2fbb8b5069 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -872,22 +872,32 @@ void perf_pmu_enable(struct pmu *pmu)
872 pmu->pmu_enable(pmu); 872 pmu->pmu_enable(pmu);
873} 873}
874 874
875static DEFINE_PER_CPU(struct list_head, rotation_list); 875static DEFINE_PER_CPU(struct list_head, active_ctx_list);
876 876
877/* 877/*
878 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized 878 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
879 * because they're strictly cpu affine and rotate_start is called with IRQs 879 * perf_event_task_tick() are fully serialized because they're strictly cpu
880 * disabled, while rotate_context is called from IRQ context. 880 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
881 * disabled, while perf_event_task_tick is called from IRQ context.
881 */ 882 */
882static void perf_pmu_rotate_start(struct pmu *pmu) 883static void perf_event_ctx_activate(struct perf_event_context *ctx)
883{ 884{
884 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 885 struct list_head *head = this_cpu_ptr(&active_ctx_list);
885 struct list_head *head = this_cpu_ptr(&rotation_list);
886 886
887 WARN_ON(!irqs_disabled()); 887 WARN_ON(!irqs_disabled());
888 888
889 if (list_empty(&cpuctx->rotation_list)) 889 WARN_ON(!list_empty(&ctx->active_ctx_list));
890 list_add(&cpuctx->rotation_list, head); 890
891 list_add(&ctx->active_ctx_list, head);
892}
893
894static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
895{
896 WARN_ON(!irqs_disabled());
897
898 WARN_ON(list_empty(&ctx->active_ctx_list));
899
900 list_del_init(&ctx->active_ctx_list);
891} 901}
892 902
893static void get_ctx(struct perf_event_context *ctx) 903static void get_ctx(struct perf_event_context *ctx)
@@ -907,6 +917,84 @@ static void put_ctx(struct perf_event_context *ctx)
907} 917}
908 918
909/* 919/*
920 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
921 * perf_pmu_migrate_context() we need some magic.
922 *
923 * Those places that change perf_event::ctx will hold both
924 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
925 *
926 * Lock ordering is by mutex address. There is one other site where
927 * perf_event_context::mutex nests and that is put_event(). But remember that
928 * that is a parent<->child context relation, and migration does not affect
929 * children, therefore these two orderings should not interact.
930 *
931 * The change in perf_event::ctx does not affect children (as claimed above)
932 * because the sys_perf_event_open() case will install a new event and break
933 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
934 * concerned with cpuctx and that doesn't have children.
935 *
936 * The places that change perf_event::ctx will issue:
937 *
938 * perf_remove_from_context();
939 * synchronize_rcu();
940 * perf_install_in_context();
941 *
942 * to affect the change. The remove_from_context() + synchronize_rcu() should
943 * quiesce the event, after which we can install it in the new location. This
944 * means that only external vectors (perf_fops, prctl) can perturb the event
945 * while in transit. Therefore all such accessors should also acquire
946 * perf_event_context::mutex to serialize against this.
947 *
948 * However; because event->ctx can change while we're waiting to acquire
949 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
950 * function.
951 *
952 * Lock order:
953 * task_struct::perf_event_mutex
954 * perf_event_context::mutex
955 * perf_event_context::lock
956 * perf_event::child_mutex;
957 * perf_event::mmap_mutex
958 * mmap_sem
959 */
960static struct perf_event_context *
961perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
962{
963 struct perf_event_context *ctx;
964
965again:
966 rcu_read_lock();
967 ctx = ACCESS_ONCE(event->ctx);
968 if (!atomic_inc_not_zero(&ctx->refcount)) {
969 rcu_read_unlock();
970 goto again;
971 }
972 rcu_read_unlock();
973
974 mutex_lock_nested(&ctx->mutex, nesting);
975 if (event->ctx != ctx) {
976 mutex_unlock(&ctx->mutex);
977 put_ctx(ctx);
978 goto again;
979 }
980
981 return ctx;
982}
983
984static inline struct perf_event_context *
985perf_event_ctx_lock(struct perf_event *event)
986{
987 return perf_event_ctx_lock_nested(event, 0);
988}
989
990static void perf_event_ctx_unlock(struct perf_event *event,
991 struct perf_event_context *ctx)
992{
993 mutex_unlock(&ctx->mutex);
994 put_ctx(ctx);
995}
996
997/*
910 * This must be done under the ctx->lock, such as to serialize against 998 * This must be done under the ctx->lock, such as to serialize against
911 * context_equiv(), therefore we cannot call put_ctx() since that might end up 999 * context_equiv(), therefore we cannot call put_ctx() since that might end up
912 * calling scheduler related locks and ctx->lock nests inside those. 1000 * calling scheduler related locks and ctx->lock nests inside those.
@@ -1155,8 +1243,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1155 ctx->nr_branch_stack++; 1243 ctx->nr_branch_stack++;
1156 1244
1157 list_add_rcu(&event->event_entry, &ctx->event_list); 1245 list_add_rcu(&event->event_entry, &ctx->event_list);
1158 if (!ctx->nr_events)
1159 perf_pmu_rotate_start(ctx->pmu);
1160 ctx->nr_events++; 1246 ctx->nr_events++;
1161 if (event->attr.inherit_stat) 1247 if (event->attr.inherit_stat)
1162 ctx->nr_stat++; 1248 ctx->nr_stat++;
@@ -1275,6 +1361,8 @@ static void perf_group_attach(struct perf_event *event)
1275 if (group_leader == event) 1361 if (group_leader == event)
1276 return; 1362 return;
1277 1363
1364 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1365
1278 if (group_leader->group_flags & PERF_GROUP_SOFTWARE && 1366 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1279 !is_software_event(event)) 1367 !is_software_event(event))
1280 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; 1368 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
@@ -1296,6 +1384,10 @@ static void
1296list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1384list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1297{ 1385{
1298 struct perf_cpu_context *cpuctx; 1386 struct perf_cpu_context *cpuctx;
1387
1388 WARN_ON_ONCE(event->ctx != ctx);
1389 lockdep_assert_held(&ctx->lock);
1390
1299 /* 1391 /*
1300 * We can have double detach due to exit/hot-unplug + close. 1392 * We can have double detach due to exit/hot-unplug + close.
1301 */ 1393 */
@@ -1380,6 +1472,8 @@ static void perf_group_detach(struct perf_event *event)
1380 1472
1381 /* Inherit group flags from the previous leader */ 1473 /* Inherit group flags from the previous leader */
1382 sibling->group_flags = event->group_flags; 1474 sibling->group_flags = event->group_flags;
1475
1476 WARN_ON_ONCE(sibling->ctx != event->ctx);
1383 } 1477 }
1384 1478
1385out: 1479out:
@@ -1442,6 +1536,10 @@ event_sched_out(struct perf_event *event,
1442{ 1536{
1443 u64 tstamp = perf_event_time(event); 1537 u64 tstamp = perf_event_time(event);
1444 u64 delta; 1538 u64 delta;
1539
1540 WARN_ON_ONCE(event->ctx != ctx);
1541 lockdep_assert_held(&ctx->lock);
1542
1445 /* 1543 /*
1446 * An event which could not be activated because of 1544 * An event which could not be activated because of
1447 * filter mismatch still needs to have its timings 1545 * filter mismatch still needs to have its timings
@@ -1471,7 +1569,8 @@ event_sched_out(struct perf_event *event,
1471 1569
1472 if (!is_software_event(event)) 1570 if (!is_software_event(event))
1473 cpuctx->active_oncpu--; 1571 cpuctx->active_oncpu--;
1474 ctx->nr_active--; 1572 if (!--ctx->nr_active)
1573 perf_event_ctx_deactivate(ctx);
1475 if (event->attr.freq && event->attr.sample_freq) 1574 if (event->attr.freq && event->attr.sample_freq)
1476 ctx->nr_freq--; 1575 ctx->nr_freq--;
1477 if (event->attr.exclusive || !cpuctx->active_oncpu) 1576 if (event->attr.exclusive || !cpuctx->active_oncpu)
@@ -1654,7 +1753,7 @@ int __perf_event_disable(void *info)
1654 * is the current context on this CPU and preemption is disabled, 1753 * is the current context on this CPU and preemption is disabled,
1655 * hence we can't get into perf_event_task_sched_out for this context. 1754 * hence we can't get into perf_event_task_sched_out for this context.
1656 */ 1755 */
1657void perf_event_disable(struct perf_event *event) 1756static void _perf_event_disable(struct perf_event *event)
1658{ 1757{
1659 struct perf_event_context *ctx = event->ctx; 1758 struct perf_event_context *ctx = event->ctx;
1660 struct task_struct *task = ctx->task; 1759 struct task_struct *task = ctx->task;
@@ -1695,6 +1794,19 @@ retry:
1695 } 1794 }
1696 raw_spin_unlock_irq(&ctx->lock); 1795 raw_spin_unlock_irq(&ctx->lock);
1697} 1796}
1797
1798/*
1799 * Strictly speaking kernel users cannot create groups and therefore this
1800 * interface does not need the perf_event_ctx_lock() magic.
1801 */
1802void perf_event_disable(struct perf_event *event)
1803{
1804 struct perf_event_context *ctx;
1805
1806 ctx = perf_event_ctx_lock(event);
1807 _perf_event_disable(event);
1808 perf_event_ctx_unlock(event, ctx);
1809}
1698EXPORT_SYMBOL_GPL(perf_event_disable); 1810EXPORT_SYMBOL_GPL(perf_event_disable);
1699 1811
1700static void perf_set_shadow_time(struct perf_event *event, 1812static void perf_set_shadow_time(struct perf_event *event,
@@ -1782,7 +1894,8 @@ event_sched_in(struct perf_event *event,
1782 1894
1783 if (!is_software_event(event)) 1895 if (!is_software_event(event))
1784 cpuctx->active_oncpu++; 1896 cpuctx->active_oncpu++;
1785 ctx->nr_active++; 1897 if (!ctx->nr_active++)
1898 perf_event_ctx_activate(ctx);
1786 if (event->attr.freq && event->attr.sample_freq) 1899 if (event->attr.freq && event->attr.sample_freq)
1787 ctx->nr_freq++; 1900 ctx->nr_freq++;
1788 1901
@@ -2158,7 +2271,7 @@ unlock:
2158 * perf_event_for_each_child or perf_event_for_each as described 2271 * perf_event_for_each_child or perf_event_for_each as described
2159 * for perf_event_disable. 2272 * for perf_event_disable.
2160 */ 2273 */
2161void perf_event_enable(struct perf_event *event) 2274static void _perf_event_enable(struct perf_event *event)
2162{ 2275{
2163 struct perf_event_context *ctx = event->ctx; 2276 struct perf_event_context *ctx = event->ctx;
2164 struct task_struct *task = ctx->task; 2277 struct task_struct *task = ctx->task;
@@ -2214,9 +2327,21 @@ retry:
2214out: 2327out:
2215 raw_spin_unlock_irq(&ctx->lock); 2328 raw_spin_unlock_irq(&ctx->lock);
2216} 2329}
2330
2331/*
2332 * See perf_event_disable();
2333 */
2334void perf_event_enable(struct perf_event *event)
2335{
2336 struct perf_event_context *ctx;
2337
2338 ctx = perf_event_ctx_lock(event);
2339 _perf_event_enable(event);
2340 perf_event_ctx_unlock(event, ctx);
2341}
2217EXPORT_SYMBOL_GPL(perf_event_enable); 2342EXPORT_SYMBOL_GPL(perf_event_enable);
2218 2343
2219int perf_event_refresh(struct perf_event *event, int refresh) 2344static int _perf_event_refresh(struct perf_event *event, int refresh)
2220{ 2345{
2221 /* 2346 /*
2222 * not supported on inherited events 2347 * not supported on inherited events
@@ -2225,10 +2350,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
2225 return -EINVAL; 2350 return -EINVAL;
2226 2351
2227 atomic_add(refresh, &event->event_limit); 2352 atomic_add(refresh, &event->event_limit);
2228 perf_event_enable(event); 2353 _perf_event_enable(event);
2229 2354
2230 return 0; 2355 return 0;
2231} 2356}
2357
2358/*
2359 * See perf_event_disable()
2360 */
2361int perf_event_refresh(struct perf_event *event, int refresh)
2362{
2363 struct perf_event_context *ctx;
2364 int ret;
2365
2366 ctx = perf_event_ctx_lock(event);
2367 ret = _perf_event_refresh(event, refresh);
2368 perf_event_ctx_unlock(event, ctx);
2369
2370 return ret;
2371}
2232EXPORT_SYMBOL_GPL(perf_event_refresh); 2372EXPORT_SYMBOL_GPL(perf_event_refresh);
2233 2373
2234static void ctx_sched_out(struct perf_event_context *ctx, 2374static void ctx_sched_out(struct perf_event_context *ctx,
@@ -2612,12 +2752,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
2612 2752
2613 perf_pmu_enable(ctx->pmu); 2753 perf_pmu_enable(ctx->pmu);
2614 perf_ctx_unlock(cpuctx, ctx); 2754 perf_ctx_unlock(cpuctx, ctx);
2615
2616 /*
2617 * Since these rotations are per-cpu, we need to ensure the
2618 * cpu-context we got scheduled on is actually rotating.
2619 */
2620 perf_pmu_rotate_start(ctx->pmu);
2621} 2755}
2622 2756
2623/* 2757/*
@@ -2905,25 +3039,18 @@ static void rotate_ctx(struct perf_event_context *ctx)
2905 list_rotate_left(&ctx->flexible_groups); 3039 list_rotate_left(&ctx->flexible_groups);
2906} 3040}
2907 3041
2908/*
2909 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2910 * because they're strictly cpu affine and rotate_start is called with IRQs
2911 * disabled, while rotate_context is called from IRQ context.
2912 */
2913static int perf_rotate_context(struct perf_cpu_context *cpuctx) 3042static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2914{ 3043{
2915 struct perf_event_context *ctx = NULL; 3044 struct perf_event_context *ctx = NULL;
2916 int rotate = 0, remove = 1; 3045 int rotate = 0;
2917 3046
2918 if (cpuctx->ctx.nr_events) { 3047 if (cpuctx->ctx.nr_events) {
2919 remove = 0;
2920 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 3048 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2921 rotate = 1; 3049 rotate = 1;
2922 } 3050 }
2923 3051
2924 ctx = cpuctx->task_ctx; 3052 ctx = cpuctx->task_ctx;
2925 if (ctx && ctx->nr_events) { 3053 if (ctx && ctx->nr_events) {
2926 remove = 0;
2927 if (ctx->nr_events != ctx->nr_active) 3054 if (ctx->nr_events != ctx->nr_active)
2928 rotate = 1; 3055 rotate = 1;
2929 } 3056 }
@@ -2947,8 +3074,6 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2947 perf_pmu_enable(cpuctx->ctx.pmu); 3074 perf_pmu_enable(cpuctx->ctx.pmu);
2948 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 3075 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2949done: 3076done:
2950 if (remove)
2951 list_del_init(&cpuctx->rotation_list);
2952 3077
2953 return rotate; 3078 return rotate;
2954} 3079}
@@ -2966,9 +3091,8 @@ bool perf_event_can_stop_tick(void)
2966 3091
2967void perf_event_task_tick(void) 3092void perf_event_task_tick(void)
2968{ 3093{
2969 struct list_head *head = this_cpu_ptr(&rotation_list); 3094 struct list_head *head = this_cpu_ptr(&active_ctx_list);
2970 struct perf_cpu_context *cpuctx, *tmp; 3095 struct perf_event_context *ctx, *tmp;
2971 struct perf_event_context *ctx;
2972 int throttled; 3096 int throttled;
2973 3097
2974 WARN_ON(!irqs_disabled()); 3098 WARN_ON(!irqs_disabled());
@@ -2976,14 +3100,8 @@ void perf_event_task_tick(void)
2976 __this_cpu_inc(perf_throttled_seq); 3100 __this_cpu_inc(perf_throttled_seq);
2977 throttled = __this_cpu_xchg(perf_throttled_count, 0); 3101 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2978 3102
2979 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 3103 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
2980 ctx = &cpuctx->ctx;
2981 perf_adjust_freq_unthr_context(ctx, throttled); 3104 perf_adjust_freq_unthr_context(ctx, throttled);
2982
2983 ctx = cpuctx->task_ctx;
2984 if (ctx)
2985 perf_adjust_freq_unthr_context(ctx, throttled);
2986 }
2987} 3105}
2988 3106
2989static int event_enable_on_exec(struct perf_event *event, 3107static int event_enable_on_exec(struct perf_event *event,
@@ -3142,6 +3260,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
3142{ 3260{
3143 raw_spin_lock_init(&ctx->lock); 3261 raw_spin_lock_init(&ctx->lock);
3144 mutex_init(&ctx->mutex); 3262 mutex_init(&ctx->mutex);
3263 INIT_LIST_HEAD(&ctx->active_ctx_list);
3145 INIT_LIST_HEAD(&ctx->pinned_groups); 3264 INIT_LIST_HEAD(&ctx->pinned_groups);
3146 INIT_LIST_HEAD(&ctx->flexible_groups); 3265 INIT_LIST_HEAD(&ctx->flexible_groups);
3147 INIT_LIST_HEAD(&ctx->event_list); 3266 INIT_LIST_HEAD(&ctx->event_list);
@@ -3421,7 +3540,16 @@ static void perf_remove_from_owner(struct perf_event *event)
3421 rcu_read_unlock(); 3540 rcu_read_unlock();
3422 3541
3423 if (owner) { 3542 if (owner) {
3424 mutex_lock(&owner->perf_event_mutex); 3543 /*
3544 * If we're here through perf_event_exit_task() we're already
3545 * holding ctx->mutex which would be an inversion wrt. the
3546 * normal lock order.
3547 *
3548 * However we can safely take this lock because its the child
3549 * ctx->mutex.
3550 */
3551 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3552
3425 /* 3553 /*
3426 * We have to re-check the event->owner field, if it is cleared 3554 * We have to re-check the event->owner field, if it is cleared
3427 * we raced with perf_event_exit_task(), acquiring the mutex 3555 * we raced with perf_event_exit_task(), acquiring the mutex
@@ -3440,7 +3568,7 @@ static void perf_remove_from_owner(struct perf_event *event)
3440 */ 3568 */
3441static void put_event(struct perf_event *event) 3569static void put_event(struct perf_event *event)
3442{ 3570{
3443 struct perf_event_context *ctx = event->ctx; 3571 struct perf_event_context *ctx;
3444 3572
3445 if (!atomic_long_dec_and_test(&event->refcount)) 3573 if (!atomic_long_dec_and_test(&event->refcount))
3446 return; 3574 return;
@@ -3448,7 +3576,6 @@ static void put_event(struct perf_event *event)
3448 if (!is_kernel_event(event)) 3576 if (!is_kernel_event(event))
3449 perf_remove_from_owner(event); 3577 perf_remove_from_owner(event);
3450 3578
3451 WARN_ON_ONCE(ctx->parent_ctx);
3452 /* 3579 /*
3453 * There are two ways this annotation is useful: 3580 * There are two ways this annotation is useful:
3454 * 3581 *
@@ -3461,7 +3588,8 @@ static void put_event(struct perf_event *event)
3461 * the last filedesc died, so there is no possibility 3588 * the last filedesc died, so there is no possibility
3462 * to trigger the AB-BA case. 3589 * to trigger the AB-BA case.
3463 */ 3590 */
3464 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); 3591 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
3592 WARN_ON_ONCE(ctx->parent_ctx);
3465 perf_remove_from_context(event, true); 3593 perf_remove_from_context(event, true);
3466 mutex_unlock(&ctx->mutex); 3594 mutex_unlock(&ctx->mutex);
3467 3595
@@ -3547,12 +3675,13 @@ static int perf_event_read_group(struct perf_event *event,
3547 u64 read_format, char __user *buf) 3675 u64 read_format, char __user *buf)
3548{ 3676{
3549 struct perf_event *leader = event->group_leader, *sub; 3677 struct perf_event *leader = event->group_leader, *sub;
3550 int n = 0, size = 0, ret = -EFAULT;
3551 struct perf_event_context *ctx = leader->ctx; 3678 struct perf_event_context *ctx = leader->ctx;
3552 u64 values[5]; 3679 int n = 0, size = 0, ret;
3553 u64 count, enabled, running; 3680 u64 count, enabled, running;
3681 u64 values[5];
3682
3683 lockdep_assert_held(&ctx->mutex);
3554 3684
3555 mutex_lock(&ctx->mutex);
3556 count = perf_event_read_value(leader, &enabled, &running); 3685 count = perf_event_read_value(leader, &enabled, &running);
3557 3686
3558 values[n++] = 1 + leader->nr_siblings; 3687 values[n++] = 1 + leader->nr_siblings;
@@ -3567,7 +3696,7 @@ static int perf_event_read_group(struct perf_event *event,
3567 size = n * sizeof(u64); 3696 size = n * sizeof(u64);
3568 3697
3569 if (copy_to_user(buf, values, size)) 3698 if (copy_to_user(buf, values, size))
3570 goto unlock; 3699 return -EFAULT;
3571 3700
3572 ret = size; 3701 ret = size;
3573 3702
@@ -3581,14 +3710,11 @@ static int perf_event_read_group(struct perf_event *event,
3581 size = n * sizeof(u64); 3710 size = n * sizeof(u64);
3582 3711
3583 if (copy_to_user(buf + ret, values, size)) { 3712 if (copy_to_user(buf + ret, values, size)) {
3584 ret = -EFAULT; 3713 return -EFAULT;
3585 goto unlock;
3586 } 3714 }
3587 3715
3588 ret += size; 3716 ret += size;
3589 } 3717 }
3590unlock:
3591 mutex_unlock(&ctx->mutex);
3592 3718
3593 return ret; 3719 return ret;
3594} 3720}
@@ -3660,8 +3786,14 @@ static ssize_t
3660perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 3786perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3661{ 3787{
3662 struct perf_event *event = file->private_data; 3788 struct perf_event *event = file->private_data;
3789 struct perf_event_context *ctx;
3790 int ret;
3663 3791
3664 return perf_read_hw(event, buf, count); 3792 ctx = perf_event_ctx_lock(event);
3793 ret = perf_read_hw(event, buf, count);
3794 perf_event_ctx_unlock(event, ctx);
3795
3796 return ret;
3665} 3797}
3666 3798
3667static unsigned int perf_poll(struct file *file, poll_table *wait) 3799static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3687,7 +3819,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
3687 return events; 3819 return events;
3688} 3820}
3689 3821
3690static void perf_event_reset(struct perf_event *event) 3822static void _perf_event_reset(struct perf_event *event)
3691{ 3823{
3692 (void)perf_event_read(event); 3824 (void)perf_event_read(event);
3693 local64_set(&event->count, 0); 3825 local64_set(&event->count, 0);
@@ -3706,6 +3838,7 @@ static void perf_event_for_each_child(struct perf_event *event,
3706 struct perf_event *child; 3838 struct perf_event *child;
3707 3839
3708 WARN_ON_ONCE(event->ctx->parent_ctx); 3840 WARN_ON_ONCE(event->ctx->parent_ctx);
3841
3709 mutex_lock(&event->child_mutex); 3842 mutex_lock(&event->child_mutex);
3710 func(event); 3843 func(event);
3711 list_for_each_entry(child, &event->child_list, child_list) 3844 list_for_each_entry(child, &event->child_list, child_list)
@@ -3719,14 +3852,13 @@ static void perf_event_for_each(struct perf_event *event,
3719 struct perf_event_context *ctx = event->ctx; 3852 struct perf_event_context *ctx = event->ctx;
3720 struct perf_event *sibling; 3853 struct perf_event *sibling;
3721 3854
3722 WARN_ON_ONCE(ctx->parent_ctx); 3855 lockdep_assert_held(&ctx->mutex);
3723 mutex_lock(&ctx->mutex); 3856
3724 event = event->group_leader; 3857 event = event->group_leader;
3725 3858
3726 perf_event_for_each_child(event, func); 3859 perf_event_for_each_child(event, func);
3727 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3860 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3728 perf_event_for_each_child(sibling, func); 3861 perf_event_for_each_child(sibling, func);
3729 mutex_unlock(&ctx->mutex);
3730} 3862}
3731 3863
3732static int perf_event_period(struct perf_event *event, u64 __user *arg) 3864static int perf_event_period(struct perf_event *event, u64 __user *arg)
@@ -3796,25 +3928,24 @@ static int perf_event_set_output(struct perf_event *event,
3796 struct perf_event *output_event); 3928 struct perf_event *output_event);
3797static int perf_event_set_filter(struct perf_event *event, void __user *arg); 3929static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3798 3930
3799static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 3931static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
3800{ 3932{
3801 struct perf_event *event = file->private_data;
3802 void (*func)(struct perf_event *); 3933 void (*func)(struct perf_event *);
3803 u32 flags = arg; 3934 u32 flags = arg;
3804 3935
3805 switch (cmd) { 3936 switch (cmd) {
3806 case PERF_EVENT_IOC_ENABLE: 3937 case PERF_EVENT_IOC_ENABLE:
3807 func = perf_event_enable; 3938 func = _perf_event_enable;
3808 break; 3939 break;
3809 case PERF_EVENT_IOC_DISABLE: 3940 case PERF_EVENT_IOC_DISABLE:
3810 func = perf_event_disable; 3941 func = _perf_event_disable;
3811 break; 3942 break;
3812 case PERF_EVENT_IOC_RESET: 3943 case PERF_EVENT_IOC_RESET:
3813 func = perf_event_reset; 3944 func = _perf_event_reset;
3814 break; 3945 break;
3815 3946
3816 case PERF_EVENT_IOC_REFRESH: 3947 case PERF_EVENT_IOC_REFRESH:
3817 return perf_event_refresh(event, arg); 3948 return _perf_event_refresh(event, arg);
3818 3949
3819 case PERF_EVENT_IOC_PERIOD: 3950 case PERF_EVENT_IOC_PERIOD:
3820 return perf_event_period(event, (u64 __user *)arg); 3951 return perf_event_period(event, (u64 __user *)arg);
@@ -3861,6 +3992,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3861 return 0; 3992 return 0;
3862} 3993}
3863 3994
3995static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3996{
3997 struct perf_event *event = file->private_data;
3998 struct perf_event_context *ctx;
3999 long ret;
4000
4001 ctx = perf_event_ctx_lock(event);
4002 ret = _perf_ioctl(event, cmd, arg);
4003 perf_event_ctx_unlock(event, ctx);
4004
4005 return ret;
4006}
4007
3864#ifdef CONFIG_COMPAT 4008#ifdef CONFIG_COMPAT
3865static long perf_compat_ioctl(struct file *file, unsigned int cmd, 4009static long perf_compat_ioctl(struct file *file, unsigned int cmd,
3866 unsigned long arg) 4010 unsigned long arg)
@@ -3883,11 +4027,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
3883 4027
3884int perf_event_task_enable(void) 4028int perf_event_task_enable(void)
3885{ 4029{
4030 struct perf_event_context *ctx;
3886 struct perf_event *event; 4031 struct perf_event *event;
3887 4032
3888 mutex_lock(&current->perf_event_mutex); 4033 mutex_lock(&current->perf_event_mutex);
3889 list_for_each_entry(event, &current->perf_event_list, owner_entry) 4034 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
3890 perf_event_for_each_child(event, perf_event_enable); 4035 ctx = perf_event_ctx_lock(event);
4036 perf_event_for_each_child(event, _perf_event_enable);
4037 perf_event_ctx_unlock(event, ctx);
4038 }
3891 mutex_unlock(&current->perf_event_mutex); 4039 mutex_unlock(&current->perf_event_mutex);
3892 4040
3893 return 0; 4041 return 0;
@@ -3895,11 +4043,15 @@ int perf_event_task_enable(void)
3895 4043
3896int perf_event_task_disable(void) 4044int perf_event_task_disable(void)
3897{ 4045{
4046 struct perf_event_context *ctx;
3898 struct perf_event *event; 4047 struct perf_event *event;
3899 4048
3900 mutex_lock(&current->perf_event_mutex); 4049 mutex_lock(&current->perf_event_mutex);
3901 list_for_each_entry(event, &current->perf_event_list, owner_entry) 4050 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
3902 perf_event_for_each_child(event, perf_event_disable); 4051 ctx = perf_event_ctx_lock(event);
4052 perf_event_for_each_child(event, _perf_event_disable);
4053 perf_event_ctx_unlock(event, ctx);
4054 }
3903 mutex_unlock(&current->perf_event_mutex); 4055 mutex_unlock(&current->perf_event_mutex);
3904 4056
3905 return 0; 4057 return 0;
@@ -5889,6 +6041,8 @@ end:
5889 rcu_read_unlock(); 6041 rcu_read_unlock();
5890} 6042}
5891 6043
6044DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
6045
5892int perf_swevent_get_recursion_context(void) 6046int perf_swevent_get_recursion_context(void)
5893{ 6047{
5894 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); 6048 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
@@ -5904,21 +6058,30 @@ inline void perf_swevent_put_recursion_context(int rctx)
5904 put_recursion_context(swhash->recursion, rctx); 6058 put_recursion_context(swhash->recursion, rctx);
5905} 6059}
5906 6060
5907void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) 6061void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5908{ 6062{
5909 struct perf_sample_data data; 6063 struct perf_sample_data data;
5910 int rctx;
5911 6064
5912 preempt_disable_notrace(); 6065 if (WARN_ON_ONCE(!regs))
5913 rctx = perf_swevent_get_recursion_context();
5914 if (rctx < 0)
5915 return; 6066 return;
5916 6067
5917 perf_sample_data_init(&data, addr, 0); 6068 perf_sample_data_init(&data, addr, 0);
5918
5919 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); 6069 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
6070}
6071
6072void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6073{
6074 int rctx;
6075
6076 preempt_disable_notrace();
6077 rctx = perf_swevent_get_recursion_context();
6078 if (unlikely(rctx < 0))
6079 goto fail;
6080
6081 ___perf_sw_event(event_id, nr, regs, addr);
5920 6082
5921 perf_swevent_put_recursion_context(rctx); 6083 perf_swevent_put_recursion_context(rctx);
6084fail:
5922 preempt_enable_notrace(); 6085 preempt_enable_notrace();
5923} 6086}
5924 6087
@@ -6780,7 +6943,6 @@ skip_type:
6780 6943
6781 __perf_cpu_hrtimer_init(cpuctx, cpu); 6944 __perf_cpu_hrtimer_init(cpuctx, cpu);
6782 6945
6783 INIT_LIST_HEAD(&cpuctx->rotation_list);
6784 cpuctx->unique_pmu = pmu; 6946 cpuctx->unique_pmu = pmu;
6785 } 6947 }
6786 6948
@@ -6853,6 +7015,20 @@ void perf_pmu_unregister(struct pmu *pmu)
6853} 7015}
6854EXPORT_SYMBOL_GPL(perf_pmu_unregister); 7016EXPORT_SYMBOL_GPL(perf_pmu_unregister);
6855 7017
7018static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
7019{
7020 int ret;
7021
7022 if (!try_module_get(pmu->module))
7023 return -ENODEV;
7024 event->pmu = pmu;
7025 ret = pmu->event_init(event);
7026 if (ret)
7027 module_put(pmu->module);
7028
7029 return ret;
7030}
7031
6856struct pmu *perf_init_event(struct perf_event *event) 7032struct pmu *perf_init_event(struct perf_event *event)
6857{ 7033{
6858 struct pmu *pmu = NULL; 7034 struct pmu *pmu = NULL;
@@ -6865,24 +7041,14 @@ struct pmu *perf_init_event(struct perf_event *event)
6865 pmu = idr_find(&pmu_idr, event->attr.type); 7041 pmu = idr_find(&pmu_idr, event->attr.type);
6866 rcu_read_unlock(); 7042 rcu_read_unlock();
6867 if (pmu) { 7043 if (pmu) {
6868 if (!try_module_get(pmu->module)) { 7044 ret = perf_try_init_event(pmu, event);
6869 pmu = ERR_PTR(-ENODEV);
6870 goto unlock;
6871 }
6872 event->pmu = pmu;
6873 ret = pmu->event_init(event);
6874 if (ret) 7045 if (ret)
6875 pmu = ERR_PTR(ret); 7046 pmu = ERR_PTR(ret);
6876 goto unlock; 7047 goto unlock;
6877 } 7048 }
6878 7049
6879 list_for_each_entry_rcu(pmu, &pmus, entry) { 7050 list_for_each_entry_rcu(pmu, &pmus, entry) {
6880 if (!try_module_get(pmu->module)) { 7051 ret = perf_try_init_event(pmu, event);
6881 pmu = ERR_PTR(-ENODEV);
6882 goto unlock;
6883 }
6884 event->pmu = pmu;
6885 ret = pmu->event_init(event);
6886 if (!ret) 7052 if (!ret)
6887 goto unlock; 7053 goto unlock;
6888 7054
@@ -7246,6 +7412,15 @@ out:
7246 return ret; 7412 return ret;
7247} 7413}
7248 7414
7415static void mutex_lock_double(struct mutex *a, struct mutex *b)
7416{
7417 if (b < a)
7418 swap(a, b);
7419
7420 mutex_lock(a);
7421 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
7422}
7423
7249/** 7424/**
7250 * sys_perf_event_open - open a performance event, associate it to a task/cpu 7425 * sys_perf_event_open - open a performance event, associate it to a task/cpu
7251 * 7426 *
@@ -7261,7 +7436,7 @@ SYSCALL_DEFINE5(perf_event_open,
7261 struct perf_event *group_leader = NULL, *output_event = NULL; 7436 struct perf_event *group_leader = NULL, *output_event = NULL;
7262 struct perf_event *event, *sibling; 7437 struct perf_event *event, *sibling;
7263 struct perf_event_attr attr; 7438 struct perf_event_attr attr;
7264 struct perf_event_context *ctx; 7439 struct perf_event_context *ctx, *uninitialized_var(gctx);
7265 struct file *event_file = NULL; 7440 struct file *event_file = NULL;
7266 struct fd group = {NULL, 0}; 7441 struct fd group = {NULL, 0};
7267 struct task_struct *task = NULL; 7442 struct task_struct *task = NULL;
@@ -7459,43 +7634,68 @@ SYSCALL_DEFINE5(perf_event_open,
7459 } 7634 }
7460 7635
7461 if (move_group) { 7636 if (move_group) {
7462 struct perf_event_context *gctx = group_leader->ctx; 7637 gctx = group_leader->ctx;
7463
7464 mutex_lock(&gctx->mutex);
7465 perf_remove_from_context(group_leader, false);
7466 7638
7467 /* 7639 /*
7468 * Removing from the context ends up with disabled 7640 * See perf_event_ctx_lock() for comments on the details
7469 * event. What we want here is event in the initial 7641 * of swizzling perf_event::ctx.
7470 * startup state, ready to be add into new context.
7471 */ 7642 */
7472 perf_event__state_init(group_leader); 7643 mutex_lock_double(&gctx->mutex, &ctx->mutex);
7644
7645 perf_remove_from_context(group_leader, false);
7646
7473 list_for_each_entry(sibling, &group_leader->sibling_list, 7647 list_for_each_entry(sibling, &group_leader->sibling_list,
7474 group_entry) { 7648 group_entry) {
7475 perf_remove_from_context(sibling, false); 7649 perf_remove_from_context(sibling, false);
7476 perf_event__state_init(sibling);
7477 put_ctx(gctx); 7650 put_ctx(gctx);
7478 } 7651 }
7479 mutex_unlock(&gctx->mutex); 7652 } else {
7480 put_ctx(gctx); 7653 mutex_lock(&ctx->mutex);
7481 } 7654 }
7482 7655
7483 WARN_ON_ONCE(ctx->parent_ctx); 7656 WARN_ON_ONCE(ctx->parent_ctx);
7484 mutex_lock(&ctx->mutex);
7485 7657
7486 if (move_group) { 7658 if (move_group) {
7659 /*
7660 * Wait for everybody to stop referencing the events through
7661 * the old lists, before installing it on new lists.
7662 */
7487 synchronize_rcu(); 7663 synchronize_rcu();
7488 perf_install_in_context(ctx, group_leader, group_leader->cpu); 7664
7489 get_ctx(ctx); 7665 /*
7666 * Install the group siblings before the group leader.
7667 *
7668 * Because a group leader will try and install the entire group
7669 * (through the sibling list, which is still in-tact), we can
7670 * end up with siblings installed in the wrong context.
7671 *
7672 * By installing siblings first we NO-OP because they're not
7673 * reachable through the group lists.
7674 */
7490 list_for_each_entry(sibling, &group_leader->sibling_list, 7675 list_for_each_entry(sibling, &group_leader->sibling_list,
7491 group_entry) { 7676 group_entry) {
7677 perf_event__state_init(sibling);
7492 perf_install_in_context(ctx, sibling, sibling->cpu); 7678 perf_install_in_context(ctx, sibling, sibling->cpu);
7493 get_ctx(ctx); 7679 get_ctx(ctx);
7494 } 7680 }
7681
7682 /*
7683 * Removing from the context ends up with disabled
7684 * event. What we want here is event in the initial
7685 * startup state, ready to be add into new context.
7686 */
7687 perf_event__state_init(group_leader);
7688 perf_install_in_context(ctx, group_leader, group_leader->cpu);
7689 get_ctx(ctx);
7495 } 7690 }
7496 7691
7497 perf_install_in_context(ctx, event, event->cpu); 7692 perf_install_in_context(ctx, event, event->cpu);
7498 perf_unpin_context(ctx); 7693 perf_unpin_context(ctx);
7694
7695 if (move_group) {
7696 mutex_unlock(&gctx->mutex);
7697 put_ctx(gctx);
7698 }
7499 mutex_unlock(&ctx->mutex); 7699 mutex_unlock(&ctx->mutex);
7500 7700
7501 put_online_cpus(); 7701 put_online_cpus();
@@ -7603,7 +7803,11 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7603 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; 7803 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7604 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; 7804 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7605 7805
7606 mutex_lock(&src_ctx->mutex); 7806 /*
7807 * See perf_event_ctx_lock() for comments on the details
7808 * of swizzling perf_event::ctx.
7809 */
7810 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
7607 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 7811 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7608 event_entry) { 7812 event_entry) {
7609 perf_remove_from_context(event, false); 7813 perf_remove_from_context(event, false);
@@ -7611,11 +7815,36 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7611 put_ctx(src_ctx); 7815 put_ctx(src_ctx);
7612 list_add(&event->migrate_entry, &events); 7816 list_add(&event->migrate_entry, &events);
7613 } 7817 }
7614 mutex_unlock(&src_ctx->mutex);
7615 7818
7819 /*
7820 * Wait for the events to quiesce before re-instating them.
7821 */
7616 synchronize_rcu(); 7822 synchronize_rcu();
7617 7823
7618 mutex_lock(&dst_ctx->mutex); 7824 /*
7825 * Re-instate events in 2 passes.
7826 *
7827 * Skip over group leaders and only install siblings on this first
7828 * pass, siblings will not get enabled without a leader, however a
7829 * leader will enable its siblings, even if those are still on the old
7830 * context.
7831 */
7832 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7833 if (event->group_leader == event)
7834 continue;
7835
7836 list_del(&event->migrate_entry);
7837 if (event->state >= PERF_EVENT_STATE_OFF)
7838 event->state = PERF_EVENT_STATE_INACTIVE;
7839 account_event_cpu(event, dst_cpu);
7840 perf_install_in_context(dst_ctx, event, dst_cpu);
7841 get_ctx(dst_ctx);
7842 }
7843
7844 /*
7845 * Once all the siblings are setup properly, install the group leaders
7846 * to make it go.
7847 */
7619 list_for_each_entry_safe(event, tmp, &events, migrate_entry) { 7848 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7620 list_del(&event->migrate_entry); 7849 list_del(&event->migrate_entry);
7621 if (event->state >= PERF_EVENT_STATE_OFF) 7850 if (event->state >= PERF_EVENT_STATE_OFF)
@@ -7625,6 +7854,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7625 get_ctx(dst_ctx); 7854 get_ctx(dst_ctx);
7626 } 7855 }
7627 mutex_unlock(&dst_ctx->mutex); 7856 mutex_unlock(&dst_ctx->mutex);
7857 mutex_unlock(&src_ctx->mutex);
7628} 7858}
7629EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); 7859EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
7630 7860
@@ -7811,14 +8041,19 @@ static void perf_free_event(struct perf_event *event,
7811 8041
7812 put_event(parent); 8042 put_event(parent);
7813 8043
8044 raw_spin_lock_irq(&ctx->lock);
7814 perf_group_detach(event); 8045 perf_group_detach(event);
7815 list_del_event(event, ctx); 8046 list_del_event(event, ctx);
8047 raw_spin_unlock_irq(&ctx->lock);
7816 free_event(event); 8048 free_event(event);
7817} 8049}
7818 8050
7819/* 8051/*
7820 * free an unexposed, unused context as created by inheritance by 8052 * Free an unexposed, unused context as created by inheritance by
7821 * perf_event_init_task below, used by fork() in case of fail. 8053 * perf_event_init_task below, used by fork() in case of fail.
8054 *
8055 * Not all locks are strictly required, but take them anyway to be nice and
8056 * help out with the lockdep assertions.
7822 */ 8057 */
7823void perf_event_free_task(struct task_struct *task) 8058void perf_event_free_task(struct task_struct *task)
7824{ 8059{
@@ -8137,7 +8372,7 @@ static void __init perf_event_init_all_cpus(void)
8137 for_each_possible_cpu(cpu) { 8372 for_each_possible_cpu(cpu) {
8138 swhash = &per_cpu(swevent_htable, cpu); 8373 swhash = &per_cpu(swevent_htable, cpu);
8139 mutex_init(&swhash->hlist_mutex); 8374 mutex_init(&swhash->hlist_mutex);
8140 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); 8375 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
8141 } 8376 }
8142} 8377}
8143 8378
@@ -8158,22 +8393,11 @@ static void perf_event_init_cpu(int cpu)
8158} 8393}
8159 8394
8160#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC 8395#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
8161static void perf_pmu_rotate_stop(struct pmu *pmu)
8162{
8163 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
8164
8165 WARN_ON(!irqs_disabled());
8166
8167 list_del_init(&cpuctx->rotation_list);
8168}
8169
8170static void __perf_event_exit_context(void *__info) 8396static void __perf_event_exit_context(void *__info)
8171{ 8397{
8172 struct remove_event re = { .detach_group = true }; 8398 struct remove_event re = { .detach_group = true };
8173 struct perf_event_context *ctx = __info; 8399 struct perf_event_context *ctx = __info;
8174 8400
8175 perf_pmu_rotate_stop(ctx->pmu);
8176
8177 rcu_read_lock(); 8401 rcu_read_lock();
8178 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) 8402 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
8179 __perf_remove_from_context(&re); 8403 __perf_remove_from_context(&re);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 146a5792b1d2..eadb95ce7aac 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -13,12 +13,13 @@
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/circ_buf.h> 15#include <linux/circ_buf.h>
16#include <linux/poll.h>
16 17
17#include "internal.h" 18#include "internal.h"
18 19
19static void perf_output_wakeup(struct perf_output_handle *handle) 20static void perf_output_wakeup(struct perf_output_handle *handle)
20{ 21{
21 atomic_set(&handle->rb->poll, POLL_IN); 22 atomic_set(&handle->rb->poll, POLLIN);
22 23
23 handle->event->pending_wakeup = 1; 24 handle->event->pending_wakeup = 1;
24 irq_work_queue(&handle->event->pending); 25 irq_work_queue(&handle->event->pending);
diff --git a/kernel/futex.c b/kernel/futex.c
index 63678b573d61..4eeb63de7e54 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2258,7 +2258,7 @@ static long futex_wait_restart(struct restart_block *restart)
2258 * if there are waiters then it will block, it does PI, etc. (Due to 2258 * if there are waiters then it will block, it does PI, etc. (Due to
2259 * races the kernel might see a 0 value of the futex too.) 2259 * races the kernel might see a 0 value of the futex too.)
2260 */ 2260 */
2261static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, 2261static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2262 ktime_t *time, int trylock) 2262 ktime_t *time, int trylock)
2263{ 2263{
2264 struct hrtimer_sleeper timeout, *to = NULL; 2264 struct hrtimer_sleeper timeout, *to = NULL;
@@ -2953,11 +2953,11 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2953 case FUTEX_WAKE_OP: 2953 case FUTEX_WAKE_OP:
2954 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); 2954 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2955 case FUTEX_LOCK_PI: 2955 case FUTEX_LOCK_PI:
2956 return futex_lock_pi(uaddr, flags, val, timeout, 0); 2956 return futex_lock_pi(uaddr, flags, timeout, 0);
2957 case FUTEX_UNLOCK_PI: 2957 case FUTEX_UNLOCK_PI:
2958 return futex_unlock_pi(uaddr, flags); 2958 return futex_unlock_pi(uaddr, flags);
2959 case FUTEX_TRYLOCK_PI: 2959 case FUTEX_TRYLOCK_PI:
2960 return futex_lock_pi(uaddr, flags, 0, timeout, 1); 2960 return futex_lock_pi(uaddr, flags, NULL, 1);
2961 case FUTEX_WAIT_REQUEUE_PI: 2961 case FUTEX_WAIT_REQUEUE_PI:
2962 val3 = FUTEX_BITSET_MATCH_ANY; 2962 val3 = FUTEX_BITSET_MATCH_ANY;
2963 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, 2963 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 8541bfdfd232..4ca8eb151975 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -1,5 +1,5 @@
1 1
2obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o 2obj-y += mutex.o semaphore.o rwsem.o
3 3
4ifdef CONFIG_FUNCTION_TRACER 4ifdef CONFIG_FUNCTION_TRACER
5CFLAGS_REMOVE_lockdep.o = -pg 5CFLAGS_REMOVE_lockdep.o = -pg
@@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y)
14obj-$(CONFIG_LOCKDEP) += lockdep_proc.o 14obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
15endif 15endif
16obj-$(CONFIG_SMP) += spinlock.o 16obj-$(CONFIG_SMP) += spinlock.o
17obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
17obj-$(CONFIG_SMP) += lglock.o 18obj-$(CONFIG_SMP) += lglock.o
18obj-$(CONFIG_PROVE_LOCKING) += spinlock.o 19obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
19obj-$(CONFIG_RT_MUTEXES) += rtmutex.o 20obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 4d60986fcbee..d1fe2ba5bac9 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -108,20 +108,4 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
108 arch_mcs_spin_unlock_contended(&next->locked); 108 arch_mcs_spin_unlock_contended(&next->locked);
109} 109}
110 110
111/*
112 * Cancellable version of the MCS lock above.
113 *
114 * Intended for adaptive spinning of sleeping locks:
115 * mutex_lock()/rwsem_down_{read,write}() etc.
116 */
117
118struct optimistic_spin_node {
119 struct optimistic_spin_node *next, *prev;
120 int locked; /* 1 if lock acquired */
121 int cpu; /* encoded CPU # value */
122};
123
124extern bool osq_lock(struct optimistic_spin_queue *lock);
125extern void osq_unlock(struct optimistic_spin_queue *lock);
126
127#endif /* __LINUX_MCS_SPINLOCK_H */ 111#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 454195194d4a..94674e5919cb 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -81,7 +81,7 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
81 * The mutex must later on be released by the same task that 81 * The mutex must later on be released by the same task that
82 * acquired it. Recursive locking is not allowed. The task 82 * acquired it. Recursive locking is not allowed. The task
83 * may not exit without first unlocking the mutex. Also, kernel 83 * may not exit without first unlocking the mutex. Also, kernel
84 * memory where the mutex resides mutex must not be freed with 84 * memory where the mutex resides must not be freed with
85 * the mutex still locked. The mutex must first be initialized 85 * the mutex still locked. The mutex must first be initialized
86 * (or statically defined) before it can be locked. memset()-ing 86 * (or statically defined) before it can be locked. memset()-ing
87 * the mutex to 0 is not allowed. 87 * the mutex to 0 is not allowed.
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
147} 147}
148 148
149/* 149/*
150 * after acquiring lock with fastpath or when we lost out in contested 150 * After acquiring lock with fastpath or when we lost out in contested
151 * slowpath, set ctx and wake up any waiters so they can recheck. 151 * slowpath, set ctx and wake up any waiters so they can recheck.
152 * 152 *
153 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set, 153 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,19 +191,32 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
191 spin_unlock_mutex(&lock->base.wait_lock, flags); 191 spin_unlock_mutex(&lock->base.wait_lock, flags);
192} 192}
193 193
194
195#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
196/* 194/*
197 * In order to avoid a stampede of mutex spinners from acquiring the mutex 195 * After acquiring lock in the slowpath set ctx and wake up any
198 * more or less simultaneously, the spinners need to acquire a MCS lock 196 * waiters so they can recheck.
199 * first before spinning on the owner field.
200 * 197 *
198 * Callers must hold the mutex wait_lock.
201 */ 199 */
200static __always_inline void
201ww_mutex_set_context_slowpath(struct ww_mutex *lock,
202 struct ww_acquire_ctx *ctx)
203{
204 struct mutex_waiter *cur;
202 205
203/* 206 ww_mutex_lock_acquired(lock, ctx);
204 * Mutex spinning code migrated from kernel/sched/core.c 207 lock->ctx = ctx;
205 */ 208
209 /*
210 * Give any possible sleeping processes the chance to wake up,
211 * so they can recheck if they have to back off.
212 */
213 list_for_each_entry(cur, &lock->base.wait_list, list) {
214 debug_mutex_wake_waiter(&lock->base, cur);
215 wake_up_process(cur->task);
216 }
217}
206 218
219#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
207static inline bool owner_running(struct mutex *lock, struct task_struct *owner) 220static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
208{ 221{
209 if (lock->owner != owner) 222 if (lock->owner != owner)
@@ -307,6 +320,11 @@ static bool mutex_optimistic_spin(struct mutex *lock,
307 if (!mutex_can_spin_on_owner(lock)) 320 if (!mutex_can_spin_on_owner(lock))
308 goto done; 321 goto done;
309 322
323 /*
324 * In order to avoid a stampede of mutex spinners trying to
325 * acquire the mutex all at once, the spinners need to take a
326 * MCS (queued) lock first before spinning on the owner field.
327 */
310 if (!osq_lock(&lock->osq)) 328 if (!osq_lock(&lock->osq))
311 goto done; 329 goto done;
312 330
@@ -469,7 +487,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
469EXPORT_SYMBOL(ww_mutex_unlock); 487EXPORT_SYMBOL(ww_mutex_unlock);
470 488
471static inline int __sched 489static inline int __sched
472__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) 490__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
473{ 491{
474 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 492 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
475 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); 493 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
@@ -557,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
557 } 575 }
558 576
559 if (use_ww_ctx && ww_ctx->acquired > 0) { 577 if (use_ww_ctx && ww_ctx->acquired > 0) {
560 ret = __mutex_lock_check_stamp(lock, ww_ctx); 578 ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
561 if (ret) 579 if (ret)
562 goto err; 580 goto err;
563 } 581 }
@@ -569,6 +587,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
569 schedule_preempt_disabled(); 587 schedule_preempt_disabled();
570 spin_lock_mutex(&lock->wait_lock, flags); 588 spin_lock_mutex(&lock->wait_lock, flags);
571 } 589 }
590 __set_task_state(task, TASK_RUNNING);
591
572 mutex_remove_waiter(lock, &waiter, current_thread_info()); 592 mutex_remove_waiter(lock, &waiter, current_thread_info());
573 /* set it to 0 if there are no waiters left: */ 593 /* set it to 0 if there are no waiters left: */
574 if (likely(list_empty(&lock->wait_list))) 594 if (likely(list_empty(&lock->wait_list)))
@@ -582,23 +602,7 @@ skip_wait:
582 602
583 if (use_ww_ctx) { 603 if (use_ww_ctx) {
584 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 604 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
585 struct mutex_waiter *cur; 605 ww_mutex_set_context_slowpath(ww, ww_ctx);
586
587 /*
588 * This branch gets optimized out for the common case,
589 * and is only important for ww_mutex_lock.
590 */
591 ww_mutex_lock_acquired(ww, ww_ctx);
592 ww->ctx = ww_ctx;
593
594 /*
595 * Give any possible sleeping processes the chance to wake up,
596 * so they can recheck if they have to back off.
597 */
598 list_for_each_entry(cur, &lock->wait_list, list) {
599 debug_mutex_wake_waiter(lock, cur);
600 wake_up_process(cur->task);
601 }
602 } 606 }
603 607
604 spin_unlock_mutex(&lock->wait_lock, flags); 608 spin_unlock_mutex(&lock->wait_lock, flags);
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/osq_lock.c
index 9887a905a762..c112d00341b0 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/osq_lock.c
@@ -1,8 +1,6 @@
1#include <linux/percpu.h> 1#include <linux/percpu.h>
2#include <linux/sched.h> 2#include <linux/sched.h>
3#include "mcs_spinlock.h" 3#include <linux/osq_lock.h>
4
5#ifdef CONFIG_SMP
6 4
7/* 5/*
8 * An MCS like lock especially tailored for optimistic spinning for sleeping 6 * An MCS like lock especially tailored for optimistic spinning for sleeping
@@ -111,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
111 * cmpxchg in an attempt to undo our queueing. 109 * cmpxchg in an attempt to undo our queueing.
112 */ 110 */
113 111
114 while (!smp_load_acquire(&node->locked)) { 112 while (!ACCESS_ONCE(node->locked)) {
115 /* 113 /*
116 * If we need to reschedule bail... so we can block. 114 * If we need to reschedule bail... so we can block.
117 */ 115 */
@@ -203,6 +201,3 @@ void osq_unlock(struct optimistic_spin_queue *lock)
203 if (next) 201 if (next)
204 ACCESS_ONCE(next->locked) = 1; 202 ACCESS_ONCE(next->locked) = 1;
205} 203}
206
207#endif
208
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7c98873a3077..3059bc2f022d 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1130,6 +1130,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
1130 set_current_state(state); 1130 set_current_state(state);
1131 } 1131 }
1132 1132
1133 __set_current_state(TASK_RUNNING);
1133 return ret; 1134 return ret;
1134} 1135}
1135 1136
@@ -1188,10 +1189,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
1188 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); 1189 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
1189 1190
1190 if (likely(!ret)) 1191 if (likely(!ret))
1192 /* sleep on the mutex */
1191 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); 1193 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
1192 1194
1193 set_current_state(TASK_RUNNING);
1194
1195 if (unlikely(ret)) { 1195 if (unlikely(ret)) {
1196 remove_waiter(lock, &waiter); 1196 remove_waiter(lock, &waiter);
1197 rt_mutex_handle_deadlock(ret, chwalk, &waiter); 1197 rt_mutex_handle_deadlock(ret, chwalk, &waiter);
@@ -1626,10 +1626,9 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1626 1626
1627 set_current_state(TASK_INTERRUPTIBLE); 1627 set_current_state(TASK_INTERRUPTIBLE);
1628 1628
1629 /* sleep on the mutex */
1629 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); 1630 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1630 1631
1631 set_current_state(TASK_RUNNING);
1632
1633 if (unlikely(ret)) 1632 if (unlikely(ret))
1634 remove_waiter(lock, waiter); 1633 remove_waiter(lock, waiter);
1635 1634
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2c93571162cb..2555ae15ec14 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -154,7 +154,7 @@ void __sched __down_read(struct rw_semaphore *sem)
154 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 154 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
155 } 155 }
156 156
157 tsk->state = TASK_RUNNING; 157 __set_task_state(tsk, TASK_RUNNING);
158 out: 158 out:
159 ; 159 ;
160} 160}
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 7628c3fc37ca..2f7cc4076f50 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -242,8 +242,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
242 schedule(); 242 schedule();
243 } 243 }
244 244
245 tsk->state = TASK_RUNNING; 245 __set_task_state(tsk, TASK_RUNNING);
246
247 return sem; 246 return sem;
248} 247}
249EXPORT_SYMBOL(rwsem_down_read_failed); 248EXPORT_SYMBOL(rwsem_down_read_failed);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4803da6eab62..ae9fc7cc360e 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
402} 402}
403EXPORT_SYMBOL_GPL(raw_notifier_call_chain); 403EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
404 404
405#ifdef CONFIG_SRCU
405/* 406/*
406 * SRCU notifier chain routines. Registration and unregistration 407 * SRCU notifier chain routines. Registration and unregistration
407 * use a mutex, and call_chain is synchronized by SRCU (no locks). 408 * use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
528} 529}
529EXPORT_SYMBOL_GPL(srcu_init_notifier_head); 530EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
530 531
532#endif /* CONFIG_SRCU */
533
531static ATOMIC_NOTIFIER_HEAD(die_chain); 534static ATOMIC_NOTIFIER_HEAD(die_chain);
532 535
533int notrace notify_die(enum die_val val, const char *str, 536int notrace notify_die(enum die_val val, const char *str,
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 48b28d387c7f..7e01f78f0417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -251,6 +251,7 @@ config APM_EMULATION
251 251
252config PM_OPP 252config PM_OPP
253 bool 253 bool
254 select SRCU
254 ---help--- 255 ---help---
255 SOCs have a standard set of tuples consisting of frequency and 256 SOCs have a standard set of tuples consisting of frequency and
256 voltage pairs that the device will support per voltage domain. This 257 voltage pairs that the device will support per voltage domain. This
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 5f4c006c4b1e..97b0df71303e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -41,6 +41,8 @@
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/kernel.h> 43#include <linux/kernel.h>
44#include <linux/debugfs.h>
45#include <linux/seq_file.h>
44 46
45#include <linux/uaccess.h> 47#include <linux/uaccess.h>
46#include <linux/export.h> 48#include <linux/export.h>
@@ -182,6 +184,81 @@ static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
182 c->target_value = value; 184 c->target_value = value;
183} 185}
184 186
187static inline int pm_qos_get_value(struct pm_qos_constraints *c);
188static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
189{
190 struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
191 struct pm_qos_constraints *c;
192 struct pm_qos_request *req;
193 char *type;
194 unsigned long flags;
195 int tot_reqs = 0;
196 int active_reqs = 0;
197
198 if (IS_ERR_OR_NULL(qos)) {
199 pr_err("%s: bad qos param!\n", __func__);
200 return -EINVAL;
201 }
202 c = qos->constraints;
203 if (IS_ERR_OR_NULL(c)) {
204 pr_err("%s: Bad constraints on qos?\n", __func__);
205 return -EINVAL;
206 }
207
208 /* Lock to ensure we have a snapshot */
209 spin_lock_irqsave(&pm_qos_lock, flags);
210 if (plist_head_empty(&c->list)) {
211 seq_puts(s, "Empty!\n");
212 goto out;
213 }
214
215 switch (c->type) {
216 case PM_QOS_MIN:
217 type = "Minimum";
218 break;
219 case PM_QOS_MAX:
220 type = "Maximum";
221 break;
222 case PM_QOS_SUM:
223 type = "Sum";
224 break;
225 default:
226 type = "Unknown";
227 }
228
229 plist_for_each_entry(req, &c->list, node) {
230 char *state = "Default";
231
232 if ((req->node).prio != c->default_value) {
233 active_reqs++;
234 state = "Active";
235 }
236 tot_reqs++;
237 seq_printf(s, "%d: %d: %s\n", tot_reqs,
238 (req->node).prio, state);
239 }
240
241 seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
242 type, pm_qos_get_value(c), active_reqs, tot_reqs);
243
244out:
245 spin_unlock_irqrestore(&pm_qos_lock, flags);
246 return 0;
247}
248
249static int pm_qos_dbg_open(struct inode *inode, struct file *file)
250{
251 return single_open(file, pm_qos_dbg_show_requests,
252 inode->i_private);
253}
254
255static const struct file_operations pm_qos_debug_fops = {
256 .open = pm_qos_dbg_open,
257 .read = seq_read,
258 .llseek = seq_lseek,
259 .release = single_release,
260};
261
185/** 262/**
186 * pm_qos_update_target - manages the constraints list and calls the notifiers 263 * pm_qos_update_target - manages the constraints list and calls the notifiers
187 * if needed 264 * if needed
@@ -509,12 +586,17 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
509EXPORT_SYMBOL_GPL(pm_qos_remove_notifier); 586EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
510 587
511/* User space interface to PM QoS classes via misc devices */ 588/* User space interface to PM QoS classes via misc devices */
512static int register_pm_qos_misc(struct pm_qos_object *qos) 589static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
513{ 590{
514 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR; 591 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
515 qos->pm_qos_power_miscdev.name = qos->name; 592 qos->pm_qos_power_miscdev.name = qos->name;
516 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops; 593 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
517 594
595 if (d) {
596 (void)debugfs_create_file(qos->name, S_IRUGO, d,
597 (void *)qos, &pm_qos_debug_fops);
598 }
599
518 return misc_register(&qos->pm_qos_power_miscdev); 600 return misc_register(&qos->pm_qos_power_miscdev);
519} 601}
520 602
@@ -608,11 +690,16 @@ static int __init pm_qos_power_init(void)
608{ 690{
609 int ret = 0; 691 int ret = 0;
610 int i; 692 int i;
693 struct dentry *d;
611 694
612 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES); 695 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
613 696
697 d = debugfs_create_dir("pm_qos", NULL);
698 if (IS_ERR_OR_NULL(d))
699 d = NULL;
700
614 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) { 701 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
615 ret = register_pm_qos_misc(pm_qos_array[i]); 702 ret = register_pm_qos_misc(pm_qos_array[i], d);
616 if (ret < 0) { 703 if (ret < 0) {
617 printk(KERN_ERR "pm_qos_param: %s setup failed\n", 704 printk(KERN_ERR "pm_qos_param: %s setup failed\n",
618 pm_qos_array[i]->name); 705 pm_qos_array[i]->name);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0c40c16174b4..c24d5a23bf93 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1472,9 +1472,9 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1472/** 1472/**
1473 * free_unnecessary_pages - Release preallocated pages not needed for the image 1473 * free_unnecessary_pages - Release preallocated pages not needed for the image
1474 */ 1474 */
1475static void free_unnecessary_pages(void) 1475static unsigned long free_unnecessary_pages(void)
1476{ 1476{
1477 unsigned long save, to_free_normal, to_free_highmem; 1477 unsigned long save, to_free_normal, to_free_highmem, free;
1478 1478
1479 save = count_data_pages(); 1479 save = count_data_pages();
1480 if (alloc_normal >= save) { 1480 if (alloc_normal >= save) {
@@ -1495,6 +1495,7 @@ static void free_unnecessary_pages(void)
1495 else 1495 else
1496 to_free_normal = 0; 1496 to_free_normal = 0;
1497 } 1497 }
1498 free = to_free_normal + to_free_highmem;
1498 1499
1499 memory_bm_position_reset(&copy_bm); 1500 memory_bm_position_reset(&copy_bm);
1500 1501
@@ -1518,6 +1519,8 @@ static void free_unnecessary_pages(void)
1518 swsusp_unset_page_free(page); 1519 swsusp_unset_page_free(page);
1519 __free_page(page); 1520 __free_page(page);
1520 } 1521 }
1522
1523 return free;
1521} 1524}
1522 1525
1523/** 1526/**
@@ -1707,7 +1710,7 @@ int hibernate_preallocate_memory(void)
1707 * pages in memory, but we have allocated more. Release the excessive 1710 * pages in memory, but we have allocated more. Release the excessive
1708 * ones now. 1711 * ones now.
1709 */ 1712 */
1710 free_unnecessary_pages(); 1713 pages -= free_unnecessary_pages();
1711 1714
1712 out: 1715 out:
1713 stop = ktime_get(); 1716 stop = ktime_get();
@@ -2310,8 +2313,6 @@ static inline void free_highmem_data(void)
2310 free_image_page(buffer, PG_UNSAFE_CLEAR); 2313 free_image_page(buffer, PG_UNSAFE_CLEAR);
2311} 2314}
2312#else 2315#else
2313static inline int get_safe_write_buffer(void) { return 0; }
2314
2315static unsigned int 2316static unsigned int
2316count_highmem_image_pages(struct memory_bitmap *bm) { return 0; } 2317count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2317 2318
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index e6fae503d1bc..50a808424b06 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -1,4 +1,5 @@
1obj-y += update.o srcu.o 1obj-y += update.o
2obj-$(CONFIG_SRCU) += srcu.o
2obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o 3obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
3obj-$(CONFIG_TREE_RCU) += tree.o 4obj-$(CONFIG_TREE_RCU) += tree.o
4obj-$(CONFIG_PREEMPT_RCU) += tree.o 5obj-$(CONFIG_PREEMPT_RCU) += tree.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 07bb02eda844..80adef7d4c3d 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
137 137
138void rcu_early_boot_tests(void); 138void rcu_early_boot_tests(void);
139 139
140/*
141 * This function really isn't for public consumption, but RCU is special in
142 * that context switches can allow the state machine to make progress.
143 */
144extern void resched_cpu(int cpu);
145
140#endif /* __LINUX_RCU_H */ 146#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 4d559baf06e0..30d42aa55d83 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
244 int (*readlock)(void); 244 int (*readlock)(void);
245 void (*read_delay)(struct torture_random_state *rrsp); 245 void (*read_delay)(struct torture_random_state *rrsp);
246 void (*readunlock)(int idx); 246 void (*readunlock)(int idx);
247 int (*completed)(void); 247 unsigned long (*started)(void);
248 unsigned long (*completed)(void);
248 void (*deferred_free)(struct rcu_torture *p); 249 void (*deferred_free)(struct rcu_torture *p);
249 void (*sync)(void); 250 void (*sync)(void);
250 void (*exp_sync)(void); 251 void (*exp_sync)(void);
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
296 rcu_read_unlock(); 297 rcu_read_unlock();
297} 298}
298 299
299static int rcu_torture_completed(void)
300{
301 return rcu_batches_completed();
302}
303
304/* 300/*
305 * Update callback in the pipe. This should be invoked after a grace period. 301 * Update callback in the pipe. This should be invoked after a grace period.
306 */ 302 */
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
356 cur_ops->deferred_free(rp); 352 cur_ops->deferred_free(rp);
357} 353}
358 354
359static int rcu_no_completed(void) 355static unsigned long rcu_no_completed(void)
360{ 356{
361 return 0; 357 return 0;
362} 358}
@@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = {
377 .readlock = rcu_torture_read_lock, 373 .readlock = rcu_torture_read_lock,
378 .read_delay = rcu_read_delay, 374 .read_delay = rcu_read_delay,
379 .readunlock = rcu_torture_read_unlock, 375 .readunlock = rcu_torture_read_unlock,
380 .completed = rcu_torture_completed, 376 .started = rcu_batches_started,
377 .completed = rcu_batches_completed,
381 .deferred_free = rcu_torture_deferred_free, 378 .deferred_free = rcu_torture_deferred_free,
382 .sync = synchronize_rcu, 379 .sync = synchronize_rcu,
383 .exp_sync = synchronize_rcu_expedited, 380 .exp_sync = synchronize_rcu_expedited,
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
407 rcu_read_unlock_bh(); 404 rcu_read_unlock_bh();
408} 405}
409 406
410static int rcu_bh_torture_completed(void)
411{
412 return rcu_batches_completed_bh();
413}
414
415static void rcu_bh_torture_deferred_free(struct rcu_torture *p) 407static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
416{ 408{
417 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); 409 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
423 .readlock = rcu_bh_torture_read_lock, 415 .readlock = rcu_bh_torture_read_lock,
424 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 416 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
425 .readunlock = rcu_bh_torture_read_unlock, 417 .readunlock = rcu_bh_torture_read_unlock,
426 .completed = rcu_bh_torture_completed, 418 .started = rcu_batches_started_bh,
419 .completed = rcu_batches_completed_bh,
427 .deferred_free = rcu_bh_torture_deferred_free, 420 .deferred_free = rcu_bh_torture_deferred_free,
428 .sync = synchronize_rcu_bh, 421 .sync = synchronize_rcu_bh,
429 .exp_sync = synchronize_rcu_bh_expedited, 422 .exp_sync = synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
466 .readlock = rcu_torture_read_lock, 459 .readlock = rcu_torture_read_lock,
467 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 460 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
468 .readunlock = rcu_torture_read_unlock, 461 .readunlock = rcu_torture_read_unlock,
462 .started = rcu_no_completed,
469 .completed = rcu_no_completed, 463 .completed = rcu_no_completed,
470 .deferred_free = rcu_busted_torture_deferred_free, 464 .deferred_free = rcu_busted_torture_deferred_free,
471 .sync = synchronize_rcu_busted, 465 .sync = synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
510 srcu_read_unlock(&srcu_ctl, idx); 504 srcu_read_unlock(&srcu_ctl, idx);
511} 505}
512 506
513static int srcu_torture_completed(void) 507static unsigned long srcu_torture_completed(void)
514{ 508{
515 return srcu_batches_completed(&srcu_ctl); 509 return srcu_batches_completed(&srcu_ctl);
516} 510}
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
564 .readlock = srcu_torture_read_lock, 558 .readlock = srcu_torture_read_lock,
565 .read_delay = srcu_read_delay, 559 .read_delay = srcu_read_delay,
566 .readunlock = srcu_torture_read_unlock, 560 .readunlock = srcu_torture_read_unlock,
561 .started = NULL,
567 .completed = srcu_torture_completed, 562 .completed = srcu_torture_completed,
568 .deferred_free = srcu_torture_deferred_free, 563 .deferred_free = srcu_torture_deferred_free,
569 .sync = srcu_torture_synchronize, 564 .sync = srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
600 .readlock = sched_torture_read_lock, 595 .readlock = sched_torture_read_lock,
601 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 596 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
602 .readunlock = sched_torture_read_unlock, 597 .readunlock = sched_torture_read_unlock,
603 .completed = rcu_no_completed, 598 .started = rcu_batches_started_sched,
599 .completed = rcu_batches_completed_sched,
604 .deferred_free = rcu_sched_torture_deferred_free, 600 .deferred_free = rcu_sched_torture_deferred_free,
605 .sync = synchronize_sched, 601 .sync = synchronize_sched,
606 .exp_sync = synchronize_sched_expedited, 602 .exp_sync = synchronize_sched_expedited,
@@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
638 .readlock = tasks_torture_read_lock, 634 .readlock = tasks_torture_read_lock,
639 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 635 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
640 .readunlock = tasks_torture_read_unlock, 636 .readunlock = tasks_torture_read_unlock,
637 .started = rcu_no_completed,
641 .completed = rcu_no_completed, 638 .completed = rcu_no_completed,
642 .deferred_free = rcu_tasks_torture_deferred_free, 639 .deferred_free = rcu_tasks_torture_deferred_free,
643 .sync = synchronize_rcu_tasks, 640 .sync = synchronize_rcu_tasks,
@@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
1015static void rcu_torture_timer(unsigned long unused) 1012static void rcu_torture_timer(unsigned long unused)
1016{ 1013{
1017 int idx; 1014 int idx;
1018 int completed; 1015 unsigned long started;
1019 int completed_end; 1016 unsigned long completed;
1020 static DEFINE_TORTURE_RANDOM(rand); 1017 static DEFINE_TORTURE_RANDOM(rand);
1021 static DEFINE_SPINLOCK(rand_lock); 1018 static DEFINE_SPINLOCK(rand_lock);
1022 struct rcu_torture *p; 1019 struct rcu_torture *p;
@@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
1024 unsigned long long ts; 1021 unsigned long long ts;
1025 1022
1026 idx = cur_ops->readlock(); 1023 idx = cur_ops->readlock();
1027 completed = cur_ops->completed(); 1024 if (cur_ops->started)
1025 started = cur_ops->started();
1026 else
1027 started = cur_ops->completed();
1028 ts = rcu_trace_clock_local(); 1028 ts = rcu_trace_clock_local();
1029 p = rcu_dereference_check(rcu_torture_current, 1029 p = rcu_dereference_check(rcu_torture_current,
1030 rcu_read_lock_bh_held() || 1030 rcu_read_lock_bh_held() ||
@@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
1047 /* Should not happen, but... */ 1047 /* Should not happen, but... */
1048 pipe_count = RCU_TORTURE_PIPE_LEN; 1048 pipe_count = RCU_TORTURE_PIPE_LEN;
1049 } 1049 }
1050 completed_end = cur_ops->completed(); 1050 completed = cur_ops->completed();
1051 if (pipe_count > 1) { 1051 if (pipe_count > 1) {
1052 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, 1052 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
1053 completed, completed_end); 1053 started, completed);
1054 rcutorture_trace_dump(); 1054 rcutorture_trace_dump();
1055 } 1055 }
1056 __this_cpu_inc(rcu_torture_count[pipe_count]); 1056 __this_cpu_inc(rcu_torture_count[pipe_count]);
1057 completed = completed_end - completed; 1057 completed = completed - started;
1058 if (cur_ops->started)
1059 completed++;
1058 if (completed > RCU_TORTURE_PIPE_LEN) { 1060 if (completed > RCU_TORTURE_PIPE_LEN) {
1059 /* Should not happen, but... */ 1061 /* Should not happen, but... */
1060 completed = RCU_TORTURE_PIPE_LEN; 1062 completed = RCU_TORTURE_PIPE_LEN;
@@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
1073static int 1075static int
1074rcu_torture_reader(void *arg) 1076rcu_torture_reader(void *arg)
1075{ 1077{
1076 int completed; 1078 unsigned long started;
1077 int completed_end; 1079 unsigned long completed;
1078 int idx; 1080 int idx;
1079 DEFINE_TORTURE_RANDOM(rand); 1081 DEFINE_TORTURE_RANDOM(rand);
1080 struct rcu_torture *p; 1082 struct rcu_torture *p;
@@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
1093 mod_timer(&t, jiffies + 1); 1095 mod_timer(&t, jiffies + 1);
1094 } 1096 }
1095 idx = cur_ops->readlock(); 1097 idx = cur_ops->readlock();
1096 completed = cur_ops->completed(); 1098 if (cur_ops->started)
1099 started = cur_ops->started();
1100 else
1101 started = cur_ops->completed();
1097 ts = rcu_trace_clock_local(); 1102 ts = rcu_trace_clock_local();
1098 p = rcu_dereference_check(rcu_torture_current, 1103 p = rcu_dereference_check(rcu_torture_current,
1099 rcu_read_lock_bh_held() || 1104 rcu_read_lock_bh_held() ||
@@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
1114 /* Should not happen, but... */ 1119 /* Should not happen, but... */
1115 pipe_count = RCU_TORTURE_PIPE_LEN; 1120 pipe_count = RCU_TORTURE_PIPE_LEN;
1116 } 1121 }
1117 completed_end = cur_ops->completed(); 1122 completed = cur_ops->completed();
1118 if (pipe_count > 1) { 1123 if (pipe_count > 1) {
1119 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1124 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1120 ts, completed, completed_end); 1125 ts, started, completed);
1121 rcutorture_trace_dump(); 1126 rcutorture_trace_dump();
1122 } 1127 }
1123 __this_cpu_inc(rcu_torture_count[pipe_count]); 1128 __this_cpu_inc(rcu_torture_count[pipe_count]);
1124 completed = completed_end - completed; 1129 completed = completed - started;
1130 if (cur_ops->started)
1131 completed++;
1125 if (completed > RCU_TORTURE_PIPE_LEN) { 1132 if (completed > RCU_TORTURE_PIPE_LEN) {
1126 /* Should not happen, but... */ 1133 /* Should not happen, but... */
1127 completed = RCU_TORTURE_PIPE_LEN; 1134 completed = RCU_TORTURE_PIPE_LEN;
@@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
1420 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 1427 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
1421 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 1428 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1422 n_rcu_torture_barrier_error++; 1429 n_rcu_torture_barrier_error++;
1430 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1431 atomic_read(&barrier_cbs_invoked),
1432 n_barrier_cbs);
1423 WARN_ON_ONCE(1); 1433 WARN_ON_ONCE(1);
1424 } 1434 }
1425 n_barrier_successes++; 1435 n_barrier_successes++;
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index e037f3eb2f7b..445bf8ffe3fb 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
546 * Report the number of batches, correlated with, but not necessarily 546 * Report the number of batches, correlated with, but not necessarily
547 * precisely the same as, the number of grace periods that have elapsed. 547 * precisely the same as, the number of grace periods that have elapsed.
548 */ 548 */
549long srcu_batches_completed(struct srcu_struct *sp) 549unsigned long srcu_batches_completed(struct srcu_struct *sp)
550{ 550{
551 return sp->completed; 551 return sp->completed;
552} 552}
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 0db5649f8817..cc9ceca7bde1 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
47 void (*func)(struct rcu_head *rcu), 47 void (*func)(struct rcu_head *rcu),
48 struct rcu_ctrlblk *rcp); 48 struct rcu_ctrlblk *rcp);
49 49
50static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
51
52#include "tiny_plugin.h" 50#include "tiny_plugin.h"
53 51
54/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
55static void rcu_idle_enter_common(long long newval)
56{
57 if (newval) {
58 RCU_TRACE(trace_rcu_dyntick(TPS("--="),
59 rcu_dynticks_nesting, newval));
60 rcu_dynticks_nesting = newval;
61 return;
62 }
63 RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
64 rcu_dynticks_nesting, newval));
65 if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
66 struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
67
68 RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
69 rcu_dynticks_nesting, newval));
70 ftrace_dump(DUMP_ALL);
71 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
72 current->pid, current->comm,
73 idle->pid, idle->comm); /* must be idle task! */
74 }
75 rcu_sched_qs(); /* implies rcu_bh_inc() */
76 barrier();
77 rcu_dynticks_nesting = newval;
78}
79
80/* 52/*
81 * Enter idle, which is an extended quiescent state if we have fully 53 * Enter idle, which is an extended quiescent state if we have fully
82 * entered that mode (i.e., if the new value of dynticks_nesting is zero). 54 * entered that mode.
83 */ 55 */
84void rcu_idle_enter(void) 56void rcu_idle_enter(void)
85{ 57{
86 unsigned long flags;
87 long long newval;
88
89 local_irq_save(flags);
90 WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
91 if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
92 DYNTICK_TASK_NEST_VALUE)
93 newval = 0;
94 else
95 newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
96 rcu_idle_enter_common(newval);
97 local_irq_restore(flags);
98} 58}
99EXPORT_SYMBOL_GPL(rcu_idle_enter); 59EXPORT_SYMBOL_GPL(rcu_idle_enter);
100 60
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
103 */ 63 */
104void rcu_irq_exit(void) 64void rcu_irq_exit(void)
105{ 65{
106 unsigned long flags;
107 long long newval;
108
109 local_irq_save(flags);
110 newval = rcu_dynticks_nesting - 1;
111 WARN_ON_ONCE(newval < 0);
112 rcu_idle_enter_common(newval);
113 local_irq_restore(flags);
114} 66}
115EXPORT_SYMBOL_GPL(rcu_irq_exit); 67EXPORT_SYMBOL_GPL(rcu_irq_exit);
116 68
117/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
118static void rcu_idle_exit_common(long long oldval)
119{
120 if (oldval) {
121 RCU_TRACE(trace_rcu_dyntick(TPS("++="),
122 oldval, rcu_dynticks_nesting));
123 return;
124 }
125 RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
126 if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
127 struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
128
129 RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
130 oldval, rcu_dynticks_nesting));
131 ftrace_dump(DUMP_ALL);
132 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
133 current->pid, current->comm,
134 idle->pid, idle->comm); /* must be idle task! */
135 }
136}
137
138/* 69/*
139 * Exit idle, so that we are no longer in an extended quiescent state. 70 * Exit idle, so that we are no longer in an extended quiescent state.
140 */ 71 */
141void rcu_idle_exit(void) 72void rcu_idle_exit(void)
142{ 73{
143 unsigned long flags;
144 long long oldval;
145
146 local_irq_save(flags);
147 oldval = rcu_dynticks_nesting;
148 WARN_ON_ONCE(rcu_dynticks_nesting < 0);
149 if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
150 rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
151 else
152 rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
153 rcu_idle_exit_common(oldval);
154 local_irq_restore(flags);
155} 74}
156EXPORT_SYMBOL_GPL(rcu_idle_exit); 75EXPORT_SYMBOL_GPL(rcu_idle_exit);
157 76
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
160 */ 79 */
161void rcu_irq_enter(void) 80void rcu_irq_enter(void)
162{ 81{
163 unsigned long flags;
164 long long oldval;
165
166 local_irq_save(flags);
167 oldval = rcu_dynticks_nesting;
168 rcu_dynticks_nesting++;
169 WARN_ON_ONCE(rcu_dynticks_nesting == 0);
170 rcu_idle_exit_common(oldval);
171 local_irq_restore(flags);
172} 82}
173EXPORT_SYMBOL_GPL(rcu_irq_enter); 83EXPORT_SYMBOL_GPL(rcu_irq_enter);
174 84
@@ -179,23 +89,13 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
179 */ 89 */
180bool notrace __rcu_is_watching(void) 90bool notrace __rcu_is_watching(void)
181{ 91{
182 return rcu_dynticks_nesting; 92 return true;
183} 93}
184EXPORT_SYMBOL(__rcu_is_watching); 94EXPORT_SYMBOL(__rcu_is_watching);
185 95
186#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 96#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
187 97
188/* 98/*
189 * Test whether the current CPU was interrupted from idle. Nested
190 * interrupts don't count, we must be running at the first interrupt
191 * level.
192 */
193static int rcu_is_cpu_rrupt_from_idle(void)
194{
195 return rcu_dynticks_nesting <= 1;
196}
197
198/*
199 * Helper function for rcu_sched_qs() and rcu_bh_qs(). 99 * Helper function for rcu_sched_qs() and rcu_bh_qs().
200 * Also irqs are disabled to avoid confusion due to interrupt handlers 100 * Also irqs are disabled to avoid confusion due to interrupt handlers
201 * invoking call_rcu(). 101 * invoking call_rcu().
@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
250void rcu_check_callbacks(int user) 150void rcu_check_callbacks(int user)
251{ 151{
252 RCU_TRACE(check_cpu_stalls()); 152 RCU_TRACE(check_cpu_stalls());
253 if (user || rcu_is_cpu_rrupt_from_idle()) 153 if (user)
254 rcu_sched_qs(); 154 rcu_sched_qs();
255 else if (!in_softirq()) 155 else if (!in_softirq())
256 rcu_bh_qs(); 156 rcu_bh_qs();
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
357 rcp->curtail = &head->next; 257 rcp->curtail = &head->next;
358 RCU_TRACE(rcp->qlen++); 258 RCU_TRACE(rcp->qlen++);
359 local_irq_restore(flags); 259 local_irq_restore(flags);
260
261 if (unlikely(is_idle_task(current))) {
262 /* force scheduling for rcu_sched_qs() */
263 resched_cpu(0);
264 }
360} 265}
361 266
362/* 267/*
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
383void __init rcu_init(void) 288void __init rcu_init(void)
384{ 289{
385 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 290 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
291 RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
292 RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
386 293
387 rcu_early_boot_tests(); 294 rcu_early_boot_tests();
388} 295}
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 858c56569127..f94e209a10d6 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
145 rcp->ticks_this_gp++; 145 rcp->ticks_this_gp++;
146 j = jiffies; 146 j = jiffies;
147 js = ACCESS_ONCE(rcp->jiffies_stall); 147 js = ACCESS_ONCE(rcp->jiffies_stall);
148 if (*rcp->curtail && ULONG_CMP_GE(j, js)) { 148 if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
149 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", 149 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
150 rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting, 150 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
151 jiffies - rcp->gp_start, rcp->qlen); 151 jiffies - rcp->gp_start, rcp->qlen);
152 dump_stack(); 152 dump_stack();
153 }
154 if (*rcp->curtail && ULONG_CMP_GE(j, js))
155 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + 153 ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
156 3 * rcu_jiffies_till_stall_check() + 3; 154 3 * rcu_jiffies_till_stall_check() + 3;
157 else if (ULONG_CMP_GE(j, js)) 155 } else if (ULONG_CMP_GE(j, js)) {
158 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); 156 ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
157 }
159} 158}
160 159
161static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) 160static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7680fc275036..48d640ca1a05 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
156static void invoke_rcu_core(void); 156static void invoke_rcu_core(void);
157static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 157static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
158 158
159/* rcuc/rcub kthread realtime priority */
160static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
161module_param(kthread_prio, int, 0644);
162
159/* 163/*
160 * Track the rcutorture test sequence number and the update version 164 * Track the rcutorture test sequence number and the update version
161 * number within a given test. The rcutorture_testseq is incremented 165 * number within a given test. The rcutorture_testseq is incremented
@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
215#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 219#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
216}; 220};
217 221
222DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
223EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
224
218/* 225/*
219 * Let the RCU core know that this CPU has gone through the scheduler, 226 * Let the RCU core know that this CPU has gone through the scheduler,
220 * which is a quiescent state. This is called when the need for a 227 * which is a quiescent state. This is called when the need for a
@@ -284,6 +291,22 @@ void rcu_note_context_switch(void)
284} 291}
285EXPORT_SYMBOL_GPL(rcu_note_context_switch); 292EXPORT_SYMBOL_GPL(rcu_note_context_switch);
286 293
294/*
295 * Register a quiesecent state for all RCU flavors. If there is an
296 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
297 * dyntick-idle quiescent state visible to other CPUs (but only for those
298 * RCU flavors in desparate need of a quiescent state, which will normally
299 * be none of them). Either way, do a lightweight quiescent state for
300 * all RCU flavors.
301 */
302void rcu_all_qs(void)
303{
304 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
305 rcu_momentary_dyntick_idle();
306 this_cpu_inc(rcu_qs_ctr);
307}
308EXPORT_SYMBOL_GPL(rcu_all_qs);
309
287static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 310static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
288static long qhimark = 10000; /* If this many pending, ignore blimit. */ 311static long qhimark = 10000; /* If this many pending, ignore blimit. */
289static long qlowmark = 100; /* Once only this many pending, use blimit. */ 312static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp);
315static int rcu_pending(void); 338static int rcu_pending(void);
316 339
317/* 340/*
318 * Return the number of RCU-sched batches processed thus far for debug & stats. 341 * Return the number of RCU batches started thus far for debug & stats.
342 */
343unsigned long rcu_batches_started(void)
344{
345 return rcu_state_p->gpnum;
346}
347EXPORT_SYMBOL_GPL(rcu_batches_started);
348
349/*
350 * Return the number of RCU-sched batches started thus far for debug & stats.
351 */
352unsigned long rcu_batches_started_sched(void)
353{
354 return rcu_sched_state.gpnum;
355}
356EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
357
358/*
359 * Return the number of RCU BH batches started thus far for debug & stats.
319 */ 360 */
320long rcu_batches_completed_sched(void) 361unsigned long rcu_batches_started_bh(void)
362{
363 return rcu_bh_state.gpnum;
364}
365EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
366
367/*
368 * Return the number of RCU batches completed thus far for debug & stats.
369 */
370unsigned long rcu_batches_completed(void)
371{
372 return rcu_state_p->completed;
373}
374EXPORT_SYMBOL_GPL(rcu_batches_completed);
375
376/*
377 * Return the number of RCU-sched batches completed thus far for debug & stats.
378 */
379unsigned long rcu_batches_completed_sched(void)
321{ 380{
322 return rcu_sched_state.completed; 381 return rcu_sched_state.completed;
323} 382}
324EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); 383EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
325 384
326/* 385/*
327 * Return the number of RCU BH batches processed thus far for debug & stats. 386 * Return the number of RCU BH batches completed thus far for debug & stats.
328 */ 387 */
329long rcu_batches_completed_bh(void) 388unsigned long rcu_batches_completed_bh(void)
330{ 389{
331 return rcu_bh_state.completed; 390 return rcu_bh_state.completed;
332} 391}
@@ -759,39 +818,71 @@ void rcu_irq_enter(void)
759/** 818/**
760 * rcu_nmi_enter - inform RCU of entry to NMI context 819 * rcu_nmi_enter - inform RCU of entry to NMI context
761 * 820 *
762 * If the CPU was idle with dynamic ticks active, and there is no 821 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
763 * irq handler running, this updates rdtp->dynticks_nmi to let the 822 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
764 * RCU grace-period handling know that the CPU is active. 823 * that the CPU is active. This implementation permits nested NMIs, as
824 * long as the nesting level does not overflow an int. (You will probably
825 * run out of stack space first.)
765 */ 826 */
766void rcu_nmi_enter(void) 827void rcu_nmi_enter(void)
767{ 828{
768 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 829 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
830 int incby = 2;
769 831
770 if (rdtp->dynticks_nmi_nesting == 0 && 832 /* Complain about underflow. */
771 (atomic_read(&rdtp->dynticks) & 0x1)) 833 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
772 return; 834
773 rdtp->dynticks_nmi_nesting++; 835 /*
774 smp_mb__before_atomic(); /* Force delay from prior write. */ 836 * If idle from RCU viewpoint, atomically increment ->dynticks
775 atomic_inc(&rdtp->dynticks); 837 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
776 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ 838 * Otherwise, increment ->dynticks_nmi_nesting by two. This means
777 smp_mb__after_atomic(); /* See above. */ 839 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
778 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); 840 * to be in the outermost NMI handler that interrupted an RCU-idle
841 * period (observation due to Andy Lutomirski).
842 */
843 if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
844 smp_mb__before_atomic(); /* Force delay from prior write. */
845 atomic_inc(&rdtp->dynticks);
846 /* atomic_inc() before later RCU read-side crit sects */
847 smp_mb__after_atomic(); /* See above. */
848 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
849 incby = 1;
850 }
851 rdtp->dynticks_nmi_nesting += incby;
852 barrier();
779} 853}
780 854
781/** 855/**
782 * rcu_nmi_exit - inform RCU of exit from NMI context 856 * rcu_nmi_exit - inform RCU of exit from NMI context
783 * 857 *
784 * If the CPU was idle with dynamic ticks active, and there is no 858 * If we are returning from the outermost NMI handler that interrupted an
785 * irq handler running, this updates rdtp->dynticks_nmi to let the 859 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
786 * RCU grace-period handling know that the CPU is no longer active. 860 * to let the RCU grace-period handling know that the CPU is back to
861 * being RCU-idle.
787 */ 862 */
788void rcu_nmi_exit(void) 863void rcu_nmi_exit(void)
789{ 864{
790 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 865 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
791 866
792 if (rdtp->dynticks_nmi_nesting == 0 || 867 /*
793 --rdtp->dynticks_nmi_nesting != 0) 868 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
869 * (We are exiting an NMI handler, so RCU better be paying attention
870 * to us!)
871 */
872 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
873 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
874
875 /*
876 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
877 * leave it in non-RCU-idle state.
878 */
879 if (rdtp->dynticks_nmi_nesting != 1) {
880 rdtp->dynticks_nmi_nesting -= 2;
794 return; 881 return;
882 }
883
884 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
885 rdtp->dynticks_nmi_nesting = 0;
795 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ 886 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
796 smp_mb__before_atomic(); /* See above. */ 887 smp_mb__before_atomic(); /* See above. */
797 atomic_inc(&rdtp->dynticks); 888 atomic_inc(&rdtp->dynticks);
@@ -898,17 +989,14 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
898 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); 989 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
899 return 1; 990 return 1;
900 } else { 991 } else {
992 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
993 rdp->mynode->gpnum))
994 ACCESS_ONCE(rdp->gpwrap) = true;
901 return 0; 995 return 0;
902 } 996 }
903} 997}
904 998
905/* 999/*
906 * This function really isn't for public consumption, but RCU is special in
907 * that context switches can allow the state machine to make progress.
908 */
909extern void resched_cpu(int cpu);
910
911/*
912 * Return true if the specified CPU has passed through a quiescent 1000 * Return true if the specified CPU has passed through a quiescent
913 * state by virtue of being in or having passed through an dynticks 1001 * state by virtue of being in or having passed through an dynticks
914 * idle state since the last call to dyntick_save_progress_counter() 1002 * idle state since the last call to dyntick_save_progress_counter()
@@ -1011,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
1011 j1 = rcu_jiffies_till_stall_check(); 1099 j1 = rcu_jiffies_till_stall_check();
1012 ACCESS_ONCE(rsp->jiffies_stall) = j + j1; 1100 ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
1013 rsp->jiffies_resched = j + j1 / 2; 1101 rsp->jiffies_resched = j + j1 / 2;
1102 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
1103}
1104
1105/*
1106 * Complain about starvation of grace-period kthread.
1107 */
1108static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1109{
1110 unsigned long gpa;
1111 unsigned long j;
1112
1113 j = jiffies;
1114 gpa = ACCESS_ONCE(rsp->gp_activity);
1115 if (j - gpa > 2 * HZ)
1116 pr_err("%s kthread starved for %ld jiffies!\n",
1117 rsp->name, j - gpa);
1014} 1118}
1015 1119
1016/* 1120/*
@@ -1033,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1033 } 1137 }
1034} 1138}
1035 1139
1036static void print_other_cpu_stall(struct rcu_state *rsp) 1140static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1037{ 1141{
1038 int cpu; 1142 int cpu;
1039 long delta; 1143 long delta;
1040 unsigned long flags; 1144 unsigned long flags;
1145 unsigned long gpa;
1146 unsigned long j;
1041 int ndetected = 0; 1147 int ndetected = 0;
1042 struct rcu_node *rnp = rcu_get_root(rsp); 1148 struct rcu_node *rnp = rcu_get_root(rsp);
1043 long totqlen = 0; 1149 long totqlen = 0;
@@ -1075,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
1075 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1181 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1076 } 1182 }
1077 1183
1078 /*
1079 * Now rat on any tasks that got kicked up to the root rcu_node
1080 * due to CPU offlining.
1081 */
1082 rnp = rcu_get_root(rsp);
1083 raw_spin_lock_irqsave(&rnp->lock, flags);
1084 ndetected += rcu_print_task_stall(rnp);
1085 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1086
1087 print_cpu_stall_info_end(); 1184 print_cpu_stall_info_end();
1088 for_each_possible_cpu(cpu) 1185 for_each_possible_cpu(cpu)
1089 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; 1186 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1090 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n", 1187 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1091 smp_processor_id(), (long)(jiffies - rsp->gp_start), 1188 smp_processor_id(), (long)(jiffies - rsp->gp_start),
1092 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1189 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1093 if (ndetected == 0) 1190 if (ndetected) {
1094 pr_err("INFO: Stall ended before state dump start\n");
1095 else
1096 rcu_dump_cpu_stacks(rsp); 1191 rcu_dump_cpu_stacks(rsp);
1192 } else {
1193 if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
1194 ACCESS_ONCE(rsp->completed) == gpnum) {
1195 pr_err("INFO: Stall ended before state dump start\n");
1196 } else {
1197 j = jiffies;
1198 gpa = ACCESS_ONCE(rsp->gp_activity);
1199 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
1200 rsp->name, j - gpa, j, gpa,
1201 jiffies_till_next_fqs);
1202 /* In this case, the current CPU might be at fault. */
1203 sched_show_task(current);
1204 }
1205 }
1097 1206
1098 /* Complain about tasks blocking the grace period. */ 1207 /* Complain about tasks blocking the grace period. */
1099
1100 rcu_print_detail_task_stall(rsp); 1208 rcu_print_detail_task_stall(rsp);
1101 1209
1210 rcu_check_gp_kthread_starvation(rsp);
1211
1102 force_quiescent_state(rsp); /* Kick them all. */ 1212 force_quiescent_state(rsp); /* Kick them all. */
1103} 1213}
1104 1214
@@ -1123,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
1123 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n", 1233 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1124 jiffies - rsp->gp_start, 1234 jiffies - rsp->gp_start,
1125 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1235 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1236
1237 rcu_check_gp_kthread_starvation(rsp);
1238
1126 rcu_dump_cpu_stacks(rsp); 1239 rcu_dump_cpu_stacks(rsp);
1127 1240
1128 raw_spin_lock_irqsave(&rnp->lock, flags); 1241 raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1193,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1193 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { 1306 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1194 1307
1195 /* They had a few time units to dump stack, so complain. */ 1308 /* They had a few time units to dump stack, so complain. */
1196 print_other_cpu_stall(rsp); 1309 print_other_cpu_stall(rsp, gpnum);
1197 } 1310 }
1198} 1311}
1199 1312
@@ -1530,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1530 bool ret; 1643 bool ret;
1531 1644
1532 /* Handle the ends of any preceding grace periods first. */ 1645 /* Handle the ends of any preceding grace periods first. */
1533 if (rdp->completed == rnp->completed) { 1646 if (rdp->completed == rnp->completed &&
1647 !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1534 1648
1535 /* No grace period end, so just accelerate recent callbacks. */ 1649 /* No grace period end, so just accelerate recent callbacks. */
1536 ret = rcu_accelerate_cbs(rsp, rnp, rdp); 1650 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1545,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1545 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); 1659 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1546 } 1660 }
1547 1661
1548 if (rdp->gpnum != rnp->gpnum) { 1662 if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1549 /* 1663 /*
1550 * If the current grace period is waiting for this CPU, 1664 * If the current grace period is waiting for this CPU,
1551 * set up to detect a quiescent state, otherwise don't 1665 * set up to detect a quiescent state, otherwise don't
@@ -1554,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1554 rdp->gpnum = rnp->gpnum; 1668 rdp->gpnum = rnp->gpnum;
1555 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); 1669 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1556 rdp->passed_quiesce = 0; 1670 rdp->passed_quiesce = 0;
1671 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1557 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); 1672 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1558 zero_cpu_stall_ticks(rdp); 1673 zero_cpu_stall_ticks(rdp);
1674 ACCESS_ONCE(rdp->gpwrap) = false;
1559 } 1675 }
1560 return ret; 1676 return ret;
1561} 1677}
@@ -1569,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1569 local_irq_save(flags); 1685 local_irq_save(flags);
1570 rnp = rdp->mynode; 1686 rnp = rdp->mynode;
1571 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && 1687 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1572 rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */ 1688 rdp->completed == ACCESS_ONCE(rnp->completed) &&
1689 !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
1573 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ 1690 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1574 local_irq_restore(flags); 1691 local_irq_restore(flags);
1575 return; 1692 return;
@@ -1589,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
1589 struct rcu_data *rdp; 1706 struct rcu_data *rdp;
1590 struct rcu_node *rnp = rcu_get_root(rsp); 1707 struct rcu_node *rnp = rcu_get_root(rsp);
1591 1708
1709 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1592 rcu_bind_gp_kthread(); 1710 rcu_bind_gp_kthread();
1593 raw_spin_lock_irq(&rnp->lock); 1711 raw_spin_lock_irq(&rnp->lock);
1594 smp_mb__after_unlock_lock(); 1712 smp_mb__after_unlock_lock();
@@ -1649,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
1649 rnp->grphi, rnp->qsmask); 1767 rnp->grphi, rnp->qsmask);
1650 raw_spin_unlock_irq(&rnp->lock); 1768 raw_spin_unlock_irq(&rnp->lock);
1651 cond_resched_rcu_qs(); 1769 cond_resched_rcu_qs();
1770 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1652 } 1771 }
1653 1772
1654 mutex_unlock(&rsp->onoff_mutex); 1773 mutex_unlock(&rsp->onoff_mutex);
@@ -1665,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1665 unsigned long maxj; 1784 unsigned long maxj;
1666 struct rcu_node *rnp = rcu_get_root(rsp); 1785 struct rcu_node *rnp = rcu_get_root(rsp);
1667 1786
1787 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1668 rsp->n_force_qs++; 1788 rsp->n_force_qs++;
1669 if (fqs_state == RCU_SAVE_DYNTICK) { 1789 if (fqs_state == RCU_SAVE_DYNTICK) {
1670 /* Collect dyntick-idle snapshots. */ 1790 /* Collect dyntick-idle snapshots. */
@@ -1703,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
1703 struct rcu_data *rdp; 1823 struct rcu_data *rdp;
1704 struct rcu_node *rnp = rcu_get_root(rsp); 1824 struct rcu_node *rnp = rcu_get_root(rsp);
1705 1825
1826 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1706 raw_spin_lock_irq(&rnp->lock); 1827 raw_spin_lock_irq(&rnp->lock);
1707 smp_mb__after_unlock_lock(); 1828 smp_mb__after_unlock_lock();
1708 gp_duration = jiffies - rsp->gp_start; 1829 gp_duration = jiffies - rsp->gp_start;
@@ -1739,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
1739 nocb += rcu_future_gp_cleanup(rsp, rnp); 1860 nocb += rcu_future_gp_cleanup(rsp, rnp);
1740 raw_spin_unlock_irq(&rnp->lock); 1861 raw_spin_unlock_irq(&rnp->lock);
1741 cond_resched_rcu_qs(); 1862 cond_resched_rcu_qs();
1863 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1742 } 1864 }
1743 rnp = rcu_get_root(rsp); 1865 rnp = rcu_get_root(rsp);
1744 raw_spin_lock_irq(&rnp->lock); 1866 raw_spin_lock_irq(&rnp->lock);
@@ -1788,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
1788 if (rcu_gp_init(rsp)) 1910 if (rcu_gp_init(rsp))
1789 break; 1911 break;
1790 cond_resched_rcu_qs(); 1912 cond_resched_rcu_qs();
1913 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1791 WARN_ON(signal_pending(current)); 1914 WARN_ON(signal_pending(current));
1792 trace_rcu_grace_period(rsp->name, 1915 trace_rcu_grace_period(rsp->name,
1793 ACCESS_ONCE(rsp->gpnum), 1916 ACCESS_ONCE(rsp->gpnum),
@@ -1831,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
1831 ACCESS_ONCE(rsp->gpnum), 1954 ACCESS_ONCE(rsp->gpnum),
1832 TPS("fqsend")); 1955 TPS("fqsend"));
1833 cond_resched_rcu_qs(); 1956 cond_resched_rcu_qs();
1957 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1834 } else { 1958 } else {
1835 /* Deal with stray signal. */ 1959 /* Deal with stray signal. */
1836 cond_resched_rcu_qs(); 1960 cond_resched_rcu_qs();
1961 ACCESS_ONCE(rsp->gp_activity) = jiffies;
1837 WARN_ON(signal_pending(current)); 1962 WARN_ON(signal_pending(current));
1838 trace_rcu_grace_period(rsp->name, 1963 trace_rcu_grace_period(rsp->name,
1839 ACCESS_ONCE(rsp->gpnum), 1964 ACCESS_ONCE(rsp->gpnum),
@@ -2010,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2010 rnp = rdp->mynode; 2135 rnp = rdp->mynode;
2011 raw_spin_lock_irqsave(&rnp->lock, flags); 2136 raw_spin_lock_irqsave(&rnp->lock, flags);
2012 smp_mb__after_unlock_lock(); 2137 smp_mb__after_unlock_lock();
2013 if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || 2138 if ((rdp->passed_quiesce == 0 &&
2014 rnp->completed == rnp->gpnum) { 2139 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2140 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2141 rdp->gpwrap) {
2015 2142
2016 /* 2143 /*
2017 * The grace period in which this quiescent state was 2144 * The grace period in which this quiescent state was
@@ -2020,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2020 * within the current grace period. 2147 * within the current grace period.
2021 */ 2148 */
2022 rdp->passed_quiesce = 0; /* need qs for new gp. */ 2149 rdp->passed_quiesce = 0; /* need qs for new gp. */
2150 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
2023 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2151 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2024 return; 2152 return;
2025 } 2153 }
@@ -2064,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2064 * Was there a quiescent state since the beginning of the grace 2192 * Was there a quiescent state since the beginning of the grace
2065 * period? If no, then exit and wait for the next call. 2193 * period? If no, then exit and wait for the next call.
2066 */ 2194 */
2067 if (!rdp->passed_quiesce) 2195 if (!rdp->passed_quiesce &&
2196 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
2068 return; 2197 return;
2069 2198
2070 /* 2199 /*
@@ -2195,6 +2324,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2195} 2324}
2196 2325
2197/* 2326/*
2327 * All CPUs for the specified rcu_node structure have gone offline,
2328 * and all tasks that were preempted within an RCU read-side critical
2329 * section while running on one of those CPUs have since exited their RCU
2330 * read-side critical section. Some other CPU is reporting this fact with
2331 * the specified rcu_node structure's ->lock held and interrupts disabled.
2332 * This function therefore goes up the tree of rcu_node structures,
2333 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2334 * the leaf rcu_node structure's ->qsmaskinit field has already been
2335 * updated
2336 *
2337 * This function does check that the specified rcu_node structure has
2338 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2339 * prematurely. That said, invoking it after the fact will cost you
2340 * a needless lock acquisition. So once it has done its work, don't
2341 * invoke it again.
2342 */
2343static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2344{
2345 long mask;
2346 struct rcu_node *rnp = rnp_leaf;
2347
2348 if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2349 return;
2350 for (;;) {
2351 mask = rnp->grpmask;
2352 rnp = rnp->parent;
2353 if (!rnp)
2354 break;
2355 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2356 smp_mb__after_unlock_lock(); /* GP memory ordering. */
2357 rnp->qsmaskinit &= ~mask;
2358 if (rnp->qsmaskinit) {
2359 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2360 return;
2361 }
2362 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2363 }
2364}
2365
2366/*
2198 * The CPU has been completely removed, and some other CPU is reporting 2367 * The CPU has been completely removed, and some other CPU is reporting
2199 * this fact from process context. Do the remainder of the cleanup, 2368 * this fact from process context. Do the remainder of the cleanup,
2200 * including orphaning the outgoing CPU's RCU callbacks, and also 2369 * including orphaning the outgoing CPU's RCU callbacks, and also
@@ -2204,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2204static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2373static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2205{ 2374{
2206 unsigned long flags; 2375 unsigned long flags;
2207 unsigned long mask;
2208 int need_report = 0;
2209 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 2376 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2210 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2377 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2211 2378
@@ -2219,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2219 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ 2386 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2220 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); 2387 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2221 rcu_adopt_orphan_cbs(rsp, flags); 2388 rcu_adopt_orphan_cbs(rsp, flags);
2389 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
2222 2390
2223 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ 2391 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
2224 mask = rdp->grpmask; /* rnp->grplo is constant. */ 2392 raw_spin_lock_irqsave(&rnp->lock, flags);
2225 do { 2393 smp_mb__after_unlock_lock(); /* Enforce GP memory-order guarantee. */
2226 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 2394 rnp->qsmaskinit &= ~rdp->grpmask;
2227 smp_mb__after_unlock_lock(); 2395 if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
2228 rnp->qsmaskinit &= ~mask; 2396 rcu_cleanup_dead_rnp(rnp);
2229 if (rnp->qsmaskinit != 0) { 2397 rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Rlses rnp->lock. */
2230 if (rnp != rdp->mynode)
2231 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2232 break;
2233 }
2234 if (rnp == rdp->mynode)
2235 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2236 else
2237 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2238 mask = rnp->grpmask;
2239 rnp = rnp->parent;
2240 } while (rnp != NULL);
2241
2242 /*
2243 * We still hold the leaf rcu_node structure lock here, and
2244 * irqs are still disabled. The reason for this subterfuge is
2245 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2246 * held leads to deadlock.
2247 */
2248 raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2249 rnp = rdp->mynode;
2250 if (need_report & RCU_OFL_TASKS_NORM_GP)
2251 rcu_report_unblock_qs_rnp(rnp, flags);
2252 else
2253 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2254 if (need_report & RCU_OFL_TASKS_EXP_GP)
2255 rcu_report_exp_rnp(rsp, rnp, true);
2256 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, 2398 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2257 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", 2399 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2258 cpu, rdp->qlen, rdp->nxtlist); 2400 cpu, rdp->qlen, rdp->nxtlist);
@@ -2268,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2268{ 2410{
2269} 2411}
2270 2412
2413static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2414{
2415}
2416
2271static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2417static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2272{ 2418{
2273} 2419}
@@ -2464,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
2464 } 2610 }
2465 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2611 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2466 } 2612 }
2467 rnp = rcu_get_root(rsp);
2468 if (rnp->qsmask == 0) {
2469 raw_spin_lock_irqsave(&rnp->lock, flags);
2470 smp_mb__after_unlock_lock();
2471 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2472 }
2473} 2613}
2474 2614
2475/* 2615/*
@@ -2569,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
2569 * Schedule RCU callback invocation. If the specified type of RCU 2709 * Schedule RCU callback invocation. If the specified type of RCU
2570 * does not support RCU priority boosting, just do a direct call, 2710 * does not support RCU priority boosting, just do a direct call,
2571 * otherwise wake up the per-CPU kernel kthread. Note that because we 2711 * otherwise wake up the per-CPU kernel kthread. Note that because we
2572 * are running on the current CPU with interrupts disabled, the 2712 * are running on the current CPU with softirqs disabled, the
2573 * rcu_cpu_kthread_task cannot disappear out from under us. 2713 * rcu_cpu_kthread_task cannot disappear out from under us.
2574 */ 2714 */
2575static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) 2715static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -3109,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3109 3249
3110 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3250 /* Is the RCU core waiting for a quiescent state from this CPU? */
3111 if (rcu_scheduler_fully_active && 3251 if (rcu_scheduler_fully_active &&
3112 rdp->qs_pending && !rdp->passed_quiesce) { 3252 rdp->qs_pending && !rdp->passed_quiesce &&
3253 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
3113 rdp->n_rp_qs_pending++; 3254 rdp->n_rp_qs_pending++;
3114 } else if (rdp->qs_pending && rdp->passed_quiesce) { 3255 } else if (rdp->qs_pending &&
3256 (rdp->passed_quiesce ||
3257 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
3115 rdp->n_rp_report_qs++; 3258 rdp->n_rp_report_qs++;
3116 return 1; 3259 return 1;
3117 } 3260 }
@@ -3135,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3135 } 3278 }
3136 3279
3137 /* Has a new RCU grace period started? */ 3280 /* Has a new RCU grace period started? */
3138 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ 3281 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
3282 unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
3139 rdp->n_rp_gp_started++; 3283 rdp->n_rp_gp_started++;
3140 return 1; 3284 return 1;
3141 } 3285 }
@@ -3318,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
3318 } else { 3462 } else {
3319 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, 3463 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3320 rsp->n_barrier_done); 3464 rsp->n_barrier_done);
3465 smp_mb__before_atomic();
3321 atomic_inc(&rsp->barrier_cpu_count); 3466 atomic_inc(&rsp->barrier_cpu_count);
3322 __call_rcu(&rdp->barrier_head, 3467 __call_rcu(&rdp->barrier_head,
3323 rcu_barrier_callback, rsp, cpu, 0); 3468 rcu_barrier_callback, rsp, cpu, 0);
@@ -3385,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3385 /* Set up local state, ensuring consistent view of global state. */ 3530 /* Set up local state, ensuring consistent view of global state. */
3386 raw_spin_lock_irqsave(&rnp->lock, flags); 3531 raw_spin_lock_irqsave(&rnp->lock, flags);
3387 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); 3532 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3388 init_callback_list(rdp);
3389 rdp->qlen_lazy = 0;
3390 ACCESS_ONCE(rdp->qlen) = 0;
3391 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 3533 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3392 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); 3534 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3393 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); 3535 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -3444,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3444 rdp->gpnum = rnp->completed; 3586 rdp->gpnum = rnp->completed;
3445 rdp->completed = rnp->completed; 3587 rdp->completed = rnp->completed;
3446 rdp->passed_quiesce = 0; 3588 rdp->passed_quiesce = 0;
3589 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
3447 rdp->qs_pending = 0; 3590 rdp->qs_pending = 0;
3448 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); 3591 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3449 } 3592 }
@@ -3535,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self,
3535static int __init rcu_spawn_gp_kthread(void) 3678static int __init rcu_spawn_gp_kthread(void)
3536{ 3679{
3537 unsigned long flags; 3680 unsigned long flags;
3681 int kthread_prio_in = kthread_prio;
3538 struct rcu_node *rnp; 3682 struct rcu_node *rnp;
3539 struct rcu_state *rsp; 3683 struct rcu_state *rsp;
3684 struct sched_param sp;
3540 struct task_struct *t; 3685 struct task_struct *t;
3541 3686
3687 /* Force priority into range. */
3688 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3689 kthread_prio = 1;
3690 else if (kthread_prio < 0)
3691 kthread_prio = 0;
3692 else if (kthread_prio > 99)
3693 kthread_prio = 99;
3694 if (kthread_prio != kthread_prio_in)
3695 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3696 kthread_prio, kthread_prio_in);
3697
3542 rcu_scheduler_fully_active = 1; 3698 rcu_scheduler_fully_active = 1;
3543 for_each_rcu_flavor(rsp) { 3699 for_each_rcu_flavor(rsp) {
3544 t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name); 3700 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3545 BUG_ON(IS_ERR(t)); 3701 BUG_ON(IS_ERR(t));
3546 rnp = rcu_get_root(rsp); 3702 rnp = rcu_get_root(rsp);
3547 raw_spin_lock_irqsave(&rnp->lock, flags); 3703 raw_spin_lock_irqsave(&rnp->lock, flags);
3548 rsp->gp_kthread = t; 3704 rsp->gp_kthread = t;
3705 if (kthread_prio) {
3706 sp.sched_priority = kthread_prio;
3707 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3708 }
3709 wake_up_process(t);
3549 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3710 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3550 } 3711 }
3551 rcu_spawn_nocb_kthreads(); 3712 rcu_spawn_nocb_kthreads();
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8e7b1843896e..119de399eb2f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,7 +27,6 @@
27#include <linux/threads.h> 27#include <linux/threads.h>
28#include <linux/cpumask.h> 28#include <linux/cpumask.h>
29#include <linux/seqlock.h> 29#include <linux/seqlock.h>
30#include <linux/irq_work.h>
31 30
32/* 31/*
33 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and 32 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
172 /* queued on this rcu_node structure that */ 171 /* queued on this rcu_node structure that */
173 /* are blocking the current grace period, */ 172 /* are blocking the current grace period, */
174 /* there can be no such task. */ 173 /* there can be no such task. */
175 struct completion boost_completion;
176 /* Used to ensure that the rt_mutex used */
177 /* to carry out the boosting is fully */
178 /* released with no future boostee accesses */
179 /* before that rt_mutex is re-initialized. */
180 struct rt_mutex boost_mtx; 174 struct rt_mutex boost_mtx;
181 /* Used only for the priority-boosting */ 175 /* Used only for the priority-boosting */
182 /* side effect, not as a lock. */ 176 /* side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
257 /* in order to detect GP end. */ 251 /* in order to detect GP end. */
258 unsigned long gpnum; /* Highest gp number that this CPU */ 252 unsigned long gpnum; /* Highest gp number that this CPU */
259 /* is aware of having started. */ 253 /* is aware of having started. */
254 unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
255 /* for rcu_all_qs() invocations. */
260 bool passed_quiesce; /* User-mode/idle loop etc. */ 256 bool passed_quiesce; /* User-mode/idle loop etc. */
261 bool qs_pending; /* Core waits for quiesc state. */ 257 bool qs_pending; /* Core waits for quiesc state. */
262 bool beenonline; /* CPU online at least once. */ 258 bool beenonline; /* CPU online at least once. */
259 bool gpwrap; /* Possible gpnum/completed wrap. */
263 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ 260 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
264 unsigned long grpmask; /* Mask to apply to leaf qsmask. */ 261 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
265#ifdef CONFIG_RCU_CPU_STALL_INFO 262#ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
340#ifdef CONFIG_RCU_NOCB_CPU 337#ifdef CONFIG_RCU_NOCB_CPU
341 struct rcu_head *nocb_head; /* CBs waiting for kthread. */ 338 struct rcu_head *nocb_head; /* CBs waiting for kthread. */
342 struct rcu_head **nocb_tail; 339 struct rcu_head **nocb_tail;
343 atomic_long_t nocb_q_count; /* # CBs waiting for kthread */ 340 atomic_long_t nocb_q_count; /* # CBs waiting for nocb */
344 atomic_long_t nocb_q_count_lazy; /* (approximate). */ 341 atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
345 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ 342 struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
346 struct rcu_head **nocb_follower_tail; 343 struct rcu_head **nocb_follower_tail;
347 atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
348 atomic_long_t nocb_follower_count_lazy; /* (approximate). */
349 int nocb_p_count; /* # CBs being invoked by kthread */
350 int nocb_p_count_lazy; /* (approximate). */
351 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ 344 wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
352 struct task_struct *nocb_kthread; 345 struct task_struct *nocb_kthread;
353 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ 346 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
356 struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp; 349 struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
357 /* CBs waiting for GP. */ 350 /* CBs waiting for GP. */
358 struct rcu_head **nocb_gp_tail; 351 struct rcu_head **nocb_gp_tail;
359 long nocb_gp_count;
360 long nocb_gp_count_lazy;
361 bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ 352 bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */
362 struct rcu_data *nocb_next_follower; 353 struct rcu_data *nocb_next_follower;
363 /* Next follower in wakeup chain. */ 354 /* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
488 /* due to no GP active. */ 479 /* due to no GP active. */
489 unsigned long gp_start; /* Time at which GP started, */ 480 unsigned long gp_start; /* Time at which GP started, */
490 /* but in jiffies. */ 481 /* but in jiffies. */
482 unsigned long gp_activity; /* Time of last GP kthread */
483 /* activity in jiffies. */
491 unsigned long jiffies_stall; /* Time at which to check */ 484 unsigned long jiffies_stall; /* Time at which to check */
492 /* for CPU stalls. */ 485 /* for CPU stalls. */
493 unsigned long jiffies_resched; /* Time at which to resched */ 486 unsigned long jiffies_resched; /* Time at which to resched */
494 /* a reluctant CPU. */ 487 /* a reluctant CPU. */
488 unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */
489 /* GP start. */
495 unsigned long gp_max; /* Maximum GP duration in */ 490 unsigned long gp_max; /* Maximum GP duration in */
496 /* jiffies. */ 491 /* jiffies. */
497 const char *name; /* Name of structure. */ 492 const char *name; /* Name of structure. */
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
514#define for_each_rcu_flavor(rsp) \ 509#define for_each_rcu_flavor(rsp) \
515 list_for_each_entry((rsp), &rcu_struct_flavors, flavors) 510 list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
516 511
517/* Return values for rcu_preempt_offline_tasks(). */
518
519#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
520 /* GP were moved to root. */
521#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
522 /* GP were moved to root. */
523
524/* 512/*
525 * RCU implementation internal declarations: 513 * RCU implementation internal declarations:
526 */ 514 */
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
546 534
547/* Forward declarations for rcutree_plugin.h */ 535/* Forward declarations for rcutree_plugin.h */
548static void rcu_bootup_announce(void); 536static void rcu_bootup_announce(void);
549long rcu_batches_completed(void);
550static void rcu_preempt_note_context_switch(void); 537static void rcu_preempt_note_context_switch(void);
551static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); 538static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
552#ifdef CONFIG_HOTPLUG_CPU 539#ifdef CONFIG_HOTPLUG_CPU
553static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, 540static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
554 unsigned long flags);
555#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 541#endif /* #ifdef CONFIG_HOTPLUG_CPU */
556static void rcu_print_detail_task_stall(struct rcu_state *rsp); 542static void rcu_print_detail_task_stall(struct rcu_state *rsp);
557static int rcu_print_task_stall(struct rcu_node *rnp); 543static int rcu_print_task_stall(struct rcu_node *rnp);
558static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 544static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
559#ifdef CONFIG_HOTPLUG_CPU
560static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
561 struct rcu_node *rnp,
562 struct rcu_data *rdp);
563#endif /* #ifdef CONFIG_HOTPLUG_CPU */
564static void rcu_preempt_check_callbacks(void); 545static void rcu_preempt_check_callbacks(void);
565void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 546void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
566#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
567static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
568 bool wake);
569#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
570static void __init __rcu_init_preempt(void); 547static void __init __rcu_init_preempt(void);
571static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 548static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
572static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 549static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
622#endif /* #ifndef RCU_TREE_NONCORE */ 599#endif /* #ifndef RCU_TREE_NONCORE */
623 600
624#ifdef CONFIG_RCU_TRACE 601#ifdef CONFIG_RCU_TRACE
625#ifdef CONFIG_RCU_NOCB_CPU 602/* Read out queue lengths for tracing. */
626/* Sum up queue lengths for tracing. */
627static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll) 603static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
628{ 604{
629 *ql = atomic_long_read(&rdp->nocb_q_count) + 605#ifdef CONFIG_RCU_NOCB_CPU
630 rdp->nocb_p_count + 606 *ql = atomic_long_read(&rdp->nocb_q_count);
631 atomic_long_read(&rdp->nocb_follower_count) + 607 *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
632 rdp->nocb_p_count + rdp->nocb_gp_count;
633 *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
634 rdp->nocb_p_count_lazy +
635 atomic_long_read(&rdp->nocb_follower_count_lazy) +
636 rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
637}
638#else /* #ifdef CONFIG_RCU_NOCB_CPU */ 608#else /* #ifdef CONFIG_RCU_NOCB_CPU */
639static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
640{
641 *ql = 0; 609 *ql = 0;
642 *qll = 0; 610 *qll = 0;
643}
644#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 611#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
612}
645#endif /* #ifdef CONFIG_RCU_TRACE */ 613#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3ec85cb5d544..2e850a51bb8f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -34,10 +34,6 @@
34 34
35#include "../locking/rtmutex_common.h" 35#include "../locking/rtmutex_common.h"
36 36
37/* rcuc/rcub kthread realtime priority */
38static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
39module_param(kthread_prio, int, 0644);
40
41/* 37/*
42 * Control variables for per-CPU and per-rcu_node kthreads. These 38 * Control variables for per-CPU and per-rcu_node kthreads. These
43 * handle all flavors of RCU. 39 * handle all flavors of RCU.
@@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
103static struct rcu_state *rcu_state_p = &rcu_preempt_state; 99static struct rcu_state *rcu_state_p = &rcu_preempt_state;
104 100
105static int rcu_preempted_readers_exp(struct rcu_node *rnp); 101static int rcu_preempted_readers_exp(struct rcu_node *rnp);
102static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
103 bool wake);
106 104
107/* 105/*
108 * Tell them what RCU they are running. 106 * Tell them what RCU they are running.
@@ -114,25 +112,6 @@ static void __init rcu_bootup_announce(void)
114} 112}
115 113
116/* 114/*
117 * Return the number of RCU-preempt batches processed thus far
118 * for debug and statistics.
119 */
120static long rcu_batches_completed_preempt(void)
121{
122 return rcu_preempt_state.completed;
123}
124EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
125
126/*
127 * Return the number of RCU batches processed thus far for debug & stats.
128 */
129long rcu_batches_completed(void)
130{
131 return rcu_batches_completed_preempt();
132}
133EXPORT_SYMBOL_GPL(rcu_batches_completed);
134
135/*
136 * Record a preemptible-RCU quiescent state for the specified CPU. Note 115 * Record a preemptible-RCU quiescent state for the specified CPU. Note
137 * that this just means that the task currently running on the CPU is 116 * that this just means that the task currently running on the CPU is
138 * not in a quiescent state. There might be any number of tasks blocked 117 * not in a quiescent state. There might be any number of tasks blocked
@@ -307,15 +286,25 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
307} 286}
308 287
309/* 288/*
289 * Return true if the specified rcu_node structure has tasks that were
290 * preempted within an RCU read-side critical section.
291 */
292static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
293{
294 return !list_empty(&rnp->blkd_tasks);
295}
296
297/*
310 * Handle special cases during rcu_read_unlock(), such as needing to 298 * Handle special cases during rcu_read_unlock(), such as needing to
311 * notify RCU core processing or task having blocked during the RCU 299 * notify RCU core processing or task having blocked during the RCU
312 * read-side critical section. 300 * read-side critical section.
313 */ 301 */
314void rcu_read_unlock_special(struct task_struct *t) 302void rcu_read_unlock_special(struct task_struct *t)
315{ 303{
316 int empty; 304 bool empty;
317 int empty_exp; 305 bool empty_exp;
318 int empty_exp_now; 306 bool empty_norm;
307 bool empty_exp_now;
319 unsigned long flags; 308 unsigned long flags;
320 struct list_head *np; 309 struct list_head *np;
321#ifdef CONFIG_RCU_BOOST 310#ifdef CONFIG_RCU_BOOST
@@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t)
367 break; 356 break;
368 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 357 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
369 } 358 }
370 empty = !rcu_preempt_blocked_readers_cgp(rnp); 359 empty = !rcu_preempt_has_tasks(rnp);
360 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
371 empty_exp = !rcu_preempted_readers_exp(rnp); 361 empty_exp = !rcu_preempted_readers_exp(rnp);
372 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 362 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
373 np = rcu_next_node_entry(t, rnp); 363 np = rcu_next_node_entry(t, rnp);
@@ -387,13 +377,21 @@ void rcu_read_unlock_special(struct task_struct *t)
387#endif /* #ifdef CONFIG_RCU_BOOST */ 377#endif /* #ifdef CONFIG_RCU_BOOST */
388 378
389 /* 379 /*
380 * If this was the last task on the list, go see if we
381 * need to propagate ->qsmaskinit bit clearing up the
382 * rcu_node tree.
383 */
384 if (!empty && !rcu_preempt_has_tasks(rnp))
385 rcu_cleanup_dead_rnp(rnp);
386
387 /*
390 * If this was the last task on the current list, and if 388 * If this was the last task on the current list, and if
391 * we aren't waiting on any CPUs, report the quiescent state. 389 * we aren't waiting on any CPUs, report the quiescent state.
392 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, 390 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
393 * so we must take a snapshot of the expedited state. 391 * so we must take a snapshot of the expedited state.
394 */ 392 */
395 empty_exp_now = !rcu_preempted_readers_exp(rnp); 393 empty_exp_now = !rcu_preempted_readers_exp(rnp);
396 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) { 394 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
397 trace_rcu_quiescent_state_report(TPS("preempt_rcu"), 395 trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
398 rnp->gpnum, 396 rnp->gpnum,
399 0, rnp->qsmask, 397 0, rnp->qsmask,
@@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t)
408 406
409#ifdef CONFIG_RCU_BOOST 407#ifdef CONFIG_RCU_BOOST
410 /* Unboost if we were boosted. */ 408 /* Unboost if we were boosted. */
411 if (drop_boost_mutex) { 409 if (drop_boost_mutex)
412 rt_mutex_unlock(&rnp->boost_mtx); 410 rt_mutex_unlock(&rnp->boost_mtx);
413 complete(&rnp->boost_completion);
414 }
415#endif /* #ifdef CONFIG_RCU_BOOST */ 411#endif /* #ifdef CONFIG_RCU_BOOST */
416 412
417 /* 413 /*
@@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
519static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 515static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
520{ 516{
521 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); 517 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
522 if (!list_empty(&rnp->blkd_tasks)) 518 if (rcu_preempt_has_tasks(rnp))
523 rnp->gp_tasks = rnp->blkd_tasks.next; 519 rnp->gp_tasks = rnp->blkd_tasks.next;
524 WARN_ON_ONCE(rnp->qsmask); 520 WARN_ON_ONCE(rnp->qsmask);
525} 521}
526 522
527#ifdef CONFIG_HOTPLUG_CPU 523#ifdef CONFIG_HOTPLUG_CPU
528 524
529/*
530 * Handle tasklist migration for case in which all CPUs covered by the
531 * specified rcu_node have gone offline. Move them up to the root
532 * rcu_node. The reason for not just moving them to the immediate
533 * parent is to remove the need for rcu_read_unlock_special() to
534 * make more than two attempts to acquire the target rcu_node's lock.
535 * Returns true if there were tasks blocking the current RCU grace
536 * period.
537 *
538 * Returns 1 if there was previously a task blocking the current grace
539 * period on the specified rcu_node structure.
540 *
541 * The caller must hold rnp->lock with irqs disabled.
542 */
543static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
544 struct rcu_node *rnp,
545 struct rcu_data *rdp)
546{
547 struct list_head *lp;
548 struct list_head *lp_root;
549 int retval = 0;
550 struct rcu_node *rnp_root = rcu_get_root(rsp);
551 struct task_struct *t;
552
553 if (rnp == rnp_root) {
554 WARN_ONCE(1, "Last CPU thought to be offlined?");
555 return 0; /* Shouldn't happen: at least one CPU online. */
556 }
557
558 /* If we are on an internal node, complain bitterly. */
559 WARN_ON_ONCE(rnp != rdp->mynode);
560
561 /*
562 * Move tasks up to root rcu_node. Don't try to get fancy for
563 * this corner-case operation -- just put this node's tasks
564 * at the head of the root node's list, and update the root node's
565 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
566 * if non-NULL. This might result in waiting for more tasks than
567 * absolutely necessary, but this is a good performance/complexity
568 * tradeoff.
569 */
570 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
571 retval |= RCU_OFL_TASKS_NORM_GP;
572 if (rcu_preempted_readers_exp(rnp))
573 retval |= RCU_OFL_TASKS_EXP_GP;
574 lp = &rnp->blkd_tasks;
575 lp_root = &rnp_root->blkd_tasks;
576 while (!list_empty(lp)) {
577 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
578 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
579 smp_mb__after_unlock_lock();
580 list_del(&t->rcu_node_entry);
581 t->rcu_blocked_node = rnp_root;
582 list_add(&t->rcu_node_entry, lp_root);
583 if (&t->rcu_node_entry == rnp->gp_tasks)
584 rnp_root->gp_tasks = rnp->gp_tasks;
585 if (&t->rcu_node_entry == rnp->exp_tasks)
586 rnp_root->exp_tasks = rnp->exp_tasks;
587#ifdef CONFIG_RCU_BOOST
588 if (&t->rcu_node_entry == rnp->boost_tasks)
589 rnp_root->boost_tasks = rnp->boost_tasks;
590#endif /* #ifdef CONFIG_RCU_BOOST */
591 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
592 }
593
594 rnp->gp_tasks = NULL;
595 rnp->exp_tasks = NULL;
596#ifdef CONFIG_RCU_BOOST
597 rnp->boost_tasks = NULL;
598 /*
599 * In case root is being boosted and leaf was not. Make sure
600 * that we boost the tasks blocking the current grace period
601 * in this case.
602 */
603 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
604 smp_mb__after_unlock_lock();
605 if (rnp_root->boost_tasks != NULL &&
606 rnp_root->boost_tasks != rnp_root->gp_tasks &&
607 rnp_root->boost_tasks != rnp_root->exp_tasks)
608 rnp_root->boost_tasks = rnp_root->gp_tasks;
609 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
610#endif /* #ifdef CONFIG_RCU_BOOST */
611
612 return retval;
613}
614
615#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 525#endif /* #ifdef CONFIG_HOTPLUG_CPU */
616 526
617/* 527/*
@@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
771 681
772 raw_spin_lock_irqsave(&rnp->lock, flags); 682 raw_spin_lock_irqsave(&rnp->lock, flags);
773 smp_mb__after_unlock_lock(); 683 smp_mb__after_unlock_lock();
774 if (list_empty(&rnp->blkd_tasks)) { 684 if (!rcu_preempt_has_tasks(rnp)) {
775 raw_spin_unlock_irqrestore(&rnp->lock, flags); 685 raw_spin_unlock_irqrestore(&rnp->lock, flags);
776 } else { 686 } else {
777 rnp->exp_tasks = rnp->blkd_tasks.next; 687 rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -933,15 +843,6 @@ static void __init rcu_bootup_announce(void)
933} 843}
934 844
935/* 845/*
936 * Return the number of RCU batches processed thus far for debug & stats.
937 */
938long rcu_batches_completed(void)
939{
940 return rcu_batches_completed_sched();
941}
942EXPORT_SYMBOL_GPL(rcu_batches_completed);
943
944/*
945 * Because preemptible RCU does not exist, we never have to check for 846 * Because preemptible RCU does not exist, we never have to check for
946 * CPUs being in quiescent states. 847 * CPUs being in quiescent states.
947 */ 848 */
@@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
960 861
961#ifdef CONFIG_HOTPLUG_CPU 862#ifdef CONFIG_HOTPLUG_CPU
962 863
963/* Because preemptible RCU does not exist, no quieting of tasks. */ 864/*
964static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 865 * Because there is no preemptible RCU, there can be no readers blocked.
965 __releases(rnp->lock) 866 */
867static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
966{ 868{
967 raw_spin_unlock_irqrestore(&rnp->lock, flags); 869 return false;
968} 870}
969 871
970#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 872#endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
996 WARN_ON_ONCE(rnp->qsmask); 898 WARN_ON_ONCE(rnp->qsmask);
997} 899}
998 900
999#ifdef CONFIG_HOTPLUG_CPU
1000
1001/*
1002 * Because preemptible RCU does not exist, it never needs to migrate
1003 * tasks that were blocked within RCU read-side critical sections, and
1004 * such non-existent tasks cannot possibly have been blocking the current
1005 * grace period.
1006 */
1007static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1008 struct rcu_node *rnp,
1009 struct rcu_data *rdp)
1010{
1011 return 0;
1012}
1013
1014#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1015
1016/* 901/*
1017 * Because preemptible RCU does not exist, it never has any callbacks 902 * Because preemptible RCU does not exist, it never has any callbacks
1018 * to check. 903 * to check.
@@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void)
1031} 916}
1032EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 917EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1033 918
1034#ifdef CONFIG_HOTPLUG_CPU
1035
1036/*
1037 * Because preemptible RCU does not exist, there is never any need to
1038 * report on tasks preempted in RCU read-side critical sections during
1039 * expedited RCU grace periods.
1040 */
1041static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1042 bool wake)
1043{
1044}
1045
1046#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1047
1048/* 919/*
1049 * Because preemptible RCU does not exist, rcu_barrier() is just 920 * Because preemptible RCU does not exist, rcu_barrier() is just
1050 * another name for rcu_barrier_sched(). 921 * another name for rcu_barrier_sched().
@@ -1080,7 +951,7 @@ void exit_rcu(void)
1080 951
1081static void rcu_initiate_boost_trace(struct rcu_node *rnp) 952static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1082{ 953{
1083 if (list_empty(&rnp->blkd_tasks)) 954 if (!rcu_preempt_has_tasks(rnp))
1084 rnp->n_balk_blkd_tasks++; 955 rnp->n_balk_blkd_tasks++;
1085 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) 956 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1086 rnp->n_balk_exp_gp_tasks++; 957 rnp->n_balk_exp_gp_tasks++;
@@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp)
1127 struct task_struct *t; 998 struct task_struct *t;
1128 struct list_head *tb; 999 struct list_head *tb;
1129 1000
1130 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) 1001 if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
1002 ACCESS_ONCE(rnp->boost_tasks) == NULL)
1131 return 0; /* Nothing left to boost. */ 1003 return 0; /* Nothing left to boost. */
1132 1004
1133 raw_spin_lock_irqsave(&rnp->lock, flags); 1005 raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp)
1175 */ 1047 */
1176 t = container_of(tb, struct task_struct, rcu_node_entry); 1048 t = container_of(tb, struct task_struct, rcu_node_entry);
1177 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); 1049 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1178 init_completion(&rnp->boost_completion);
1179 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1050 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1180 /* Lock only for side effect: boosts task t's priority. */ 1051 /* Lock only for side effect: boosts task t's priority. */
1181 rt_mutex_lock(&rnp->boost_mtx); 1052 rt_mutex_lock(&rnp->boost_mtx);
1182 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ 1053 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
1183 1054
1184 /* Wait for boostee to be done w/boost_mtx before reinitializing. */
1185 wait_for_completion(&rnp->boost_completion);
1186
1187 return ACCESS_ONCE(rnp->exp_tasks) != NULL || 1055 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1188 ACCESS_ONCE(rnp->boost_tasks) != NULL; 1056 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1189} 1057}
@@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1416 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) 1284 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1417 if ((mask & 0x1) && cpu != outgoingcpu) 1285 if ((mask & 0x1) && cpu != outgoingcpu)
1418 cpumask_set_cpu(cpu, cm); 1286 cpumask_set_cpu(cpu, cm);
1419 if (cpumask_weight(cm) == 0) { 1287 if (cpumask_weight(cm) == 0)
1420 cpumask_setall(cm); 1288 cpumask_setall(cm);
1421 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1422 cpumask_clear_cpu(cpu, cm);
1423 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1424 }
1425 set_cpus_allowed_ptr(t, cm); 1289 set_cpus_allowed_ptr(t, cm);
1426 free_cpumask_var(cm); 1290 free_cpumask_var(cm);
1427} 1291}
@@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void)
1446 for_each_possible_cpu(cpu) 1310 for_each_possible_cpu(cpu)
1447 per_cpu(rcu_cpu_has_work, cpu) = 0; 1311 per_cpu(rcu_cpu_has_work, cpu) = 0;
1448 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); 1312 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1449 rnp = rcu_get_root(rcu_state_p); 1313 rcu_for_each_leaf_node(rcu_state_p, rnp)
1450 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); 1314 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1451 if (NUM_RCU_NODES > 1) {
1452 rcu_for_each_leaf_node(rcu_state_p, rnp)
1453 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1454 }
1455} 1315}
1456 1316
1457static void rcu_prepare_kthreads(int cpu) 1317static void rcu_prepare_kthreads(int cpu)
@@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
1605 * completed since we last checked and there are 1465 * completed since we last checked and there are
1606 * callbacks not yet ready to invoke. 1466 * callbacks not yet ready to invoke.
1607 */ 1467 */
1608 if (rdp->completed != rnp->completed && 1468 if ((rdp->completed != rnp->completed ||
1469 unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
1609 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) 1470 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1610 note_gp_changes(rsp, rdp); 1471 note_gp_changes(rsp, rdp);
1611 1472
@@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1898 ticks_value = rsp->gpnum - rdp->gpnum; 1759 ticks_value = rsp->gpnum - rdp->gpnum;
1899 } 1760 }
1900 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1761 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1901 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n", 1762 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
1902 cpu, ticks_value, ticks_title, 1763 cpu, ticks_value, ticks_title,
1903 atomic_read(&rdtp->dynticks) & 0xfff, 1764 atomic_read(&rdtp->dynticks) & 0xfff,
1904 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, 1765 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1905 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), 1766 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1767 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1906 fast_no_hz); 1768 fast_no_hz);
1907} 1769}
1908 1770
@@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
2056static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) 1918static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2057{ 1919{
2058 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1920 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1921 unsigned long ret;
1922#ifdef CONFIG_PROVE_RCU
2059 struct rcu_head *rhp; 1923 struct rcu_head *rhp;
1924#endif /* #ifdef CONFIG_PROVE_RCU */
2060 1925
2061 /* No-CBs CPUs might have callbacks on any of three lists. */ 1926 /*
1927 * Check count of all no-CBs callbacks awaiting invocation.
1928 * There needs to be a barrier before this function is called,
1929 * but associated with a prior determination that no more
1930 * callbacks would be posted. In the worst case, the first
1931 * barrier in _rcu_barrier() suffices (but the caller cannot
1932 * necessarily rely on this, not a substitute for the caller
1933 * getting the concurrency design right!). There must also be
1934 * a barrier between the following load an posting of a callback
1935 * (if a callback is in fact needed). This is associated with an
1936 * atomic_inc() in the caller.
1937 */
1938 ret = atomic_long_read(&rdp->nocb_q_count);
1939
1940#ifdef CONFIG_PROVE_RCU
2062 rhp = ACCESS_ONCE(rdp->nocb_head); 1941 rhp = ACCESS_ONCE(rdp->nocb_head);
2063 if (!rhp) 1942 if (!rhp)
2064 rhp = ACCESS_ONCE(rdp->nocb_gp_head); 1943 rhp = ACCESS_ONCE(rdp->nocb_gp_head);
@@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2072 cpu, rhp->func); 1951 cpu, rhp->func);
2073 WARN_ON_ONCE(1); 1952 WARN_ON_ONCE(1);
2074 } 1953 }
1954#endif /* #ifdef CONFIG_PROVE_RCU */
2075 1955
2076 return !!rhp; 1956 return !!ret;
2077} 1957}
2078 1958
2079/* 1959/*
@@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2095 struct task_struct *t; 1975 struct task_struct *t;
2096 1976
2097 /* Enqueue the callback on the nocb list and update counts. */ 1977 /* Enqueue the callback on the nocb list and update counts. */
1978 atomic_long_add(rhcount, &rdp->nocb_q_count);
1979 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
2098 old_rhpp = xchg(&rdp->nocb_tail, rhtp); 1980 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2099 ACCESS_ONCE(*old_rhpp) = rhp; 1981 ACCESS_ONCE(*old_rhpp) = rhp;
2100 atomic_long_add(rhcount, &rdp->nocb_q_count);
2101 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); 1982 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2102 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */ 1983 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
2103 1984
@@ -2288,9 +2169,6 @@ wait_again:
2288 /* Move callbacks to wait-for-GP list, which is empty. */ 2169 /* Move callbacks to wait-for-GP list, which is empty. */
2289 ACCESS_ONCE(rdp->nocb_head) = NULL; 2170 ACCESS_ONCE(rdp->nocb_head) = NULL;
2290 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); 2171 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2291 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
2292 rdp->nocb_gp_count_lazy =
2293 atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2294 gotcbs = true; 2172 gotcbs = true;
2295 } 2173 }
2296 2174
@@ -2338,9 +2216,6 @@ wait_again:
2338 /* Append callbacks to follower's "done" list. */ 2216 /* Append callbacks to follower's "done" list. */
2339 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail); 2217 tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2340 *tail = rdp->nocb_gp_head; 2218 *tail = rdp->nocb_gp_head;
2341 atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
2342 atomic_long_add(rdp->nocb_gp_count_lazy,
2343 &rdp->nocb_follower_count_lazy);
2344 smp_mb__after_atomic(); /* Store *tail before wakeup. */ 2219 smp_mb__after_atomic(); /* Store *tail before wakeup. */
2345 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) { 2220 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2346 /* 2221 /*
@@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg)
2415 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); 2290 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2416 ACCESS_ONCE(rdp->nocb_follower_head) = NULL; 2291 ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
2417 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); 2292 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2418 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
2419 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
2420 rdp->nocb_p_count += c;
2421 rdp->nocb_p_count_lazy += cl;
2422 2293
2423 /* Each pass through the following loop invokes a callback. */ 2294 /* Each pass through the following loop invokes a callback. */
2424 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1); 2295 trace_rcu_batch_start(rdp->rsp->name,
2296 atomic_long_read(&rdp->nocb_q_count_lazy),
2297 atomic_long_read(&rdp->nocb_q_count), -1);
2425 c = cl = 0; 2298 c = cl = 0;
2426 while (list) { 2299 while (list) {
2427 next = list->next; 2300 next = list->next;
@@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg)
2443 list = next; 2316 list = next;
2444 } 2317 }
2445 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); 2318 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2446 ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c; 2319 smp_mb__before_atomic(); /* _add after CB invocation. */
2447 ACCESS_ONCE(rdp->nocb_p_count_lazy) = 2320 atomic_long_add(-c, &rdp->nocb_q_count);
2448 rdp->nocb_p_count_lazy - cl; 2321 atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2449 rdp->n_nocbs_invoked += c; 2322 rdp->n_nocbs_invoked += c;
2450 } 2323 }
2451 return 0; 2324 return 0;
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 5cdc62e1beeb..fbb6240509ea 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -46,6 +46,8 @@
46#define RCU_TREE_NONCORE 46#define RCU_TREE_NONCORE
47#include "tree.h" 47#include "tree.h"
48 48
49DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
50
49static int r_open(struct inode *inode, struct file *file, 51static int r_open(struct inode *inode, struct file *file,
50 const struct seq_operations *op) 52 const struct seq_operations *op)
51{ 53{
@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
115 117
116 if (!rdp->beenonline) 118 if (!rdp->beenonline)
117 return; 119 return;
118 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d", 120 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
119 rdp->cpu, 121 rdp->cpu,
120 cpu_is_offline(rdp->cpu) ? '!' : ' ', 122 cpu_is_offline(rdp->cpu) ? '!' : ' ',
121 ulong2long(rdp->completed), ulong2long(rdp->gpnum), 123 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
122 rdp->passed_quiesce, rdp->qs_pending); 124 rdp->passed_quiesce,
125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
126 rdp->qs_pending);
123 seq_printf(m, " dt=%d/%llx/%d df=%lu", 127 seq_printf(m, " dt=%d/%llx/%d df=%lu",
124 atomic_read(&rdp->dynticks->dynticks), 128 atomic_read(&rdp->dynticks->dynticks),
125 rdp->dynticks->dynticks_nesting, 129 rdp->dynticks->dynticks_nesting,
diff --git a/kernel/resource.c b/kernel/resource.c
index 0bcebffc4e77..19f2357dfda3 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -22,6 +22,7 @@
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/pfn.h> 23#include <linux/pfn.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/resource_ext.h>
25#include <asm/io.h> 26#include <asm/io.h>
26 27
27 28
@@ -1529,6 +1530,30 @@ int iomem_is_exclusive(u64 addr)
1529 return err; 1530 return err;
1530} 1531}
1531 1532
1533struct resource_entry *resource_list_create_entry(struct resource *res,
1534 size_t extra_size)
1535{
1536 struct resource_entry *entry;
1537
1538 entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1539 if (entry) {
1540 INIT_LIST_HEAD(&entry->node);
1541 entry->res = res ? res : &entry->__res;
1542 }
1543
1544 return entry;
1545}
1546EXPORT_SYMBOL(resource_list_create_entry);
1547
1548void resource_list_free(struct list_head *head)
1549{
1550 struct resource_entry *entry, *tmp;
1551
1552 list_for_each_entry_safe(entry, tmp, head, node)
1553 resource_list_destroy_entry(entry);
1554}
1555EXPORT_SYMBOL(resource_list_free);
1556
1532static int __init strict_iomem(char *str) 1557static int __init strict_iomem(char *str)
1533{ 1558{
1534 if (strstr(str, "relaxed")) 1559 if (strstr(str, "relaxed"))
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 607f852b4d04..7052d3fd4e7b 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -268,6 +268,15 @@ bool try_wait_for_completion(struct completion *x)
268 unsigned long flags; 268 unsigned long flags;
269 int ret = 1; 269 int ret = 1;
270 270
271 /*
272 * Since x->done will need to be locked only
273 * in the non-blocking case, we check x->done
274 * first without taking the lock so we can
275 * return early in the blocking case.
276 */
277 if (!ACCESS_ONCE(x->done))
278 return 0;
279
271 spin_lock_irqsave(&x->wait.lock, flags); 280 spin_lock_irqsave(&x->wait.lock, flags);
272 if (!x->done) 281 if (!x->done)
273 ret = 0; 282 ret = 0;
@@ -288,13 +297,6 @@ EXPORT_SYMBOL(try_wait_for_completion);
288 */ 297 */
289bool completion_done(struct completion *x) 298bool completion_done(struct completion *x)
290{ 299{
291 unsigned long flags; 300 return !!ACCESS_ONCE(x->done);
292 int ret = 1;
293
294 spin_lock_irqsave(&x->wait.lock, flags);
295 if (!x->done)
296 ret = 0;
297 spin_unlock_irqrestore(&x->wait.lock, flags);
298 return ret;
299} 301}
300EXPORT_SYMBOL(completion_done); 302EXPORT_SYMBOL(completion_done);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5eab11d4b747..1f37fe7f77a4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -119,7 +119,9 @@ void update_rq_clock(struct rq *rq)
119{ 119{
120 s64 delta; 120 s64 delta;
121 121
122 if (rq->skip_clock_update > 0) 122 lockdep_assert_held(&rq->lock);
123
124 if (rq->clock_skip_update & RQCF_ACT_SKIP)
123 return; 125 return;
124 126
125 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; 127 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
@@ -490,6 +492,11 @@ static __init void init_hrtick(void)
490 */ 492 */
491void hrtick_start(struct rq *rq, u64 delay) 493void hrtick_start(struct rq *rq, u64 delay)
492{ 494{
495 /*
496 * Don't schedule slices shorter than 10000ns, that just
497 * doesn't make sense. Rely on vruntime for fairness.
498 */
499 delay = max_t(u64, delay, 10000LL);
493 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, 500 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
494 HRTIMER_MODE_REL_PINNED, 0); 501 HRTIMER_MODE_REL_PINNED, 0);
495} 502}
@@ -1046,7 +1053,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1046 * this case, we can save a useless back to back clock update. 1053 * this case, we can save a useless back to back clock update.
1047 */ 1054 */
1048 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 1055 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
1049 rq->skip_clock_update = 1; 1056 rq_clock_skip_update(rq, true);
1050} 1057}
1051 1058
1052#ifdef CONFIG_SMP 1059#ifdef CONFIG_SMP
@@ -1082,7 +1089,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1082 if (p->sched_class->migrate_task_rq) 1089 if (p->sched_class->migrate_task_rq)
1083 p->sched_class->migrate_task_rq(p, new_cpu); 1090 p->sched_class->migrate_task_rq(p, new_cpu);
1084 p->se.nr_migrations++; 1091 p->se.nr_migrations++;
1085 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); 1092 perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
1086 } 1093 }
1087 1094
1088 __set_task_cpu(p, new_cpu); 1095 __set_task_cpu(p, new_cpu);
@@ -1836,6 +1843,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
1836 p->se.prev_sum_exec_runtime = 0; 1843 p->se.prev_sum_exec_runtime = 0;
1837 p->se.nr_migrations = 0; 1844 p->se.nr_migrations = 0;
1838 p->se.vruntime = 0; 1845 p->se.vruntime = 0;
1846#ifdef CONFIG_SMP
1847 p->se.avg.decay_count = 0;
1848#endif
1839 INIT_LIST_HEAD(&p->se.group_node); 1849 INIT_LIST_HEAD(&p->se.group_node);
1840 1850
1841#ifdef CONFIG_SCHEDSTATS 1851#ifdef CONFIG_SCHEDSTATS
@@ -2755,6 +2765,10 @@ again:
2755 * - explicit schedule() call 2765 * - explicit schedule() call
2756 * - return from syscall or exception to user-space 2766 * - return from syscall or exception to user-space
2757 * - return from interrupt-handler to user-space 2767 * - return from interrupt-handler to user-space
2768 *
2769 * WARNING: all callers must re-check need_resched() afterward and reschedule
2770 * accordingly in case an event triggered the need for rescheduling (such as
2771 * an interrupt waking up a task) while preemption was disabled in __schedule().
2758 */ 2772 */
2759static void __sched __schedule(void) 2773static void __sched __schedule(void)
2760{ 2774{
@@ -2763,7 +2777,6 @@ static void __sched __schedule(void)
2763 struct rq *rq; 2777 struct rq *rq;
2764 int cpu; 2778 int cpu;
2765 2779
2766need_resched:
2767 preempt_disable(); 2780 preempt_disable();
2768 cpu = smp_processor_id(); 2781 cpu = smp_processor_id();
2769 rq = cpu_rq(cpu); 2782 rq = cpu_rq(cpu);
@@ -2783,6 +2796,8 @@ need_resched:
2783 smp_mb__before_spinlock(); 2796 smp_mb__before_spinlock();
2784 raw_spin_lock_irq(&rq->lock); 2797 raw_spin_lock_irq(&rq->lock);
2785 2798
2799 rq->clock_skip_update <<= 1; /* promote REQ to ACT */
2800
2786 switch_count = &prev->nivcsw; 2801 switch_count = &prev->nivcsw;
2787 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 2802 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2788 if (unlikely(signal_pending_state(prev->state, prev))) { 2803 if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2807,13 +2822,13 @@ need_resched:
2807 switch_count = &prev->nvcsw; 2822 switch_count = &prev->nvcsw;
2808 } 2823 }
2809 2824
2810 if (task_on_rq_queued(prev) || rq->skip_clock_update < 0) 2825 if (task_on_rq_queued(prev))
2811 update_rq_clock(rq); 2826 update_rq_clock(rq);
2812 2827
2813 next = pick_next_task(rq, prev); 2828 next = pick_next_task(rq, prev);
2814 clear_tsk_need_resched(prev); 2829 clear_tsk_need_resched(prev);
2815 clear_preempt_need_resched(); 2830 clear_preempt_need_resched();
2816 rq->skip_clock_update = 0; 2831 rq->clock_skip_update = 0;
2817 2832
2818 if (likely(prev != next)) { 2833 if (likely(prev != next)) {
2819 rq->nr_switches++; 2834 rq->nr_switches++;
@@ -2828,8 +2843,6 @@ need_resched:
2828 post_schedule(rq); 2843 post_schedule(rq);
2829 2844
2830 sched_preempt_enable_no_resched(); 2845 sched_preempt_enable_no_resched();
2831 if (need_resched())
2832 goto need_resched;
2833} 2846}
2834 2847
2835static inline void sched_submit_work(struct task_struct *tsk) 2848static inline void sched_submit_work(struct task_struct *tsk)
@@ -2849,7 +2862,9 @@ asmlinkage __visible void __sched schedule(void)
2849 struct task_struct *tsk = current; 2862 struct task_struct *tsk = current;
2850 2863
2851 sched_submit_work(tsk); 2864 sched_submit_work(tsk);
2852 __schedule(); 2865 do {
2866 __schedule();
2867 } while (need_resched());
2853} 2868}
2854EXPORT_SYMBOL(schedule); 2869EXPORT_SYMBOL(schedule);
2855 2870
@@ -2884,6 +2899,21 @@ void __sched schedule_preempt_disabled(void)
2884 preempt_disable(); 2899 preempt_disable();
2885} 2900}
2886 2901
2902static void preempt_schedule_common(void)
2903{
2904 do {
2905 __preempt_count_add(PREEMPT_ACTIVE);
2906 __schedule();
2907 __preempt_count_sub(PREEMPT_ACTIVE);
2908
2909 /*
2910 * Check again in case we missed a preemption opportunity
2911 * between schedule and now.
2912 */
2913 barrier();
2914 } while (need_resched());
2915}
2916
2887#ifdef CONFIG_PREEMPT 2917#ifdef CONFIG_PREEMPT
2888/* 2918/*
2889 * this is the entry point to schedule() from in-kernel preemption 2919 * this is the entry point to schedule() from in-kernel preemption
@@ -2899,17 +2929,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
2899 if (likely(!preemptible())) 2929 if (likely(!preemptible()))
2900 return; 2930 return;
2901 2931
2902 do { 2932 preempt_schedule_common();
2903 __preempt_count_add(PREEMPT_ACTIVE);
2904 __schedule();
2905 __preempt_count_sub(PREEMPT_ACTIVE);
2906
2907 /*
2908 * Check again in case we missed a preemption opportunity
2909 * between schedule and now.
2910 */
2911 barrier();
2912 } while (need_resched());
2913} 2933}
2914NOKPROBE_SYMBOL(preempt_schedule); 2934NOKPROBE_SYMBOL(preempt_schedule);
2915EXPORT_SYMBOL(preempt_schedule); 2935EXPORT_SYMBOL(preempt_schedule);
@@ -3405,6 +3425,20 @@ static bool check_same_owner(struct task_struct *p)
3405 return match; 3425 return match;
3406} 3426}
3407 3427
3428static bool dl_param_changed(struct task_struct *p,
3429 const struct sched_attr *attr)
3430{
3431 struct sched_dl_entity *dl_se = &p->dl;
3432
3433 if (dl_se->dl_runtime != attr->sched_runtime ||
3434 dl_se->dl_deadline != attr->sched_deadline ||
3435 dl_se->dl_period != attr->sched_period ||
3436 dl_se->flags != attr->sched_flags)
3437 return true;
3438
3439 return false;
3440}
3441
3408static int __sched_setscheduler(struct task_struct *p, 3442static int __sched_setscheduler(struct task_struct *p,
3409 const struct sched_attr *attr, 3443 const struct sched_attr *attr,
3410 bool user) 3444 bool user)
@@ -3533,7 +3567,7 @@ recheck:
3533 goto change; 3567 goto change;
3534 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 3568 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3535 goto change; 3569 goto change;
3536 if (dl_policy(policy)) 3570 if (dl_policy(policy) && dl_param_changed(p, attr))
3537 goto change; 3571 goto change;
3538 3572
3539 p->sched_reset_on_fork = reset_on_fork; 3573 p->sched_reset_on_fork = reset_on_fork;
@@ -4225,17 +4259,10 @@ SYSCALL_DEFINE0(sched_yield)
4225 return 0; 4259 return 0;
4226} 4260}
4227 4261
4228static void __cond_resched(void)
4229{
4230 __preempt_count_add(PREEMPT_ACTIVE);
4231 __schedule();
4232 __preempt_count_sub(PREEMPT_ACTIVE);
4233}
4234
4235int __sched _cond_resched(void) 4262int __sched _cond_resched(void)
4236{ 4263{
4237 if (should_resched()) { 4264 if (should_resched()) {
4238 __cond_resched(); 4265 preempt_schedule_common();
4239 return 1; 4266 return 1;
4240 } 4267 }
4241 return 0; 4268 return 0;
@@ -4260,7 +4287,7 @@ int __cond_resched_lock(spinlock_t *lock)
4260 if (spin_needbreak(lock) || resched) { 4287 if (spin_needbreak(lock) || resched) {
4261 spin_unlock(lock); 4288 spin_unlock(lock);
4262 if (resched) 4289 if (resched)
4263 __cond_resched(); 4290 preempt_schedule_common();
4264 else 4291 else
4265 cpu_relax(); 4292 cpu_relax();
4266 ret = 1; 4293 ret = 1;
@@ -4276,7 +4303,7 @@ int __sched __cond_resched_softirq(void)
4276 4303
4277 if (should_resched()) { 4304 if (should_resched()) {
4278 local_bh_enable(); 4305 local_bh_enable();
4279 __cond_resched(); 4306 preempt_schedule_common();
4280 local_bh_disable(); 4307 local_bh_disable();
4281 return 1; 4308 return 1;
4282 } 4309 }
@@ -4531,9 +4558,10 @@ void sched_show_task(struct task_struct *p)
4531{ 4558{
4532 unsigned long free = 0; 4559 unsigned long free = 0;
4533 int ppid; 4560 int ppid;
4534 unsigned state; 4561 unsigned long state = p->state;
4535 4562
4536 state = p->state ? __ffs(p->state) + 1 : 0; 4563 if (state)
4564 state = __ffs(state) + 1;
4537 printk(KERN_INFO "%-15.15s %c", p->comm, 4565 printk(KERN_INFO "%-15.15s %c", p->comm,
4538 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 4566 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4539#if BITS_PER_LONG == 32 4567#if BITS_PER_LONG == 32
@@ -4766,7 +4794,7 @@ static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
4766 4794
4767void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 4795void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4768{ 4796{
4769 if (p->sched_class && p->sched_class->set_cpus_allowed) 4797 if (p->sched_class->set_cpus_allowed)
4770 p->sched_class->set_cpus_allowed(p, new_mask); 4798 p->sched_class->set_cpus_allowed(p, new_mask);
4771 4799
4772 cpumask_copy(&p->cpus_allowed, new_mask); 4800 cpumask_copy(&p->cpus_allowed, new_mask);
@@ -7276,6 +7304,11 @@ void __init sched_init(void)
7276 enter_lazy_tlb(&init_mm, current); 7304 enter_lazy_tlb(&init_mm, current);
7277 7305
7278 /* 7306 /*
7307 * During early bootup we pretend to be a normal task:
7308 */
7309 current->sched_class = &fair_sched_class;
7310
7311 /*
7279 * Make us the idle thread. Technically, schedule() should not be 7312 * Make us the idle thread. Technically, schedule() should not be
7280 * called from this thread, however somewhere below it might be, 7313 * called from this thread, however somewhere below it might be,
7281 * but because we are the idle thread, we just pick up running again 7314 * but because we are the idle thread, we just pick up running again
@@ -7285,11 +7318,6 @@ void __init sched_init(void)
7285 7318
7286 calc_load_update = jiffies + LOAD_FREQ; 7319 calc_load_update = jiffies + LOAD_FREQ;
7287 7320
7288 /*
7289 * During early bootup we pretend to be a normal task:
7290 */
7291 current->sched_class = &fair_sched_class;
7292
7293#ifdef CONFIG_SMP 7321#ifdef CONFIG_SMP
7294 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); 7322 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7295 /* May be allocated at isolcpus cmdline parse time */ 7323 /* May be allocated at isolcpus cmdline parse time */
@@ -7350,6 +7378,9 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
7350 in_atomic(), irqs_disabled(), 7378 in_atomic(), irqs_disabled(),
7351 current->pid, current->comm); 7379 current->pid, current->comm);
7352 7380
7381 if (task_stack_end_corrupted(current))
7382 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7383
7353 debug_show_held_locks(current); 7384 debug_show_held_locks(current);
7354 if (irqs_disabled()) 7385 if (irqs_disabled())
7355 print_irqtrace_events(current); 7386 print_irqtrace_events(current);
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 539ca3ce071b..c6acb07466bb 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -107,7 +107,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
107 int best_cpu = -1; 107 int best_cpu = -1;
108 const struct sched_dl_entity *dl_se = &p->dl; 108 const struct sched_dl_entity *dl_se = &p->dl;
109 109
110 if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) { 110 if (later_mask &&
111 cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
111 best_cpu = cpumask_any(later_mask); 112 best_cpu = cpumask_any(later_mask);
112 goto out; 113 goto out;
113 } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && 114 } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
@@ -186,6 +187,26 @@ out:
186} 187}
187 188
188/* 189/*
190 * cpudl_set_freecpu - Set the cpudl.free_cpus
191 * @cp: the cpudl max-heap context
192 * @cpu: rd attached cpu
193 */
194void cpudl_set_freecpu(struct cpudl *cp, int cpu)
195{
196 cpumask_set_cpu(cpu, cp->free_cpus);
197}
198
199/*
200 * cpudl_clear_freecpu - Clear the cpudl.free_cpus
201 * @cp: the cpudl max-heap context
202 * @cpu: rd attached cpu
203 */
204void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
205{
206 cpumask_clear_cpu(cpu, cp->free_cpus);
207}
208
209/*
189 * cpudl_init - initialize the cpudl structure 210 * cpudl_init - initialize the cpudl structure
190 * @cp: the cpudl max-heap context 211 * @cp: the cpudl max-heap context
191 */ 212 */
@@ -203,7 +224,7 @@ int cpudl_init(struct cpudl *cp)
203 if (!cp->elements) 224 if (!cp->elements)
204 return -ENOMEM; 225 return -ENOMEM;
205 226
206 if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) { 227 if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
207 kfree(cp->elements); 228 kfree(cp->elements);
208 return -ENOMEM; 229 return -ENOMEM;
209 } 230 }
@@ -211,8 +232,6 @@ int cpudl_init(struct cpudl *cp)
211 for_each_possible_cpu(i) 232 for_each_possible_cpu(i)
212 cp->elements[i].idx = IDX_INVALID; 233 cp->elements[i].idx = IDX_INVALID;
213 234
214 cpumask_setall(cp->free_cpus);
215
216 return 0; 235 return 0;
217} 236}
218 237
diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h
index 020039bd1326..1a0a6ef2fbe1 100644
--- a/kernel/sched/cpudeadline.h
+++ b/kernel/sched/cpudeadline.h
@@ -24,6 +24,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
24 struct cpumask *later_mask); 24 struct cpumask *later_mask);
25void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid); 25void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
26int cpudl_init(struct cpudl *cp); 26int cpudl_init(struct cpudl *cp);
27void cpudl_set_freecpu(struct cpudl *cp, int cpu);
28void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
27void cpudl_cleanup(struct cpudl *cp); 29void cpudl_cleanup(struct cpudl *cp);
28#endif /* CONFIG_SMP */ 30#endif /* CONFIG_SMP */
29 31
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 726470d47f87..a027799ae130 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
350 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 350 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
351 dl_se->runtime = pi_se->dl_runtime; 351 dl_se->runtime = pi_se->dl_runtime;
352 } 352 }
353
354 if (dl_se->dl_yielded)
355 dl_se->dl_yielded = 0;
356 if (dl_se->dl_throttled)
357 dl_se->dl_throttled = 0;
353} 358}
354 359
355/* 360/*
@@ -536,23 +541,19 @@ again:
536 541
537 sched_clock_tick(); 542 sched_clock_tick();
538 update_rq_clock(rq); 543 update_rq_clock(rq);
539 dl_se->dl_throttled = 0; 544 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
540 dl_se->dl_yielded = 0; 545 if (dl_task(rq->curr))
541 if (task_on_rq_queued(p)) { 546 check_preempt_curr_dl(rq, p, 0);
542 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 547 else
543 if (dl_task(rq->curr)) 548 resched_curr(rq);
544 check_preempt_curr_dl(rq, p, 0);
545 else
546 resched_curr(rq);
547#ifdef CONFIG_SMP 549#ifdef CONFIG_SMP
548 /* 550 /*
549 * Queueing this task back might have overloaded rq, 551 * Queueing this task back might have overloaded rq,
550 * check if we need to kick someone away. 552 * check if we need to kick someone away.
551 */ 553 */
552 if (has_pushable_dl_tasks(rq)) 554 if (has_pushable_dl_tasks(rq))
553 push_dl_task(rq); 555 push_dl_task(rq);
554#endif 556#endif
555 }
556unlock: 557unlock:
557 raw_spin_unlock(&rq->lock); 558 raw_spin_unlock(&rq->lock);
558 559
@@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq)
613 614
614 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec; 615 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
615 if (dl_runtime_exceeded(rq, dl_se)) { 616 if (dl_runtime_exceeded(rq, dl_se)) {
617 dl_se->dl_throttled = 1;
616 __dequeue_task_dl(rq, curr, 0); 618 __dequeue_task_dl(rq, curr, 0);
617 if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted))) 619 if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
618 dl_se->dl_throttled = 1;
619 else
620 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); 620 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
621 621
622 if (!is_leftmost(curr, &rq->dl)) 622 if (!is_leftmost(curr, &rq->dl))
@@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
853 * its rq, the bandwidth timer callback (which clearly has not 853 * its rq, the bandwidth timer callback (which clearly has not
854 * run yet) will take care of this. 854 * run yet) will take care of this.
855 */ 855 */
856 if (p->dl.dl_throttled) 856 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
857 return; 857 return;
858 858
859 enqueue_dl_entity(&p->dl, pi_se, flags); 859 enqueue_dl_entity(&p->dl, pi_se, flags);
@@ -1073,7 +1073,13 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1073{ 1073{
1074 update_curr_dl(rq); 1074 update_curr_dl(rq);
1075 1075
1076 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0) 1076 /*
1077 * Even when we have runtime, update_curr_dl() might have resulted in us
1078 * not being the leftmost task anymore. In that case NEED_RESCHED will
1079 * be set and schedule() will start a new hrtick for the next task.
1080 */
1081 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1082 is_leftmost(p, &rq->dl))
1077 start_hrtick_dl(rq, p); 1083 start_hrtick_dl(rq, p);
1078} 1084}
1079 1085
@@ -1166,9 +1172,6 @@ static int find_later_rq(struct task_struct *task)
1166 * We have to consider system topology and task affinity 1172 * We have to consider system topology and task affinity
1167 * first, then we can look for a suitable cpu. 1173 * first, then we can look for a suitable cpu.
1168 */ 1174 */
1169 cpumask_copy(later_mask, task_rq(task)->rd->span);
1170 cpumask_and(later_mask, later_mask, cpu_active_mask);
1171 cpumask_and(later_mask, later_mask, &task->cpus_allowed);
1172 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl, 1175 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1173 task, later_mask); 1176 task, later_mask);
1174 if (best_cpu == -1) 1177 if (best_cpu == -1)
@@ -1563,6 +1566,7 @@ static void rq_online_dl(struct rq *rq)
1563 if (rq->dl.overloaded) 1566 if (rq->dl.overloaded)
1564 dl_set_overload(rq); 1567 dl_set_overload(rq);
1565 1568
1569 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1566 if (rq->dl.dl_nr_running > 0) 1570 if (rq->dl.dl_nr_running > 0)
1567 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1); 1571 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1568} 1572}
@@ -1574,6 +1578,7 @@ static void rq_offline_dl(struct rq *rq)
1574 dl_clear_overload(rq); 1578 dl_clear_overload(rq);
1575 1579
1576 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0); 1580 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1581 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1577} 1582}
1578 1583
1579void init_sched_dl_class(void) 1584void init_sched_dl_class(void)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 92cc52001e74..8baaf858d25c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -305,6 +305,7 @@ do { \
305 PN(next_balance); 305 PN(next_balance);
306 SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr))); 306 SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
307 PN(clock); 307 PN(clock);
308 PN(clock_task);
308 P(cpu_load[0]); 309 P(cpu_load[0]);
309 P(cpu_load[1]); 310 P(cpu_load[1]);
310 P(cpu_load[2]); 311 P(cpu_load[2]);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fe331fc391f5..7ce18f3c097a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -676,7 +676,6 @@ void init_task_runnable_average(struct task_struct *p)
676{ 676{
677 u32 slice; 677 u32 slice;
678 678
679 p->se.avg.decay_count = 0;
680 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10; 679 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
681 p->se.avg.runnable_avg_sum = slice; 680 p->se.avg.runnable_avg_sum = slice;
682 p->se.avg.runnable_avg_period = slice; 681 p->se.avg.runnable_avg_period = slice;
@@ -2574,11 +2573,11 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2574 u64 decays = atomic64_read(&cfs_rq->decay_counter); 2573 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2575 2574
2576 decays -= se->avg.decay_count; 2575 decays -= se->avg.decay_count;
2576 se->avg.decay_count = 0;
2577 if (!decays) 2577 if (!decays)
2578 return 0; 2578 return 0;
2579 2579
2580 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays); 2580 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2581 se->avg.decay_count = 0;
2582 2581
2583 return decays; 2582 return decays;
2584} 2583}
@@ -5157,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
5157 * so we don't do microscopic update in schedule() 5156 * so we don't do microscopic update in schedule()
5158 * and double the fastpath cost. 5157 * and double the fastpath cost.
5159 */ 5158 */
5160 rq->skip_clock_update = 1; 5159 rq_clock_skip_update(rq, true);
5161 } 5160 }
5162 5161
5163 set_skip_buddy(se); 5162 set_skip_buddy(se);
@@ -5949,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
5949 */ 5948 */
5950 age_stamp = ACCESS_ONCE(rq->age_stamp); 5949 age_stamp = ACCESS_ONCE(rq->age_stamp);
5951 avg = ACCESS_ONCE(rq->rt_avg); 5950 avg = ACCESS_ONCE(rq->rt_avg);
5951 delta = __rq_clock_broken(rq) - age_stamp;
5952 5952
5953 delta = rq_clock(rq) - age_stamp;
5954 if (unlikely(delta < 0)) 5953 if (unlikely(delta < 0))
5955 delta = 0; 5954 delta = 0;
5956 5955
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c47fce75e666..aaf1c1d5cf5d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -47,7 +47,8 @@ static inline int cpu_idle_poll(void)
47 rcu_idle_enter(); 47 rcu_idle_enter();
48 trace_cpu_idle_rcuidle(0, smp_processor_id()); 48 trace_cpu_idle_rcuidle(0, smp_processor_id());
49 local_irq_enable(); 49 local_irq_enable();
50 while (!tif_need_resched()) 50 while (!tif_need_resched() &&
51 (cpu_idle_force_poll || tick_check_broadcast_expired()))
51 cpu_relax(); 52 cpu_relax();
52 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 53 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
53 rcu_idle_exit(); 54 rcu_idle_exit();
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ee15f5a0d1c1..f4d4b077eba0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -831,11 +831,14 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
831 enqueue = 1; 831 enqueue = 1;
832 832
833 /* 833 /*
834 * Force a clock update if the CPU was idle, 834 * When we're idle and a woken (rt) task is
835 * lest wakeup -> unthrottle time accumulate. 835 * throttled check_preempt_curr() will set
836 * skip_update and the time between the wakeup
837 * and this unthrottle will get accounted as
838 * 'runtime'.
836 */ 839 */
837 if (rt_rq->rt_nr_running && rq->curr == rq->idle) 840 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
838 rq->skip_clock_update = -1; 841 rq_clock_skip_update(rq, false);
839 } 842 }
840 if (rt_rq->rt_time || rt_rq->rt_nr_running) 843 if (rt_rq->rt_time || rt_rq->rt_nr_running)
841 idle = 0; 844 idle = 0;
@@ -1337,7 +1340,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1337 curr->prio <= p->prio)) { 1340 curr->prio <= p->prio)) {
1338 int target = find_lowest_rq(p); 1341 int target = find_lowest_rq(p);
1339 1342
1340 if (target != -1) 1343 /*
1344 * Don't bother moving it if the destination CPU is
1345 * not running a lower priority task.
1346 */
1347 if (target != -1 &&
1348 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1341 cpu = target; 1349 cpu = target;
1342 } 1350 }
1343 rcu_read_unlock(); 1351 rcu_read_unlock();
@@ -1614,6 +1622,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1614 1622
1615 lowest_rq = cpu_rq(cpu); 1623 lowest_rq = cpu_rq(cpu);
1616 1624
1625 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1626 /*
1627 * Target rq has tasks of equal or higher priority,
1628 * retrying does not release any lock and is unlikely
1629 * to yield a different result.
1630 */
1631 lowest_rq = NULL;
1632 break;
1633 }
1634
1617 /* if the prio of this runqueue changed, try again */ 1635 /* if the prio of this runqueue changed, try again */
1618 if (double_lock_balance(rq, lowest_rq)) { 1636 if (double_lock_balance(rq, lowest_rq)) {
1619 /* 1637 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c970e7..0870db23d79c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -558,8 +558,6 @@ struct rq {
558#ifdef CONFIG_NO_HZ_FULL 558#ifdef CONFIG_NO_HZ_FULL
559 unsigned long last_sched_tick; 559 unsigned long last_sched_tick;
560#endif 560#endif
561 int skip_clock_update;
562
563 /* capture load from *all* tasks on this cpu: */ 561 /* capture load from *all* tasks on this cpu: */
564 struct load_weight load; 562 struct load_weight load;
565 unsigned long nr_load_updates; 563 unsigned long nr_load_updates;
@@ -588,6 +586,7 @@ struct rq {
588 unsigned long next_balance; 586 unsigned long next_balance;
589 struct mm_struct *prev_mm; 587 struct mm_struct *prev_mm;
590 588
589 unsigned int clock_skip_update;
591 u64 clock; 590 u64 clock;
592 u64 clock_task; 591 u64 clock_task;
593 592
@@ -687,16 +686,35 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
687#define cpu_curr(cpu) (cpu_rq(cpu)->curr) 686#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
688#define raw_rq() raw_cpu_ptr(&runqueues) 687#define raw_rq() raw_cpu_ptr(&runqueues)
689 688
689static inline u64 __rq_clock_broken(struct rq *rq)
690{
691 return ACCESS_ONCE(rq->clock);
692}
693
690static inline u64 rq_clock(struct rq *rq) 694static inline u64 rq_clock(struct rq *rq)
691{ 695{
696 lockdep_assert_held(&rq->lock);
692 return rq->clock; 697 return rq->clock;
693} 698}
694 699
695static inline u64 rq_clock_task(struct rq *rq) 700static inline u64 rq_clock_task(struct rq *rq)
696{ 701{
702 lockdep_assert_held(&rq->lock);
697 return rq->clock_task; 703 return rq->clock_task;
698} 704}
699 705
706#define RQCF_REQ_SKIP 0x01
707#define RQCF_ACT_SKIP 0x02
708
709static inline void rq_clock_skip_update(struct rq *rq, bool skip)
710{
711 lockdep_assert_held(&rq->lock);
712 if (skip)
713 rq->clock_skip_update |= RQCF_REQ_SKIP;
714 else
715 rq->clock_skip_update &= ~RQCF_REQ_SKIP;
716}
717
700#ifdef CONFIG_NUMA 718#ifdef CONFIG_NUMA
701enum numa_topology_type { 719enum numa_topology_type {
702 NUMA_DIRECT, 720 NUMA_DIRECT,
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 501baa9ac1be..479e4436f787 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -114,8 +114,12 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
114 trace_softirqs_off(ip); 114 trace_softirqs_off(ip);
115 raw_local_irq_restore(flags); 115 raw_local_irq_restore(flags);
116 116
117 if (preempt_count() == cnt) 117 if (preempt_count() == cnt) {
118#ifdef CONFIG_DEBUG_PREEMPT
119 current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
120#endif
118 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 121 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
122 }
119} 123}
120EXPORT_SYMBOL(__local_bh_disable_ip); 124EXPORT_SYMBOL(__local_bh_disable_ip);
121#endif /* CONFIG_TRACE_IRQFLAGS */ 125#endif /* CONFIG_TRACE_IRQFLAGS */
@@ -656,9 +660,8 @@ static void run_ksoftirqd(unsigned int cpu)
656 * in the task stack here. 660 * in the task stack here.
657 */ 661 */
658 __do_softirq(); 662 __do_softirq();
659 rcu_note_context_switch();
660 local_irq_enable(); 663 local_irq_enable();
661 cond_resched(); 664 cond_resched_rcu_qs();
662 return; 665 return;
663 } 666 }
664 local_irq_enable(); 667 local_irq_enable();
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d8c724cda37b..3f5e183c3d97 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -266,7 +266,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
266/* 266/*
267 * Divide a ktime value by a nanosecond value 267 * Divide a ktime value by a nanosecond value
268 */ 268 */
269u64 ktime_divns(const ktime_t kt, s64 div) 269u64 __ktime_divns(const ktime_t kt, s64 div)
270{ 270{
271 u64 dclc; 271 u64 dclc;
272 int sft = 0; 272 int sft = 0;
@@ -282,7 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
282 282
283 return dclc; 283 return dclc;
284} 284}
285EXPORT_SYMBOL_GPL(ktime_divns); 285EXPORT_SYMBOL_GPL(__ktime_divns);
286#endif /* BITS_PER_LONG >= 64 */ 286#endif /* BITS_PER_LONG >= 64 */
287 287
288/* 288/*
@@ -440,6 +440,37 @@ static inline void debug_deactivate(struct hrtimer *timer)
440 trace_hrtimer_cancel(timer); 440 trace_hrtimer_cancel(timer);
441} 441}
442 442
443#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
444static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
445{
446 struct hrtimer_clock_base *base = cpu_base->clock_base;
447 ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
448 int i;
449
450 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
451 struct timerqueue_node *next;
452 struct hrtimer *timer;
453
454 next = timerqueue_getnext(&base->active);
455 if (!next)
456 continue;
457
458 timer = container_of(next, struct hrtimer, node);
459 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
460 if (expires.tv64 < expires_next.tv64)
461 expires_next = expires;
462 }
463 /*
464 * clock_was_set() might have changed base->offset of any of
465 * the clock bases so the result might be negative. Fix it up
466 * to prevent a false positive in clockevents_program_event().
467 */
468 if (expires_next.tv64 < 0)
469 expires_next.tv64 = 0;
470 return expires_next;
471}
472#endif
473
443/* High resolution timer related functions */ 474/* High resolution timer related functions */
444#ifdef CONFIG_HIGH_RES_TIMERS 475#ifdef CONFIG_HIGH_RES_TIMERS
445 476
@@ -488,32 +519,7 @@ static inline int hrtimer_hres_active(void)
488static void 519static void
489hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) 520hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
490{ 521{
491 int i; 522 ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
492 struct hrtimer_clock_base *base = cpu_base->clock_base;
493 ktime_t expires, expires_next;
494
495 expires_next.tv64 = KTIME_MAX;
496
497 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
498 struct hrtimer *timer;
499 struct timerqueue_node *next;
500
501 next = timerqueue_getnext(&base->active);
502 if (!next)
503 continue;
504 timer = container_of(next, struct hrtimer, node);
505
506 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
507 /*
508 * clock_was_set() has changed base->offset so the
509 * result might be negative. Fix it up to prevent a
510 * false positive in clockevents_program_event()
511 */
512 if (expires.tv64 < 0)
513 expires.tv64 = 0;
514 if (expires.tv64 < expires_next.tv64)
515 expires_next = expires;
516 }
517 523
518 if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) 524 if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
519 return; 525 return;
@@ -587,6 +593,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
587 return 0; 593 return 0;
588 594
589 /* 595 /*
596 * When the target cpu of the timer is currently executing
597 * hrtimer_interrupt(), then we do not touch the clock event
598 * device. hrtimer_interrupt() will reevaluate all clock bases
599 * before reprogramming the device.
600 */
601 if (cpu_base->in_hrtirq)
602 return 0;
603
604 /*
590 * If a hang was detected in the last timer interrupt then we 605 * If a hang was detected in the last timer interrupt then we
591 * do not schedule a timer which is earlier than the expiry 606 * do not schedule a timer which is earlier than the expiry
592 * which we enforced in the hang detection. We want the system 607 * which we enforced in the hang detection. We want the system
@@ -1104,29 +1119,14 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
1104ktime_t hrtimer_get_next_event(void) 1119ktime_t hrtimer_get_next_event(void)
1105{ 1120{
1106 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); 1121 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1107 struct hrtimer_clock_base *base = cpu_base->clock_base; 1122 ktime_t mindelta = { .tv64 = KTIME_MAX };
1108 ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
1109 unsigned long flags; 1123 unsigned long flags;
1110 int i;
1111 1124
1112 raw_spin_lock_irqsave(&cpu_base->lock, flags); 1125 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1113 1126
1114 if (!hrtimer_hres_active()) { 1127 if (!hrtimer_hres_active())
1115 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { 1128 mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
1116 struct hrtimer *timer; 1129 ktime_get());
1117 struct timerqueue_node *next;
1118
1119 next = timerqueue_getnext(&base->active);
1120 if (!next)
1121 continue;
1122
1123 timer = container_of(next, struct hrtimer, node);
1124 delta.tv64 = hrtimer_get_expires_tv64(timer);
1125 delta = ktime_sub(delta, base->get_time());
1126 if (delta.tv64 < mindelta.tv64)
1127 mindelta.tv64 = delta.tv64;
1128 }
1129 }
1130 1130
1131 raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1131 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1132 1132
@@ -1253,7 +1253,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1253 raw_spin_lock(&cpu_base->lock); 1253 raw_spin_lock(&cpu_base->lock);
1254 entry_time = now = hrtimer_update_base(cpu_base); 1254 entry_time = now = hrtimer_update_base(cpu_base);
1255retry: 1255retry:
1256 expires_next.tv64 = KTIME_MAX; 1256 cpu_base->in_hrtirq = 1;
1257 /* 1257 /*
1258 * We set expires_next to KTIME_MAX here with cpu_base->lock 1258 * We set expires_next to KTIME_MAX here with cpu_base->lock
1259 * held to prevent that a timer is enqueued in our queue via 1259 * held to prevent that a timer is enqueued in our queue via
@@ -1291,28 +1291,20 @@ retry:
1291 * are right-of a not yet expired timer, because that 1291 * are right-of a not yet expired timer, because that
1292 * timer will have to trigger a wakeup anyway. 1292 * timer will have to trigger a wakeup anyway.
1293 */ 1293 */
1294 1294 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
1295 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
1296 ktime_t expires;
1297
1298 expires = ktime_sub(hrtimer_get_expires(timer),
1299 base->offset);
1300 if (expires.tv64 < 0)
1301 expires.tv64 = KTIME_MAX;
1302 if (expires.tv64 < expires_next.tv64)
1303 expires_next = expires;
1304 break; 1295 break;
1305 }
1306 1296
1307 __run_hrtimer(timer, &basenow); 1297 __run_hrtimer(timer, &basenow);
1308 } 1298 }
1309 } 1299 }
1310 1300 /* Reevaluate the clock bases for the next expiry */
1301 expires_next = __hrtimer_get_next_event(cpu_base);
1311 /* 1302 /*
1312 * Store the new expiry value so the migration code can verify 1303 * Store the new expiry value so the migration code can verify
1313 * against it. 1304 * against it.
1314 */ 1305 */
1315 cpu_base->expires_next = expires_next; 1306 cpu_base->expires_next = expires_next;
1307 cpu_base->in_hrtirq = 0;
1316 raw_spin_unlock(&cpu_base->lock); 1308 raw_spin_unlock(&cpu_base->lock);
1317 1309
1318 /* Reprogramming necessary ? */ 1310 /* Reprogramming necessary ? */
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 28bf91c60a0b..4b585e0fdd22 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -488,13 +488,13 @@ static void sync_cmos_clock(struct work_struct *work)
488 488
489 getnstimeofday64(&now); 489 getnstimeofday64(&now);
490 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) { 490 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
491 struct timespec adjust = timespec64_to_timespec(now); 491 struct timespec64 adjust = now;
492 492
493 fail = -ENODEV; 493 fail = -ENODEV;
494 if (persistent_clock_is_local) 494 if (persistent_clock_is_local)
495 adjust.tv_sec -= (sys_tz.tz_minuteswest * 60); 495 adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
496#ifdef CONFIG_GENERIC_CMOS_UPDATE 496#ifdef CONFIG_GENERIC_CMOS_UPDATE
497 fail = update_persistent_clock(adjust); 497 fail = update_persistent_clock(timespec64_to_timespec(adjust));
498#endif 498#endif
499#ifdef CONFIG_RTC_SYSTOHC 499#ifdef CONFIG_RTC_SYSTOHC
500 if (fail == -ENODEV) 500 if (fail == -ENODEV)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6a931852082f..b124af259800 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1659,24 +1659,24 @@ out:
1659} 1659}
1660 1660
1661/** 1661/**
1662 * getboottime - Return the real time of system boot. 1662 * getboottime64 - Return the real time of system boot.
1663 * @ts: pointer to the timespec to be set 1663 * @ts: pointer to the timespec64 to be set
1664 * 1664 *
1665 * Returns the wall-time of boot in a timespec. 1665 * Returns the wall-time of boot in a timespec64.
1666 * 1666 *
1667 * This is based on the wall_to_monotonic offset and the total suspend 1667 * This is based on the wall_to_monotonic offset and the total suspend
1668 * time. Calls to settimeofday will affect the value returned (which 1668 * time. Calls to settimeofday will affect the value returned (which
1669 * basically means that however wrong your real time clock is at boot time, 1669 * basically means that however wrong your real time clock is at boot time,
1670 * you get the right time here). 1670 * you get the right time here).
1671 */ 1671 */
1672void getboottime(struct timespec *ts) 1672void getboottime64(struct timespec64 *ts)
1673{ 1673{
1674 struct timekeeper *tk = &tk_core.timekeeper; 1674 struct timekeeper *tk = &tk_core.timekeeper;
1675 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); 1675 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
1676 1676
1677 *ts = ktime_to_timespec(t); 1677 *ts = ktime_to_timespec64(t);
1678} 1678}
1679EXPORT_SYMBOL_GPL(getboottime); 1679EXPORT_SYMBOL_GPL(getboottime64);
1680 1680
1681unsigned long get_seconds(void) 1681unsigned long get_seconds(void)
1682{ 1682{
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 1c71382b283d..eb4220a132ec 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -13,5 +13,6 @@
13#define CREATE_TRACE_POINTS 13#define CREATE_TRACE_POINTS
14#include <trace/events/power.h> 14#include <trace/events/power.h>
15 15
16EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
16EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle); 17EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
17 18
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 4b9c114ee9de..6fa484de2ba1 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -261,7 +261,7 @@ void perf_trace_del(struct perf_event *p_event, int flags)
261} 261}
262 262
263void *perf_trace_buf_prepare(int size, unsigned short type, 263void *perf_trace_buf_prepare(int size, unsigned short type,
264 struct pt_regs *regs, int *rctxp) 264 struct pt_regs **regs, int *rctxp)
265{ 265{
266 struct trace_entry *entry; 266 struct trace_entry *entry;
267 unsigned long flags; 267 unsigned long flags;
@@ -280,6 +280,8 @@ void *perf_trace_buf_prepare(int size, unsigned short type,
280 if (*rctxp < 0) 280 if (*rctxp < 0)
281 return NULL; 281 return NULL;
282 282
283 if (regs)
284 *regs = this_cpu_ptr(&__perf_regs[*rctxp]);
283 raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]); 285 raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
284 286
285 /* zero the dead bytes from align to not leak stack to user */ 287 /* zero the dead bytes from align to not leak stack to user */
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5edb518be345..296079ae6583 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1148,7 +1148,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1148 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1148 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1149 size -= sizeof(u32); 1149 size -= sizeof(u32);
1150 1150
1151 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1151 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1152 if (!entry) 1152 if (!entry)
1153 return; 1153 return;
1154 1154
@@ -1179,7 +1179,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1179 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1179 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1180 size -= sizeof(u32); 1180 size -= sizeof(u32);
1181 1181
1182 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1182 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1183 if (!entry) 1183 if (!entry)
1184 return; 1184 return;
1185 1185
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index c6ee36fcbf90..f97f6e3a676c 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -574,7 +574,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
574 size -= sizeof(u32); 574 size -= sizeof(u32);
575 575
576 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, 576 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
577 sys_data->enter_event->event.type, regs, &rctx); 577 sys_data->enter_event->event.type, NULL, &rctx);
578 if (!rec) 578 if (!rec)
579 return; 579 return;
580 580
@@ -647,7 +647,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
647 size -= sizeof(u32); 647 size -= sizeof(u32);
648 648
649 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, 649 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
650 sys_data->exit_event->event.type, regs, &rctx); 650 sys_data->exit_event->event.type, NULL, &rctx);
651 if (!rec) 651 if (!rec)
652 return; 652 return;
653 653
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 8520acc34b18..b11441321e7a 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1111,7 +1111,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
1111 if (hlist_empty(head)) 1111 if (hlist_empty(head))
1112 goto out; 1112 goto out;
1113 1113
1114 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); 1114 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1115 if (!entry) 1115 if (!entry)
1116 goto out; 1116 goto out;
1117 1117
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5f2ce616c046..a2ca213c71ca 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST
1215 tristate "torture tests for RCU" 1215 tristate "torture tests for RCU"
1216 depends on DEBUG_KERNEL 1216 depends on DEBUG_KERNEL
1217 select TORTURE_TEST 1217 select TORTURE_TEST
1218 select SRCU
1218 default n 1219 default n
1219 help 1220 help
1220 This option provides a kernel module that runs torture tests 1221 This option provides a kernel module that runs torture tests
@@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT
1257config RCU_CPU_STALL_INFO 1258config RCU_CPU_STALL_INFO
1258 bool "Print additional diagnostics on RCU CPU stall" 1259 bool "Print additional diagnostics on RCU CPU stall"
1259 depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL 1260 depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
1260 default n 1261 default y
1261 help 1262 help
1262 For each stalled CPU that is aware of the current RCU grace 1263 For each stalled CPU that is aware of the current RCU grace
1263 period, print out additional per-CPU diagnostic information 1264 period, print out additional per-CPU diagnostic information
diff --git a/mm/Kconfig b/mm/Kconfig
index 1d1ae6b078fd..4395b12869c8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -325,6 +325,7 @@ config VIRT_TO_BUS
325 325
326config MMU_NOTIFIER 326config MMU_NOTIFIER
327 bool 327 bool
328 select SRCU
328 329
329config KSM 330config KSM
330 bool "Enable KSM for page merging" 331 bool "Enable KSM for page merging"
diff --git a/mm/memory.c b/mm/memory.c
index 2c3536cc6c63..d707c4dfbbb4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -754,6 +754,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
754 if (HAVE_PTE_SPECIAL) { 754 if (HAVE_PTE_SPECIAL) {
755 if (likely(!pte_special(pte))) 755 if (likely(!pte_special(pte)))
756 goto check_pfn; 756 goto check_pfn;
757 if (vma->vm_ops && vma->vm_ops->find_special_page)
758 return vma->vm_ops->find_special_page(vma, addr);
757 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) 759 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
758 return NULL; 760 return NULL;
759 if (!is_zero_pfn(pfn)) 761 if (!is_zero_pfn(pfn))
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index 8eb779b9d77f..604e718d68d3 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -5,6 +5,7 @@ config SECURITY_TOMOYO
5 select SECURITYFS 5 select SECURITYFS
6 select SECURITY_PATH 6 select SECURITY_PATH
7 select SECURITY_NETWORK 7 select SECURITY_NETWORK
8 select SRCU
8 default n 9 default n
9 help 10 help
10 This selects TOMOYO Linux, pathname-based access control. 11 This selects TOMOYO Linux, pathname-based access control.
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 13c214466d3b..1c56bf58eff9 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -851,7 +851,7 @@ static int __init AtaIrqInit(void)
851 st_mfp.tim_dt_a = 1; /* Cause interrupt after first event. */ 851 st_mfp.tim_dt_a = 1; /* Cause interrupt after first event. */
852 st_mfp.tim_ct_a = 8; /* Turn on event counting. */ 852 st_mfp.tim_ct_a = 8; /* Turn on event counting. */
853 /* Register interrupt handler. */ 853 /* Register interrupt handler. */
854 if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, IRQ_TYPE_SLOW, "DMA sound", 854 if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, 0, "DMA sound",
855 AtaInterrupt)) 855 AtaInterrupt))
856 return 0; 856 return 0;
857 st_mfp.int_en_a |= 0x20; /* Turn interrupt on. */ 857 st_mfp.int_en_a |= 0x20; /* Turn interrupt on. */
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index 86ea2d7b8845..d2b18e887071 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -1,3 +1,4 @@
1#define _GNU_SOURCE
1#include <errno.h> 2#include <errno.h>
2#include <stdio.h> 3#include <stdio.h>
3#include <stdlib.h> 4#include <stdlib.h>
@@ -98,3 +99,45 @@ char *debugfs_mount(const char *mountpoint)
98out: 99out:
99 return debugfs_mountpoint; 100 return debugfs_mountpoint;
100} 101}
102
103int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename)
104{
105 char sbuf[128];
106
107 switch (err) {
108 case ENOENT:
109 if (debugfs_found) {
110 snprintf(buf, size,
111 "Error:\tFile %s/%s not found.\n"
112 "Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
113 debugfs_mountpoint, filename);
114 break;
115 }
116 snprintf(buf, size, "%s",
117 "Error:\tUnable to find debugfs\n"
118 "Hint:\tWas your kernel compiled with debugfs support?\n"
119 "Hint:\tIs the debugfs filesystem mounted?\n"
120 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
121 break;
122 case EACCES:
123 snprintf(buf, size,
124 "Error:\tNo permissions to read %s/%s\n"
125 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
126 debugfs_mountpoint, filename, debugfs_mountpoint);
127 break;
128 default:
129 snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
130 break;
131 }
132
133 return 0;
134}
135
136int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
137{
138 char path[PATH_MAX];
139
140 snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*");
141
142 return debugfs__strerror_open(err, buf, size, path);
143}
diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h
index f19d3df9609d..0739881a9897 100644
--- a/tools/lib/api/fs/debugfs.h
+++ b/tools/lib/api/fs/debugfs.h
@@ -26,4 +26,7 @@ char *debugfs_mount(const char *mountpoint);
26 26
27extern char debugfs_mountpoint[]; 27extern char debugfs_mountpoint[];
28 28
29int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename);
30int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
31
29#endif /* __API_DEBUGFS_H__ */ 32#endif /* __API_DEBUGFS_H__ */
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index cf3a44bf1ec3..afe20ed9fac8 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -32,6 +32,7 @@
32#include <stdint.h> 32#include <stdint.h>
33#include <limits.h> 33#include <limits.h>
34 34
35#include <netinet/ip6.h>
35#include "event-parse.h" 36#include "event-parse.h"
36#include "event-utils.h" 37#include "event-utils.h"
37 38
@@ -4149,6 +4150,324 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
4149 trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 4150 trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
4150} 4151}
4151 4152
4153static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
4154{
4155 const char *fmt;
4156
4157 if (i == 'i')
4158 fmt = "%03d.%03d.%03d.%03d";
4159 else
4160 fmt = "%d.%d.%d.%d";
4161
4162 trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
4163}
4164
4165static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
4166{
4167 return ((unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
4168 (unsigned long)(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
4169}
4170
4171static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
4172{
4173 return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
4174}
4175
4176static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr)
4177{
4178 int i, j, range;
4179 unsigned char zerolength[8];
4180 int longest = 1;
4181 int colonpos = -1;
4182 uint16_t word;
4183 uint8_t hi, lo;
4184 bool needcolon = false;
4185 bool useIPv4;
4186 struct in6_addr in6;
4187
4188 memcpy(&in6, addr, sizeof(struct in6_addr));
4189
4190 useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
4191
4192 memset(zerolength, 0, sizeof(zerolength));
4193
4194 if (useIPv4)
4195 range = 6;
4196 else
4197 range = 8;
4198
4199 /* find position of longest 0 run */
4200 for (i = 0; i < range; i++) {
4201 for (j = i; j < range; j++) {
4202 if (in6.s6_addr16[j] != 0)
4203 break;
4204 zerolength[i]++;
4205 }
4206 }
4207 for (i = 0; i < range; i++) {
4208 if (zerolength[i] > longest) {
4209 longest = zerolength[i];
4210 colonpos = i;
4211 }
4212 }
4213 if (longest == 1) /* don't compress a single 0 */
4214 colonpos = -1;
4215
4216 /* emit address */
4217 for (i = 0; i < range; i++) {
4218 if (i == colonpos) {
4219 if (needcolon || i == 0)
4220 trace_seq_printf(s, ":");
4221 trace_seq_printf(s, ":");
4222 needcolon = false;
4223 i += longest - 1;
4224 continue;
4225 }
4226 if (needcolon) {
4227 trace_seq_printf(s, ":");
4228 needcolon = false;
4229 }
4230 /* hex u16 without leading 0s */
4231 word = ntohs(in6.s6_addr16[i]);
4232 hi = word >> 8;
4233 lo = word & 0xff;
4234 if (hi)
4235 trace_seq_printf(s, "%x%02x", hi, lo);
4236 else
4237 trace_seq_printf(s, "%x", lo);
4238
4239 needcolon = true;
4240 }
4241
4242 if (useIPv4) {
4243 if (needcolon)
4244 trace_seq_printf(s, ":");
4245 print_ip4_addr(s, 'I', &in6.s6_addr[12]);
4246 }
4247
4248 return;
4249}
4250
4251static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
4252{
4253 int j;
4254
4255 for (j = 0; j < 16; j += 2) {
4256 trace_seq_printf(s, "%02x%02x", buf[j], buf[j+1]);
4257 if (i == 'I' && j < 14)
4258 trace_seq_printf(s, ":");
4259 }
4260}
4261
4262/*
4263 * %pi4 print an IPv4 address with leading zeros
4264 * %pI4 print an IPv4 address without leading zeros
4265 * %pi6 print an IPv6 address without colons
4266 * %pI6 print an IPv6 address with colons
4267 * %pI6c print an IPv6 address in compressed form with colons
4268 * %pISpc print an IP address based on sockaddr; p adds port.
4269 */
4270static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
4271 void *data, int size, struct event_format *event,
4272 struct print_arg *arg)
4273{
4274 unsigned char *buf;
4275
4276 if (arg->type == PRINT_FUNC) {
4277 process_defined_func(s, data, size, event, arg);
4278 return 0;
4279 }
4280
4281 if (arg->type != PRINT_FIELD) {
4282 trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
4283 return 0;
4284 }
4285
4286 if (!arg->field.field) {
4287 arg->field.field =
4288 pevent_find_any_field(event, arg->field.name);
4289 if (!arg->field.field) {
4290 do_warning("%s: field %s not found",
4291 __func__, arg->field.name);
4292 return 0;
4293 }
4294 }
4295
4296 buf = data + arg->field.field->offset;
4297
4298 if (arg->field.field->size != 4) {
4299 trace_seq_printf(s, "INVALIDIPv4");
4300 return 0;
4301 }
4302 print_ip4_addr(s, i, buf);
4303
4304 return 0;
4305}
4306
4307static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
4308 void *data, int size, struct event_format *event,
4309 struct print_arg *arg)
4310{
4311 char have_c = 0;
4312 unsigned char *buf;
4313 int rc = 0;
4314
4315 /* pI6c */
4316 if (i == 'I' && *ptr == 'c') {
4317 have_c = 1;
4318 ptr++;
4319 rc++;
4320 }
4321
4322 if (arg->type == PRINT_FUNC) {
4323 process_defined_func(s, data, size, event, arg);
4324 return rc;
4325 }
4326
4327 if (arg->type != PRINT_FIELD) {
4328 trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
4329 return rc;
4330 }
4331
4332 if (!arg->field.field) {
4333 arg->field.field =
4334 pevent_find_any_field(event, arg->field.name);
4335 if (!arg->field.field) {
4336 do_warning("%s: field %s not found",
4337 __func__, arg->field.name);
4338 return rc;
4339 }
4340 }
4341
4342 buf = data + arg->field.field->offset;
4343
4344 if (arg->field.field->size != 16) {
4345 trace_seq_printf(s, "INVALIDIPv6");
4346 return rc;
4347 }
4348
4349 if (have_c)
4350 print_ip6c_addr(s, buf);
4351 else
4352 print_ip6_addr(s, i, buf);
4353
4354 return rc;
4355}
4356
4357static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
4358 void *data, int size, struct event_format *event,
4359 struct print_arg *arg)
4360{
4361 char have_c = 0, have_p = 0;
4362 unsigned char *buf;
4363 struct sockaddr_storage *sa;
4364 int rc = 0;
4365
4366 /* pISpc */
4367 if (i == 'I') {
4368 if (*ptr == 'p') {
4369 have_p = 1;
4370 ptr++;
4371 rc++;
4372 }
4373 if (*ptr == 'c') {
4374 have_c = 1;
4375 ptr++;
4376 rc++;
4377 }
4378 }
4379
4380 if (arg->type == PRINT_FUNC) {
4381 process_defined_func(s, data, size, event, arg);
4382 return rc;
4383 }
4384
4385 if (arg->type != PRINT_FIELD) {
4386 trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
4387 return rc;
4388 }
4389
4390 if (!arg->field.field) {
4391 arg->field.field =
4392 pevent_find_any_field(event, arg->field.name);
4393 if (!arg->field.field) {
4394 do_warning("%s: field %s not found",
4395 __func__, arg->field.name);
4396 return rc;
4397 }
4398 }
4399
4400 sa = (struct sockaddr_storage *) (data + arg->field.field->offset);
4401
4402 if (sa->ss_family == AF_INET) {
4403 struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
4404
4405 if (arg->field.field->size < sizeof(struct sockaddr_in)) {
4406 trace_seq_printf(s, "INVALIDIPv4");
4407 return rc;
4408 }
4409
4410 print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr);
4411 if (have_p)
4412 trace_seq_printf(s, ":%d", ntohs(sa4->sin_port));
4413
4414
4415 } else if (sa->ss_family == AF_INET6) {
4416 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
4417
4418 if (arg->field.field->size < sizeof(struct sockaddr_in6)) {
4419 trace_seq_printf(s, "INVALIDIPv6");
4420 return rc;
4421 }
4422
4423 if (have_p)
4424 trace_seq_printf(s, "[");
4425
4426 buf = (unsigned char *) &sa6->sin6_addr;
4427 if (have_c)
4428 print_ip6c_addr(s, buf);
4429 else
4430 print_ip6_addr(s, i, buf);
4431
4432 if (have_p)
4433 trace_seq_printf(s, "]:%d", ntohs(sa6->sin6_port));
4434 }
4435
4436 return rc;
4437}
4438
4439static int print_ip_arg(struct trace_seq *s, const char *ptr,
4440 void *data, int size, struct event_format *event,
4441 struct print_arg *arg)
4442{
4443 char i = *ptr; /* 'i' or 'I' */
4444 char ver;
4445 int rc = 0;
4446
4447 ptr++;
4448 rc++;
4449
4450 ver = *ptr;
4451 ptr++;
4452 rc++;
4453
4454 switch (ver) {
4455 case '4':
4456 rc += print_ipv4_arg(s, ptr, i, data, size, event, arg);
4457 break;
4458 case '6':
4459 rc += print_ipv6_arg(s, ptr, i, data, size, event, arg);
4460 break;
4461 case 'S':
4462 rc += print_ipsa_arg(s, ptr, i, data, size, event, arg);
4463 break;
4464 default:
4465 return 0;
4466 }
4467
4468 return rc;
4469}
4470
4152static int is_printable_array(char *p, unsigned int len) 4471static int is_printable_array(char *p, unsigned int len)
4153{ 4472{
4154 unsigned int i; 4473 unsigned int i;
@@ -4337,6 +4656,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
4337 ptr++; 4656 ptr++;
4338 arg = arg->next; 4657 arg = arg->next;
4339 break; 4658 break;
4659 } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
4660 int n;
4661
4662 n = print_ip_arg(s, ptr+1, data, size, event, arg);
4663 if (n > 0) {
4664 ptr += n;
4665 arg = arg->next;
4666 break;
4667 }
4340 } 4668 }
4341 4669
4342 /* fall through */ 4670 /* fall through */
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index fd77d81ea748..0294c57b1f5e 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -38,7 +38,7 @@ OPTIONS
38--remove=:: 38--remove=::
39 Remove specified file from the cache. 39 Remove specified file from the cache.
40-M:: 40-M::
41--missing=:: 41--missing=::
42 List missing build ids in the cache for the specified file. 42 List missing build ids in the cache for the specified file.
43-u:: 43-u::
44--update:: 44--update::
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index cbb4f743d921..3e2aec94f806 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -89,6 +89,19 @@ raw encoding of 0x1A8 can be used:
89You should refer to the processor specific documentation for getting these 89You should refer to the processor specific documentation for getting these
90details. Some of them are referenced in the SEE ALSO section below. 90details. Some of them are referenced in the SEE ALSO section below.
91 91
92PARAMETERIZED EVENTS
93--------------------
94
95Some pmu events listed by 'perf-list' will be displayed with '?' in them. For
96example:
97
98 hv_gpci/dtbp_ptitc,phys_processor_idx=?/
99
100This means that when provided as an event, a value for '?' must
101also be supplied. For example:
102
103 perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
104
92OPTIONS 105OPTIONS
93------- 106-------
94 107
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 1d78a4064da4..43310d8661fe 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -12,11 +12,12 @@ SYNOPSIS
12 12
13DESCRIPTION 13DESCRIPTION
14----------- 14-----------
15"perf mem -t <TYPE> record" runs a command and gathers memory operation data 15"perf mem record" runs a command and gathers memory operation data
16from it, into perf.data. Perf record options are accepted and are passed through. 16from it, into perf.data. Perf record options are accepted and are passed through.
17 17
18"perf mem -t <TYPE> report" displays the result. It invokes perf report with the 18"perf mem report" displays the result. It invokes perf report with the
19right set of options to display a memory access profile. 19right set of options to display a memory access profile. By default, loads
20and stores are sampled. Use the -t option to limit to loads or stores.
20 21
21Note that on Intel systems the memory latency reported is the use-latency, 22Note that on Intel systems the memory latency reported is the use-latency,
22not the pure load (or store latency). Use latency includes any pipeline 23not the pure load (or store latency). Use latency includes any pipeline
@@ -29,7 +30,7 @@ OPTIONS
29 30
30-t:: 31-t::
31--type=:: 32--type=::
32 Select the memory operation type: load or store (default: load) 33 Select the memory operation type: load or store (default: load,store)
33 34
34-D:: 35-D::
35--dump-raw-samples=:: 36--dump-raw-samples=::
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index af9a54ece024..31e977459c51 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,12 +33,27 @@ OPTIONS
33 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a 33 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
34 hexadecimal event descriptor. 34 hexadecimal event descriptor.
35 35
36 - a hardware breakpoint event in the form of '\mem:addr[:access]' 36 - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
37 'param1', 'param2', etc are defined as formats for the PMU in
38 /sys/bus/event_sources/devices/<pmu>/format/*.
39
40 - a symbolically formed event like 'pmu/config=M,config1=N,config3=K/'
41
42 where M, N, K are numbers (in decimal, hex, octal format). Acceptable
43 values for each of 'config', 'config1' and 'config2' are defined by
44 corresponding entries in /sys/bus/event_sources/devices/<pmu>/format/*
45 param1 and param2 are defined as formats for the PMU in:
46 /sys/bus/event_sources/devices/<pmu>/format/*
47
48 - a hardware breakpoint event in the form of '\mem:addr[/len][:access]'
37 where addr is the address in memory you want to break in. 49 where addr is the address in memory you want to break in.
38 Access is the memory access type (read, write, execute) it can 50 Access is the memory access type (read, write, execute) it can
39 be passed as follows: '\mem:addr[:[r][w][x]]'. 51 be passed as follows: '\mem:addr[:[r][w][x]]'. len is the range,
52 number of bytes from specified addr, which the breakpoint will cover.
40 If you want to profile read-write accesses in 0x1000, just set 53 If you want to profile read-write accesses in 0x1000, just set
41 'mem:0x1000:rw'. 54 'mem:0x1000:rw'.
55 If you want to profile write accesses in [0x1000~1008), just set
56 'mem:0x1000/8:w'.
42 57
43--filter=<filter>:: 58--filter=<filter>::
44 Event filter. 59 Event filter.
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 21494806c0ab..a21eec05bc42 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -125,46 +125,46 @@ OPTIONS
125 is equivalent to: 125 is equivalent to:
126 126
127 perf script -f trace:<fields> -f sw:<fields> -f hw:<fields> 127 perf script -f trace:<fields> -f sw:<fields> -f hw:<fields>
128 128
129 i.e., the specified fields apply to all event types if the type string 129 i.e., the specified fields apply to all event types if the type string
130 is not given. 130 is not given.
131 131
132 The arguments are processed in the order received. A later usage can 132 The arguments are processed in the order received. A later usage can
133 reset a prior request. e.g.: 133 reset a prior request. e.g.:
134 134
135 -f trace: -f comm,tid,time,ip,sym 135 -f trace: -f comm,tid,time,ip,sym
136 136
137 The first -f suppresses trace events (field list is ""), but then the 137 The first -f suppresses trace events (field list is ""), but then the
138 second invocation sets the fields to comm,tid,time,ip,sym. In this case a 138 second invocation sets the fields to comm,tid,time,ip,sym. In this case a
139 warning is given to the user: 139 warning is given to the user:
140 140
141 "Overriding previous field request for all events." 141 "Overriding previous field request for all events."
142 142
143 Alternatively, consider the order: 143 Alternatively, consider the order:
144 144
145 -f comm,tid,time,ip,sym -f trace: 145 -f comm,tid,time,ip,sym -f trace:
146 146
147 The first -f sets the fields for all events and the second -f 147 The first -f sets the fields for all events and the second -f
148 suppresses trace events. The user is given a warning message about 148 suppresses trace events. The user is given a warning message about
149 the override, and the result of the above is that only S/W and H/W 149 the override, and the result of the above is that only S/W and H/W
150 events are displayed with the given fields. 150 events are displayed with the given fields.
151 151
152 For the 'wildcard' option if a user selected field is invalid for an 152 For the 'wildcard' option if a user selected field is invalid for an
153 event type, a message is displayed to the user that the option is 153 event type, a message is displayed to the user that the option is
154 ignored for that type. For example: 154 ignored for that type. For example:
155 155
156 $ perf script -f comm,tid,trace 156 $ perf script -f comm,tid,trace
157 'trace' not valid for hardware events. Ignoring. 157 'trace' not valid for hardware events. Ignoring.
158 'trace' not valid for software events. Ignoring. 158 'trace' not valid for software events. Ignoring.
159 159
160 Alternatively, if the type is given an invalid field is specified it 160 Alternatively, if the type is given an invalid field is specified it
161 is an error. For example: 161 is an error. For example:
162 162
163 perf script -v -f sw:comm,tid,trace 163 perf script -v -f sw:comm,tid,trace
164 'trace' not valid for software events. 164 'trace' not valid for software events.
165 165
166 At this point usage is displayed, and perf-script exits. 166 At this point usage is displayed, and perf-script exits.
167 167
168 Finally, a user may not set fields to none for all event types. 168 Finally, a user may not set fields to none for all event types.
169 i.e., -f "" is not allowed. 169 i.e., -f "" is not allowed.
170 170
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 29ee857c09c6..04e150d83e7d 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -25,10 +25,22 @@ OPTIONS
25 25
26-e:: 26-e::
27--event=:: 27--event=::
28 Select the PMU event. Selection can be a symbolic event name 28 Select the PMU event. Selection can be:
29 (use 'perf list' to list all events) or a raw PMU 29
30 event (eventsel+umask) in the form of rNNN where NNN is a 30 - a symbolic event name (use 'perf list' to list all events)
31 hexadecimal event descriptor. 31
32 - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
33 hexadecimal event descriptor.
34
35 - a symbolically formed event like 'pmu/param1=0x3,param2/' where
36 param1 and param2 are defined as formats for the PMU in
37 /sys/bus/event_sources/devices/<pmu>/format/*
38
39 - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
40 where M, N, K are numbers (in decimal, hex, octal format).
41 Acceptable values for each of 'config', 'config1' and 'config2'
42 parameters are defined by corresponding entries in
43 /sys/bus/event_sources/devices/<pmu>/format/*
32 44
33-i:: 45-i::
34--no-inherit:: 46--no-inherit::
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index 71f2844cf97f..7ed22ff1e1ac 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -68,4 +68,17 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
68 val, opflags); 68 val, opflags);
69} 69}
70 70
71#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
72#include <pthread.h>
73static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr,
74 size_t cpusetsize,
75 cpu_set_t *cpuset)
76{
77 attr = attr;
78 cpusetsize = cpusetsize;
79 cpuset = cpuset;
80 return 0;
81}
82#endif
83
71#endif /* _FUTEX_H */ 84#endif /* _FUTEX_H */
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 77d5cae54c6a..50e6b66aea1f 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -236,10 +236,10 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
236 if (errno == ENOENT) 236 if (errno == ENOENT)
237 return false; 237 return false;
238 238
239 pr_warning("Problems with %s file, consider removing it from the cache\n", 239 pr_warning("Problems with %s file, consider removing it from the cache\n",
240 filename); 240 filename);
241 } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) { 241 } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
242 pr_warning("Problems with %s file, consider removing it from the cache\n", 242 pr_warning("Problems with %s file, consider removing it from the cache\n",
243 filename); 243 filename);
244 } 244 }
245 245
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1fd96c13f199..74aada554b12 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -390,6 +390,15 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
390 } 390 }
391} 391}
392 392
393static struct data__file *fmt_to_data_file(struct perf_hpp_fmt *fmt)
394{
395 struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt);
396 void *ptr = dfmt - dfmt->idx;
397 struct data__file *d = container_of(ptr, struct data__file, fmt);
398
399 return d;
400}
401
393static struct hist_entry* 402static struct hist_entry*
394get_pair_data(struct hist_entry *he, struct data__file *d) 403get_pair_data(struct hist_entry *he, struct data__file *d)
395{ 404{
@@ -407,8 +416,7 @@ get_pair_data(struct hist_entry *he, struct data__file *d)
407static struct hist_entry* 416static struct hist_entry*
408get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt) 417get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
409{ 418{
410 void *ptr = dfmt - dfmt->idx; 419 struct data__file *d = fmt_to_data_file(&dfmt->fmt);
411 struct data__file *d = container_of(ptr, struct data__file, fmt);
412 420
413 return get_pair_data(he, d); 421 return get_pair_data(he, d);
414} 422}
@@ -430,7 +438,7 @@ static void hists__baseline_only(struct hists *hists)
430 next = rb_next(&he->rb_node_in); 438 next = rb_next(&he->rb_node_in);
431 if (!hist_entry__next_pair(he)) { 439 if (!hist_entry__next_pair(he)) {
432 rb_erase(&he->rb_node_in, root); 440 rb_erase(&he->rb_node_in, root);
433 hist_entry__free(he); 441 hist_entry__delete(he);
434 } 442 }
435 } 443 }
436} 444}
@@ -448,26 +456,30 @@ static void hists__precompute(struct hists *hists)
448 next = rb_first(root); 456 next = rb_first(root);
449 while (next != NULL) { 457 while (next != NULL) {
450 struct hist_entry *he, *pair; 458 struct hist_entry *he, *pair;
459 struct data__file *d;
460 int i;
451 461
452 he = rb_entry(next, struct hist_entry, rb_node_in); 462 he = rb_entry(next, struct hist_entry, rb_node_in);
453 next = rb_next(&he->rb_node_in); 463 next = rb_next(&he->rb_node_in);
454 464
455 pair = get_pair_data(he, &data__files[sort_compute]); 465 data__for_each_file_new(i, d) {
456 if (!pair) 466 pair = get_pair_data(he, d);
457 continue; 467 if (!pair)
468 continue;
458 469
459 switch (compute) { 470 switch (compute) {
460 case COMPUTE_DELTA: 471 case COMPUTE_DELTA:
461 compute_delta(he, pair); 472 compute_delta(he, pair);
462 break; 473 break;
463 case COMPUTE_RATIO: 474 case COMPUTE_RATIO:
464 compute_ratio(he, pair); 475 compute_ratio(he, pair);
465 break; 476 break;
466 case COMPUTE_WEIGHTED_DIFF: 477 case COMPUTE_WEIGHTED_DIFF:
467 compute_wdiff(he, pair); 478 compute_wdiff(he, pair);
468 break; 479 break;
469 default: 480 default:
470 BUG_ON(1); 481 BUG_ON(1);
482 }
471 } 483 }
472 } 484 }
473} 485}
@@ -517,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
517 529
518static int64_t 530static int64_t
519hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, 531hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
520 int c) 532 int c, int sort_idx)
521{ 533{
522 bool pairs_left = hist_entry__has_pairs(left); 534 bool pairs_left = hist_entry__has_pairs(left);
523 bool pairs_right = hist_entry__has_pairs(right); 535 bool pairs_right = hist_entry__has_pairs(right);
@@ -529,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
529 if (!pairs_left || !pairs_right) 541 if (!pairs_left || !pairs_right)
530 return pairs_left ? -1 : 1; 542 return pairs_left ? -1 : 1;
531 543
532 p_left = get_pair_data(left, &data__files[sort_compute]); 544 p_left = get_pair_data(left, &data__files[sort_idx]);
533 p_right = get_pair_data(right, &data__files[sort_compute]); 545 p_right = get_pair_data(right, &data__files[sort_idx]);
534 546
535 if (!p_left && !p_right) 547 if (!p_left && !p_right)
536 return 0; 548 return 0;
@@ -546,90 +558,102 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
546} 558}
547 559
548static int64_t 560static int64_t
549hist_entry__cmp_nop(struct hist_entry *left __maybe_unused, 561hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
562 int c, int sort_idx)
563{
564 struct hist_entry *p_right, *p_left;
565
566 p_left = get_pair_data(left, &data__files[sort_idx]);
567 p_right = get_pair_data(right, &data__files[sort_idx]);
568
569 if (!p_left && !p_right)
570 return 0;
571
572 if (!p_left || !p_right)
573 return p_left ? -1 : 1;
574
575 if (c != COMPUTE_DELTA) {
576 /*
577 * The delta can be computed without the baseline, but
578 * others are not. Put those entries which have no
579 * values below.
580 */
581 if (left->dummy && right->dummy)
582 return 0;
583
584 if (left->dummy || right->dummy)
585 return left->dummy ? 1 : -1;
586 }
587
588 return __hist_entry__cmp_compute(p_left, p_right, c);
589}
590
591static int64_t
592hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused,
593 struct hist_entry *left __maybe_unused,
550 struct hist_entry *right __maybe_unused) 594 struct hist_entry *right __maybe_unused)
551{ 595{
552 return 0; 596 return 0;
553} 597}
554 598
555static int64_t 599static int64_t
556hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right) 600hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused,
601 struct hist_entry *left, struct hist_entry *right)
557{ 602{
558 if (sort_compute)
559 return 0;
560
561 if (left->stat.period == right->stat.period) 603 if (left->stat.period == right->stat.period)
562 return 0; 604 return 0;
563 return left->stat.period > right->stat.period ? 1 : -1; 605 return left->stat.period > right->stat.period ? 1 : -1;
564} 606}
565 607
566static int64_t 608static int64_t
567hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right) 609hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
610 struct hist_entry *left, struct hist_entry *right)
568{ 611{
569 return hist_entry__cmp_compute(right, left, COMPUTE_DELTA); 612 struct data__file *d = fmt_to_data_file(fmt);
613
614 return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
570} 615}
571 616
572static int64_t 617static int64_t
573hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right) 618hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
619 struct hist_entry *left, struct hist_entry *right)
574{ 620{
575 return hist_entry__cmp_compute(right, left, COMPUTE_RATIO); 621 struct data__file *d = fmt_to_data_file(fmt);
622
623 return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx);
576} 624}
577 625
578static int64_t 626static int64_t
579hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right) 627hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
628 struct hist_entry *left, struct hist_entry *right)
580{ 629{
581 return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF); 630 struct data__file *d = fmt_to_data_file(fmt);
631
632 return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
582} 633}
583 634
584static void insert_hist_entry_by_compute(struct rb_root *root, 635static int64_t
585 struct hist_entry *he, 636hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
586 int c) 637 struct hist_entry *left, struct hist_entry *right)
587{ 638{
588 struct rb_node **p = &root->rb_node; 639 return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
589 struct rb_node *parent = NULL; 640 sort_compute);
590 struct hist_entry *iter;
591
592 while (*p != NULL) {
593 parent = *p;
594 iter = rb_entry(parent, struct hist_entry, rb_node);
595 if (hist_entry__cmp_compute(he, iter, c) < 0)
596 p = &(*p)->rb_left;
597 else
598 p = &(*p)->rb_right;
599 }
600
601 rb_link_node(&he->rb_node, parent, p);
602 rb_insert_color(&he->rb_node, root);
603} 641}
604 642
605static void hists__compute_resort(struct hists *hists) 643static int64_t
644hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
645 struct hist_entry *left, struct hist_entry *right)
606{ 646{
607 struct rb_root *root; 647 return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
608 struct rb_node *next; 648 sort_compute);
609 649}
610 if (sort__need_collapse)
611 root = &hists->entries_collapsed;
612 else
613 root = hists->entries_in;
614
615 hists->entries = RB_ROOT;
616 next = rb_first(root);
617
618 hists__reset_stats(hists);
619 hists__reset_col_len(hists);
620
621 while (next != NULL) {
622 struct hist_entry *he;
623
624 he = rb_entry(next, struct hist_entry, rb_node_in);
625 next = rb_next(&he->rb_node_in);
626
627 insert_hist_entry_by_compute(&hists->entries, he, compute);
628 hists__inc_stats(hists, he);
629 650
630 if (!he->filtered) 651static int64_t
631 hists__calc_col_len(hists, he); 652hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused,
632 } 653 struct hist_entry *left, struct hist_entry *right)
654{
655 return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
656 sort_compute);
633} 657}
634 658
635static void hists__process(struct hists *hists) 659static void hists__process(struct hists *hists)
@@ -637,12 +661,8 @@ static void hists__process(struct hists *hists)
637 if (show_baseline_only) 661 if (show_baseline_only)
638 hists__baseline_only(hists); 662 hists__baseline_only(hists);
639 663
640 if (sort_compute) { 664 hists__precompute(hists);
641 hists__precompute(hists); 665 hists__output_resort(hists, NULL);
642 hists__compute_resort(hists);
643 } else {
644 hists__output_resort(hists, NULL);
645 }
646 666
647 hists__fprintf(hists, true, 0, 0, 0, stdout); 667 hists__fprintf(hists, true, 0, 0, 0, stdout);
648} 668}
@@ -841,7 +861,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
841 char pfmt[20] = " "; 861 char pfmt[20] = " ";
842 862
843 if (!pair) 863 if (!pair)
844 goto dummy_print; 864 goto no_print;
845 865
846 switch (comparison_method) { 866 switch (comparison_method) {
847 case COMPUTE_DELTA: 867 case COMPUTE_DELTA:
@@ -850,8 +870,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
850 else 870 else
851 diff = compute_delta(he, pair); 871 diff = compute_delta(he, pair);
852 872
853 if (fabs(diff) < 0.01)
854 goto dummy_print;
855 scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1); 873 scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
856 return percent_color_snprintf(hpp->buf, hpp->size, 874 return percent_color_snprintf(hpp->buf, hpp->size,
857 pfmt, diff); 875 pfmt, diff);
@@ -883,6 +901,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
883 } 901 }
884dummy_print: 902dummy_print:
885 return scnprintf(hpp->buf, hpp->size, "%*s", 903 return scnprintf(hpp->buf, hpp->size, "%*s",
904 dfmt->header_width, "N/A");
905no_print:
906 return scnprintf(hpp->buf, hpp->size, "%*s",
886 dfmt->header_width, pfmt); 907 dfmt->header_width, pfmt);
887} 908}
888 909
@@ -932,14 +953,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
932 else 953 else
933 diff = compute_delta(he, pair); 954 diff = compute_delta(he, pair);
934 955
935 if (fabs(diff) >= 0.01) 956 scnprintf(buf, size, "%+4.2F%%", diff);
936 scnprintf(buf, size, "%+4.2F%%", diff);
937 break; 957 break;
938 958
939 case PERF_HPP_DIFF__RATIO: 959 case PERF_HPP_DIFF__RATIO:
940 /* No point for ratio number if we are dummy.. */ 960 /* No point for ratio number if we are dummy.. */
941 if (he->dummy) 961 if (he->dummy) {
962 scnprintf(buf, size, "N/A");
942 break; 963 break;
964 }
943 965
944 if (pair->diff.computed) 966 if (pair->diff.computed)
945 ratio = pair->diff.period_ratio; 967 ratio = pair->diff.period_ratio;
@@ -952,8 +974,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
952 974
953 case PERF_HPP_DIFF__WEIGHTED_DIFF: 975 case PERF_HPP_DIFF__WEIGHTED_DIFF:
954 /* No point for wdiff number if we are dummy.. */ 976 /* No point for wdiff number if we are dummy.. */
955 if (he->dummy) 977 if (he->dummy) {
978 scnprintf(buf, size, "N/A");
956 break; 979 break;
980 }
957 981
958 if (pair->diff.computed) 982 if (pair->diff.computed)
959 wdiff = pair->diff.wdiff; 983 wdiff = pair->diff.wdiff;
@@ -1105,9 +1129,10 @@ static void data__hpp_register(struct data__file *d, int idx)
1105 perf_hpp__register_sort_field(fmt); 1129 perf_hpp__register_sort_field(fmt);
1106} 1130}
1107 1131
1108static void ui_init(void) 1132static int ui_init(void)
1109{ 1133{
1110 struct data__file *d; 1134 struct data__file *d;
1135 struct perf_hpp_fmt *fmt;
1111 int i; 1136 int i;
1112 1137
1113 data__for_each_file(i, d) { 1138 data__for_each_file(i, d) {
@@ -1137,6 +1162,46 @@ static void ui_init(void)
1137 data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD : 1162 data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
1138 PERF_HPP_DIFF__PERIOD_BASELINE); 1163 PERF_HPP_DIFF__PERIOD_BASELINE);
1139 } 1164 }
1165
1166 if (!sort_compute)
1167 return 0;
1168
1169 /*
1170 * Prepend an fmt to sort on columns at 'sort_compute' first.
1171 * This fmt is added only to the sort list but not to the
1172 * output fields list.
1173 *
1174 * Note that this column (data) can be compared twice - one
1175 * for this 'sort_compute' fmt and another for the normal
1176 * diff_hpp_fmt. But it shouldn't a problem as most entries
1177 * will be sorted out by first try or baseline and comparing
1178 * is not a costly operation.
1179 */
1180 fmt = zalloc(sizeof(*fmt));
1181 if (fmt == NULL) {
1182 pr_err("Memory allocation failed\n");
1183 return -1;
1184 }
1185
1186 fmt->cmp = hist_entry__cmp_nop;
1187 fmt->collapse = hist_entry__cmp_nop;
1188
1189 switch (compute) {
1190 case COMPUTE_DELTA:
1191 fmt->sort = hist_entry__cmp_delta_idx;
1192 break;
1193 case COMPUTE_RATIO:
1194 fmt->sort = hist_entry__cmp_ratio_idx;
1195 break;
1196 case COMPUTE_WEIGHTED_DIFF:
1197 fmt->sort = hist_entry__cmp_wdiff_idx;
1198 break;
1199 default:
1200 BUG_ON(1);
1201 }
1202
1203 list_add(&fmt->sort_list, &perf_hpp__sort_list);
1204 return 0;
1140} 1205}
1141 1206
1142static int data_init(int argc, const char **argv) 1207static int data_init(int argc, const char **argv)
@@ -1202,7 +1267,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
1202 if (data_init(argc, argv) < 0) 1267 if (data_init(argc, argv) < 0)
1203 return -1; 1268 return -1;
1204 1269
1205 ui_init(); 1270 if (ui_init() < 0)
1271 return -1;
1206 1272
1207 sort__mode = SORT_MODE__DIFF; 1273 sort__mode = SORT_MODE__DIFF;
1208 1274
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 84df2deed988..a13641e066f5 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -343,6 +343,7 @@ static int __cmd_inject(struct perf_inject *inject)
343 int ret = -EINVAL; 343 int ret = -EINVAL;
344 struct perf_session *session = inject->session; 344 struct perf_session *session = inject->session;
345 struct perf_data_file *file_out = &inject->output; 345 struct perf_data_file *file_out = &inject->output;
346 int fd = perf_data_file__fd(file_out);
346 347
347 signal(SIGINT, sig_handler); 348 signal(SIGINT, sig_handler);
348 349
@@ -376,7 +377,7 @@ static int __cmd_inject(struct perf_inject *inject)
376 } 377 }
377 378
378 if (!file_out->is_pipe) 379 if (!file_out->is_pipe)
379 lseek(file_out->fd, session->header.data_offset, SEEK_SET); 380 lseek(fd, session->header.data_offset, SEEK_SET);
380 381
381 ret = perf_session__process_events(session, &inject->tool); 382 ret = perf_session__process_events(session, &inject->tool);
382 383
@@ -385,7 +386,7 @@ static int __cmd_inject(struct perf_inject *inject)
385 perf_header__set_feat(&session->header, 386 perf_header__set_feat(&session->header,
386 HEADER_BUILD_ID); 387 HEADER_BUILD_ID);
387 session->header.data_size = inject->bytes_written; 388 session->header.data_size = inject->bytes_written;
388 perf_session__write_header(session, session->evlist, file_out->fd, true); 389 perf_session__write_header(session, session->evlist, fd, true);
389 } 390 }
390 391
391 return ret; 392 return ret;
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 24db6ffe2957..9b5663950a4d 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -7,44 +7,47 @@
7#include "util/session.h" 7#include "util/session.h"
8#include "util/data.h" 8#include "util/data.h"
9 9
10#define MEM_OPERATION_LOAD "load" 10#define MEM_OPERATION_LOAD 0x1
11#define MEM_OPERATION_STORE "store" 11#define MEM_OPERATION_STORE 0x2
12
13static const char *mem_operation = MEM_OPERATION_LOAD;
14 12
15struct perf_mem { 13struct perf_mem {
16 struct perf_tool tool; 14 struct perf_tool tool;
17 char const *input_name; 15 char const *input_name;
18 bool hide_unresolved; 16 bool hide_unresolved;
19 bool dump_raw; 17 bool dump_raw;
18 int operation;
20 const char *cpu_list; 19 const char *cpu_list;
21 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 20 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
22}; 21};
23 22
24static int __cmd_record(int argc, const char **argv) 23static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
25{ 24{
26 int rec_argc, i = 0, j; 25 int rec_argc, i = 0, j;
27 const char **rec_argv; 26 const char **rec_argv;
28 char event[64];
29 int ret; 27 int ret;
30 28
31 rec_argc = argc + 4; 29 rec_argc = argc + 7; /* max number of arguments */
32 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 30 rec_argv = calloc(rec_argc + 1, sizeof(char *));
33 if (!rec_argv) 31 if (!rec_argv)
34 return -1; 32 return -1;
35 33
36 rec_argv[i++] = strdup("record"); 34 rec_argv[i++] = "record";
37 if (!strcmp(mem_operation, MEM_OPERATION_LOAD))
38 rec_argv[i++] = strdup("-W");
39 rec_argv[i++] = strdup("-d");
40 rec_argv[i++] = strdup("-e");
41 35
42 if (strcmp(mem_operation, MEM_OPERATION_LOAD)) 36 if (mem->operation & MEM_OPERATION_LOAD)
43 sprintf(event, "cpu/mem-stores/pp"); 37 rec_argv[i++] = "-W";
44 else 38
45 sprintf(event, "cpu/mem-loads/pp"); 39 rec_argv[i++] = "-d";
40
41 if (mem->operation & MEM_OPERATION_LOAD) {
42 rec_argv[i++] = "-e";
43 rec_argv[i++] = "cpu/mem-loads/pp";
44 }
45
46 if (mem->operation & MEM_OPERATION_STORE) {
47 rec_argv[i++] = "-e";
48 rec_argv[i++] = "cpu/mem-stores/pp";
49 }
46 50
47 rec_argv[i++] = strdup(event);
48 for (j = 1; j < argc; j++, i++) 51 for (j = 1; j < argc; j++, i++)
49 rec_argv[i] = argv[j]; 52 rec_argv[i] = argv[j];
50 53
@@ -162,17 +165,17 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
162 if (!rep_argv) 165 if (!rep_argv)
163 return -1; 166 return -1;
164 167
165 rep_argv[i++] = strdup("report"); 168 rep_argv[i++] = "report";
166 rep_argv[i++] = strdup("--mem-mode"); 169 rep_argv[i++] = "--mem-mode";
167 rep_argv[i++] = strdup("-n"); /* display number of samples */ 170 rep_argv[i++] = "-n"; /* display number of samples */
168 171
169 /* 172 /*
170 * there is no weight (cost) associated with stores, so don't print 173 * there is no weight (cost) associated with stores, so don't print
171 * the column 174 * the column
172 */ 175 */
173 if (strcmp(mem_operation, MEM_OPERATION_LOAD)) 176 if (!(mem->operation & MEM_OPERATION_LOAD))
174 rep_argv[i++] = strdup("--sort=mem,sym,dso,symbol_daddr," 177 rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr,"
175 "dso_daddr,tlb,locked"); 178 "dso_daddr,tlb,locked";
176 179
177 for (j = 1; j < argc; j++, i++) 180 for (j = 1; j < argc; j++, i++)
178 rep_argv[i] = argv[j]; 181 rep_argv[i] = argv[j];
@@ -182,6 +185,75 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
182 return ret; 185 return ret;
183} 186}
184 187
188struct mem_mode {
189 const char *name;
190 int mode;
191};
192
193#define MEM_OPT(n, m) \
194 { .name = n, .mode = (m) }
195
196#define MEM_END { .name = NULL }
197
198static const struct mem_mode mem_modes[]={
199 MEM_OPT("load", MEM_OPERATION_LOAD),
200 MEM_OPT("store", MEM_OPERATION_STORE),
201 MEM_END
202};
203
204static int
205parse_mem_ops(const struct option *opt, const char *str, int unset)
206{
207 int *mode = (int *)opt->value;
208 const struct mem_mode *m;
209 char *s, *os = NULL, *p;
210 int ret = -1;
211
212 if (unset)
213 return 0;
214
215 /* str may be NULL in case no arg is passed to -t */
216 if (str) {
217 /* because str is read-only */
218 s = os = strdup(str);
219 if (!s)
220 return -1;
221
222 /* reset mode */
223 *mode = 0;
224
225 for (;;) {
226 p = strchr(s, ',');
227 if (p)
228 *p = '\0';
229
230 for (m = mem_modes; m->name; m++) {
231 if (!strcasecmp(s, m->name))
232 break;
233 }
234 if (!m->name) {
235 fprintf(stderr, "unknown sampling op %s,"
236 " check man page\n", s);
237 goto error;
238 }
239
240 *mode |= m->mode;
241
242 if (!p)
243 break;
244
245 s = p + 1;
246 }
247 }
248 ret = 0;
249
250 if (*mode == 0)
251 *mode = MEM_OPERATION_LOAD;
252error:
253 free(os);
254 return ret;
255}
256
185int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) 257int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
186{ 258{
187 struct stat st; 259 struct stat st;
@@ -197,10 +269,15 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
197 .ordered_events = true, 269 .ordered_events = true,
198 }, 270 },
199 .input_name = "perf.data", 271 .input_name = "perf.data",
272 /*
273 * default to both load an store sampling
274 */
275 .operation = MEM_OPERATION_LOAD | MEM_OPERATION_STORE,
200 }; 276 };
201 const struct option mem_options[] = { 277 const struct option mem_options[] = {
202 OPT_STRING('t', "type", &mem_operation, 278 OPT_CALLBACK('t', "type", &mem.operation,
203 "type", "memory operations(load/store)"), 279 "type", "memory operations(load,store) Default load,store",
280 parse_mem_ops),
204 OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw, 281 OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw,
205 "dump raw samples in ASCII"), 282 "dump raw samples in ASCII"),
206 OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved, 283 OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved,
@@ -225,7 +302,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
225 argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, 302 argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
226 mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); 303 mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
227 304
228 if (!argc || !(strncmp(argv[0], "rec", 3) || mem_operation)) 305 if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
229 usage_with_options(mem_usage, mem_options); 306 usage_with_options(mem_usage, mem_options);
230 307
231 if (!mem.input_name || !strlen(mem.input_name)) { 308 if (!mem.input_name || !strlen(mem.input_name)) {
@@ -236,7 +313,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
236 } 313 }
237 314
238 if (!strncmp(argv[0], "rec", 3)) 315 if (!strncmp(argv[0], "rec", 3))
239 return __cmd_record(argc, argv); 316 return __cmd_record(argc, argv, &mem);
240 else if (!strncmp(argv[0], "rep", 3)) 317 else if (!strncmp(argv[0], "rep", 3))
241 return report_events(argc, argv, &mem); 318 return report_events(argc, argv, &mem);
242 else 319 else
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 8648c6d3003d..404ab3434052 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -190,16 +190,30 @@ out:
190 return rc; 190 return rc;
191} 191}
192 192
193static int process_sample_event(struct perf_tool *tool,
194 union perf_event *event,
195 struct perf_sample *sample,
196 struct perf_evsel *evsel,
197 struct machine *machine)
198{
199 struct record *rec = container_of(tool, struct record, tool);
200
201 rec->samples++;
202
203 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
204}
205
193static int process_buildids(struct record *rec) 206static int process_buildids(struct record *rec)
194{ 207{
195 struct perf_data_file *file = &rec->file; 208 struct perf_data_file *file = &rec->file;
196 struct perf_session *session = rec->session; 209 struct perf_session *session = rec->session;
197 u64 start = session->header.data_offset;
198 210
199 u64 size = lseek(file->fd, 0, SEEK_CUR); 211 u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
200 if (size == 0) 212 if (size == 0)
201 return 0; 213 return 0;
202 214
215 file->size = size;
216
203 /* 217 /*
204 * During this process, it'll load kernel map and replace the 218 * During this process, it'll load kernel map and replace the
205 * dso->long_name to a real pathname it found. In this case 219 * dso->long_name to a real pathname it found. In this case
@@ -211,9 +225,7 @@ static int process_buildids(struct record *rec)
211 */ 225 */
212 symbol_conf.ignore_vmlinux_buildid = true; 226 symbol_conf.ignore_vmlinux_buildid = true;
213 227
214 return __perf_session__process_events(session, start, 228 return perf_session__process_events(session, &rec->tool);
215 size - start,
216 size, &build_id__mark_dso_hit_ops);
217} 229}
218 230
219static void perf_event__synthesize_guest_os(struct machine *machine, void *data) 231static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
@@ -322,6 +334,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
322 struct perf_data_file *file = &rec->file; 334 struct perf_data_file *file = &rec->file;
323 struct perf_session *session; 335 struct perf_session *session;
324 bool disabled = false, draining = false; 336 bool disabled = false, draining = false;
337 int fd;
325 338
326 rec->progname = argv[0]; 339 rec->progname = argv[0];
327 340
@@ -336,6 +349,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
336 return -1; 349 return -1;
337 } 350 }
338 351
352 fd = perf_data_file__fd(file);
339 rec->session = session; 353 rec->session = session;
340 354
341 record__init_features(rec); 355 record__init_features(rec);
@@ -360,12 +374,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
360 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); 374 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
361 375
362 if (file->is_pipe) { 376 if (file->is_pipe) {
363 err = perf_header__write_pipe(file->fd); 377 err = perf_header__write_pipe(fd);
364 if (err < 0) 378 if (err < 0)
365 goto out_child; 379 goto out_child;
366 } else { 380 } else {
367 err = perf_session__write_header(session, rec->evlist, 381 err = perf_session__write_header(session, rec->evlist, fd, false);
368 file->fd, false);
369 if (err < 0) 382 if (err < 0)
370 goto out_child; 383 goto out_child;
371 } 384 }
@@ -397,7 +410,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
397 * return this more properly and also 410 * return this more properly and also
398 * propagate errors that now are calling die() 411 * propagate errors that now are calling die()
399 */ 412 */
400 err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist, 413 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
401 process_synthesized_event); 414 process_synthesized_event);
402 if (err <= 0) { 415 if (err <= 0) {
403 pr_err("Couldn't record tracing data.\n"); 416 pr_err("Couldn't record tracing data.\n");
@@ -504,19 +517,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
504 goto out_child; 517 goto out_child;
505 } 518 }
506 519
507 if (!quiet) { 520 if (!quiet)
508 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); 521 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
509 522
510 /*
511 * Approximate RIP event size: 24 bytes.
512 */
513 fprintf(stderr,
514 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
515 (double)rec->bytes_written / 1024.0 / 1024.0,
516 file->path,
517 rec->bytes_written / 24);
518 }
519
520out_child: 523out_child:
521 if (forks) { 524 if (forks) {
522 int exit_status; 525 int exit_status;
@@ -535,13 +538,29 @@ out_child:
535 } else 538 } else
536 status = err; 539 status = err;
537 540
541 /* this will be recalculated during process_buildids() */
542 rec->samples = 0;
543
538 if (!err && !file->is_pipe) { 544 if (!err && !file->is_pipe) {
539 rec->session->header.data_size += rec->bytes_written; 545 rec->session->header.data_size += rec->bytes_written;
540 546
541 if (!rec->no_buildid) 547 if (!rec->no_buildid)
542 process_buildids(rec); 548 process_buildids(rec);
543 perf_session__write_header(rec->session, rec->evlist, 549 perf_session__write_header(rec->session, rec->evlist, fd, true);
544 file->fd, true); 550 }
551
552 if (!err && !quiet) {
553 char samples[128];
554
555 if (rec->samples)
556 scnprintf(samples, sizeof(samples),
557 " (%" PRIu64 " samples)", rec->samples);
558 else
559 samples[0] = '\0';
560
561 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
562 perf_data_file__size(file) / 1024.0 / 1024.0,
563 file->path, samples);
545 } 564 }
546 565
547out_delete_session: 566out_delete_session:
@@ -720,6 +739,13 @@ static struct record record = {
720 .default_per_cpu = true, 739 .default_per_cpu = true,
721 }, 740 },
722 }, 741 },
742 .tool = {
743 .sample = process_sample_event,
744 .fork = perf_event__process_fork,
745 .comm = perf_event__process_comm,
746 .mmap = perf_event__process_mmap,
747 .mmap2 = perf_event__process_mmap2,
748 },
723}; 749};
724 750
725#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: " 751#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 072ae8ad67fc..2f91094e228b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -86,17 +86,6 @@ static int report__config(const char *var, const char *value, void *cb)
86 return perf_default_config(var, value, cb); 86 return perf_default_config(var, value, cb);
87} 87}
88 88
89static void report__inc_stats(struct report *rep, struct hist_entry *he)
90{
91 /*
92 * The @he is either of a newly created one or an existing one
93 * merging current sample. We only want to count a new one so
94 * checking ->nr_events being 1.
95 */
96 if (he->stat.nr_events == 1)
97 rep->nr_entries++;
98}
99
100static int hist_iter__report_callback(struct hist_entry_iter *iter, 89static int hist_iter__report_callback(struct hist_entry_iter *iter,
101 struct addr_location *al, bool single, 90 struct addr_location *al, bool single,
102 void *arg) 91 void *arg)
@@ -108,8 +97,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
108 struct mem_info *mi; 97 struct mem_info *mi;
109 struct branch_info *bi; 98 struct branch_info *bi;
110 99
111 report__inc_stats(rep, he);
112
113 if (!ui__has_annotation()) 100 if (!ui__has_annotation())
114 return 0; 101 return 0;
115 102
@@ -499,6 +486,9 @@ static int __cmd_report(struct report *rep)
499 486
500 report__warn_kptr_restrict(rep); 487 report__warn_kptr_restrict(rep);
501 488
489 evlist__for_each(session->evlist, pos)
490 rep->nr_entries += evsel__hists(pos)->nr_entries;
491
502 if (use_browser == 0) { 492 if (use_browser == 0) {
503 if (verbose > 3) 493 if (verbose > 3)
504 perf_session__fprintf(session, stdout); 494 perf_session__fprintf(session, stdout);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 891086376381..e598e4e98170 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1730,7 +1730,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
1730 "detailed run - start a lot of events"), 1730 "detailed run - start a lot of events"),
1731 OPT_BOOLEAN('S', "sync", &sync_run, 1731 OPT_BOOLEAN('S', "sync", &sync_run,
1732 "call sync() before starting a run"), 1732 "call sync() before starting a run"),
1733 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 1733 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
1734 "print large numbers with thousands\' separators", 1734 "print large numbers with thousands\' separators",
1735 stat__set_big_num), 1735 stat__set_big_num),
1736 OPT_STRING('C', "cpu", &target.cpu_list, "cpu", 1736 OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 616f0fcb4701..c4c7eac69de4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -165,7 +165,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
165 err ? "[unknown]" : uts.release, perf_version_string); 165 err ? "[unknown]" : uts.release, perf_version_string);
166 if (use_browser <= 0) 166 if (use_browser <= 0)
167 sleep(5); 167 sleep(5);
168 168
169 map->erange_warned = true; 169 map->erange_warned = true;
170} 170}
171 171
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index badfabc6a01f..7e935f1083ec 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -929,66 +929,66 @@ static struct syscall_fmt {
929 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, 929 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
930 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), }, 930 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
931 { .name = "close", .errmsg = true, 931 { .name = "close", .errmsg = true,
932 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, 932 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
933 { .name = "connect", .errmsg = true, }, 933 { .name = "connect", .errmsg = true, },
934 { .name = "dup", .errmsg = true, 934 { .name = "dup", .errmsg = true,
935 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 935 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
936 { .name = "dup2", .errmsg = true, 936 { .name = "dup2", .errmsg = true,
937 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 937 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
938 { .name = "dup3", .errmsg = true, 938 { .name = "dup3", .errmsg = true,
939 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 939 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
940 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), }, 940 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
941 { .name = "eventfd2", .errmsg = true, 941 { .name = "eventfd2", .errmsg = true,
942 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, }, 942 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
943 { .name = "faccessat", .errmsg = true, 943 { .name = "faccessat", .errmsg = true,
944 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 944 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
945 { .name = "fadvise64", .errmsg = true, 945 { .name = "fadvise64", .errmsg = true,
946 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 946 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
947 { .name = "fallocate", .errmsg = true, 947 { .name = "fallocate", .errmsg = true,
948 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 948 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
949 { .name = "fchdir", .errmsg = true, 949 { .name = "fchdir", .errmsg = true,
950 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 950 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
951 { .name = "fchmod", .errmsg = true, 951 { .name = "fchmod", .errmsg = true,
952 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 952 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
953 { .name = "fchmodat", .errmsg = true, 953 { .name = "fchmodat", .errmsg = true,
954 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 954 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
955 { .name = "fchown", .errmsg = true, 955 { .name = "fchown", .errmsg = true,
956 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 956 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
957 { .name = "fchownat", .errmsg = true, 957 { .name = "fchownat", .errmsg = true,
958 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 958 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
959 { .name = "fcntl", .errmsg = true, 959 { .name = "fcntl", .errmsg = true,
960 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 960 .arg_scnprintf = { [0] = SCA_FD, /* fd */
961 [1] = SCA_STRARRAY, /* cmd */ }, 961 [1] = SCA_STRARRAY, /* cmd */ },
962 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, }, 962 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
963 { .name = "fdatasync", .errmsg = true, 963 { .name = "fdatasync", .errmsg = true,
964 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 964 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
965 { .name = "flock", .errmsg = true, 965 { .name = "flock", .errmsg = true,
966 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 966 .arg_scnprintf = { [0] = SCA_FD, /* fd */
967 [1] = SCA_FLOCK, /* cmd */ }, }, 967 [1] = SCA_FLOCK, /* cmd */ }, },
968 { .name = "fsetxattr", .errmsg = true, 968 { .name = "fsetxattr", .errmsg = true,
969 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 969 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
970 { .name = "fstat", .errmsg = true, .alias = "newfstat", 970 { .name = "fstat", .errmsg = true, .alias = "newfstat",
971 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 971 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
972 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", 972 { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
973 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 973 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
974 { .name = "fstatfs", .errmsg = true, 974 { .name = "fstatfs", .errmsg = true,
975 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 975 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
976 { .name = "fsync", .errmsg = true, 976 { .name = "fsync", .errmsg = true,
977 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 977 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
978 { .name = "ftruncate", .errmsg = true, 978 { .name = "ftruncate", .errmsg = true,
979 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 979 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
980 { .name = "futex", .errmsg = true, 980 { .name = "futex", .errmsg = true,
981 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, }, 981 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
982 { .name = "futimesat", .errmsg = true, 982 { .name = "futimesat", .errmsg = true,
983 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 983 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
984 { .name = "getdents", .errmsg = true, 984 { .name = "getdents", .errmsg = true,
985 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 985 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
986 { .name = "getdents64", .errmsg = true, 986 { .name = "getdents64", .errmsg = true,
987 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 987 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
988 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), }, 988 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
989 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 989 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
990 { .name = "ioctl", .errmsg = true, 990 { .name = "ioctl", .errmsg = true,
991 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 991 .arg_scnprintf = { [0] = SCA_FD, /* fd */
992#if defined(__i386__) || defined(__x86_64__) 992#if defined(__i386__) || defined(__x86_64__)
993/* 993/*
994 * FIXME: Make this available to all arches. 994 * FIXME: Make this available to all arches.
@@ -1002,7 +1002,7 @@ static struct syscall_fmt {
1002 { .name = "kill", .errmsg = true, 1002 { .name = "kill", .errmsg = true,
1003 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 1003 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1004 { .name = "linkat", .errmsg = true, 1004 { .name = "linkat", .errmsg = true,
1005 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 1005 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
1006 { .name = "lseek", .errmsg = true, 1006 { .name = "lseek", .errmsg = true,
1007 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 1007 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1008 [2] = SCA_STRARRAY, /* whence */ }, 1008 [2] = SCA_STRARRAY, /* whence */ },
@@ -1012,9 +1012,9 @@ static struct syscall_fmt {
1012 .arg_scnprintf = { [0] = SCA_HEX, /* start */ 1012 .arg_scnprintf = { [0] = SCA_HEX, /* start */
1013 [2] = SCA_MADV_BHV, /* behavior */ }, }, 1013 [2] = SCA_MADV_BHV, /* behavior */ }, },
1014 { .name = "mkdirat", .errmsg = true, 1014 { .name = "mkdirat", .errmsg = true,
1015 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 1015 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
1016 { .name = "mknodat", .errmsg = true, 1016 { .name = "mknodat", .errmsg = true,
1017 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 1017 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
1018 { .name = "mlock", .errmsg = true, 1018 { .name = "mlock", .errmsg = true,
1019 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 1019 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1020 { .name = "mlockall", .errmsg = true, 1020 { .name = "mlockall", .errmsg = true,
@@ -1036,9 +1036,9 @@ static struct syscall_fmt {
1036 { .name = "munmap", .errmsg = true, 1036 { .name = "munmap", .errmsg = true,
1037 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 1037 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1038 { .name = "name_to_handle_at", .errmsg = true, 1038 { .name = "name_to_handle_at", .errmsg = true,
1039 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1039 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1040 { .name = "newfstatat", .errmsg = true, 1040 { .name = "newfstatat", .errmsg = true,
1041 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1041 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1042 { .name = "open", .errmsg = true, 1042 { .name = "open", .errmsg = true,
1043 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, }, 1043 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
1044 { .name = "open_by_handle_at", .errmsg = true, 1044 { .name = "open_by_handle_at", .errmsg = true,
@@ -1052,20 +1052,20 @@ static struct syscall_fmt {
1052 { .name = "poll", .errmsg = true, .timeout = true, }, 1052 { .name = "poll", .errmsg = true, .timeout = true, },
1053 { .name = "ppoll", .errmsg = true, .timeout = true, }, 1053 { .name = "ppoll", .errmsg = true, .timeout = true, },
1054 { .name = "pread", .errmsg = true, .alias = "pread64", 1054 { .name = "pread", .errmsg = true, .alias = "pread64",
1055 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1055 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1056 { .name = "preadv", .errmsg = true, .alias = "pread", 1056 { .name = "preadv", .errmsg = true, .alias = "pread",
1057 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1057 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1058 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), }, 1058 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
1059 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", 1059 { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
1060 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1060 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1061 { .name = "pwritev", .errmsg = true, 1061 { .name = "pwritev", .errmsg = true,
1062 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1062 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1063 { .name = "read", .errmsg = true, 1063 { .name = "read", .errmsg = true,
1064 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1064 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1065 { .name = "readlinkat", .errmsg = true, 1065 { .name = "readlinkat", .errmsg = true,
1066 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1066 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1067 { .name = "readv", .errmsg = true, 1067 { .name = "readv", .errmsg = true,
1068 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1068 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1069 { .name = "recvfrom", .errmsg = true, 1069 { .name = "recvfrom", .errmsg = true,
1070 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, 1070 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
1071 { .name = "recvmmsg", .errmsg = true, 1071 { .name = "recvmmsg", .errmsg = true,
@@ -1073,7 +1073,7 @@ static struct syscall_fmt {
1073 { .name = "recvmsg", .errmsg = true, 1073 { .name = "recvmsg", .errmsg = true,
1074 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, }, 1074 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
1075 { .name = "renameat", .errmsg = true, 1075 { .name = "renameat", .errmsg = true,
1076 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1076 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1077 { .name = "rt_sigaction", .errmsg = true, 1077 { .name = "rt_sigaction", .errmsg = true,
1078 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, }, 1078 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
1079 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), }, 1079 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
@@ -1091,7 +1091,7 @@ static struct syscall_fmt {
1091 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), }, 1091 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
1092 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 1092 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
1093 { .name = "shutdown", .errmsg = true, 1093 { .name = "shutdown", .errmsg = true,
1094 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1094 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1095 { .name = "socket", .errmsg = true, 1095 { .name = "socket", .errmsg = true,
1096 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ 1096 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
1097 [1] = SCA_SK_TYPE, /* type */ }, 1097 [1] = SCA_SK_TYPE, /* type */ },
@@ -1102,7 +1102,7 @@ static struct syscall_fmt {
1102 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, }, 1102 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
1103 { .name = "stat", .errmsg = true, .alias = "newstat", }, 1103 { .name = "stat", .errmsg = true, .alias = "newstat", },
1104 { .name = "symlinkat", .errmsg = true, 1104 { .name = "symlinkat", .errmsg = true,
1105 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 1105 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1106 { .name = "tgkill", .errmsg = true, 1106 { .name = "tgkill", .errmsg = true,
1107 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, }, 1107 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
1108 { .name = "tkill", .errmsg = true, 1108 { .name = "tkill", .errmsg = true,
@@ -1113,9 +1113,9 @@ static struct syscall_fmt {
1113 { .name = "utimensat", .errmsg = true, 1113 { .name = "utimensat", .errmsg = true,
1114 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, }, 1114 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
1115 { .name = "write", .errmsg = true, 1115 { .name = "write", .errmsg = true,
1116 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1116 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1117 { .name = "writev", .errmsg = true, 1117 { .name = "writev", .errmsg = true,
1118 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 1118 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1119}; 1119};
1120 1120
1121static int syscall_fmt__cmp(const void *name, const void *fmtp) 1121static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1191,7 +1191,7 @@ static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1191 1191
1192 if (thread__priv(thread) == NULL) 1192 if (thread__priv(thread) == NULL)
1193 thread__set_priv(thread, thread_trace__new()); 1193 thread__set_priv(thread, thread_trace__new());
1194 1194
1195 if (thread__priv(thread) == NULL) 1195 if (thread__priv(thread) == NULL)
1196 goto fail; 1196 goto fail;
1197 1197
@@ -2056,23 +2056,24 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
2056 if (trace->trace_syscalls && 2056 if (trace->trace_syscalls &&
2057 perf_evlist__add_syscall_newtp(evlist, trace__sys_enter, 2057 perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
2058 trace__sys_exit)) 2058 trace__sys_exit))
2059 goto out_error_tp; 2059 goto out_error_raw_syscalls;
2060 2060
2061 if (trace->trace_syscalls) 2061 if (trace->trace_syscalls)
2062 perf_evlist__add_vfs_getname(evlist); 2062 perf_evlist__add_vfs_getname(evlist);
2063 2063
2064 if ((trace->trace_pgfaults & TRACE_PFMAJ) && 2064 if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
2065 perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) 2065 perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
2066 goto out_error_tp; 2066 goto out_error_mem;
2067 }
2067 2068
2068 if ((trace->trace_pgfaults & TRACE_PFMIN) && 2069 if ((trace->trace_pgfaults & TRACE_PFMIN) &&
2069 perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN)) 2070 perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
2070 goto out_error_tp; 2071 goto out_error_mem;
2071 2072
2072 if (trace->sched && 2073 if (trace->sched &&
2073 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime", 2074 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
2074 trace__sched_stat_runtime)) 2075 trace__sched_stat_runtime))
2075 goto out_error_tp; 2076 goto out_error_sched_stat_runtime;
2076 2077
2077 err = perf_evlist__create_maps(evlist, &trace->opts.target); 2078 err = perf_evlist__create_maps(evlist, &trace->opts.target);
2078 if (err < 0) { 2079 if (err < 0) {
@@ -2202,8 +2203,12 @@ out:
2202{ 2203{
2203 char errbuf[BUFSIZ]; 2204 char errbuf[BUFSIZ];
2204 2205
2205out_error_tp: 2206out_error_sched_stat_runtime:
2206 perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf)); 2207 debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
2208 goto out_error;
2209
2210out_error_raw_syscalls:
2211 debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
2207 goto out_error; 2212 goto out_error;
2208 2213
2209out_error_mmap: 2214out_error_mmap:
@@ -2217,6 +2222,9 @@ out_error:
2217 fprintf(trace->output, "%s\n", errbuf); 2222 fprintf(trace->output, "%s\n", errbuf);
2218 goto out_delete_evlist; 2223 goto out_delete_evlist;
2219} 2224}
2225out_error_mem:
2226 fprintf(trace->output, "Not enough memory to run!\n");
2227 goto out_delete_evlist;
2220} 2228}
2221 2229
2222static int trace__replay(struct trace *trace) 2230static int trace__replay(struct trace *trace)
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 648e31ff4021..cc224080b525 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -198,6 +198,7 @@ CORE_FEATURE_TESTS = \
198 libpython-version \ 198 libpython-version \
199 libslang \ 199 libslang \
200 libunwind \ 200 libunwind \
201 pthread-attr-setaffinity-np \
201 stackprotector-all \ 202 stackprotector-all \
202 timerfd \ 203 timerfd \
203 libdw-dwarf-unwind \ 204 libdw-dwarf-unwind \
@@ -226,6 +227,7 @@ VF_FEATURE_TESTS = \
226 libelf-getphdrnum \ 227 libelf-getphdrnum \
227 libelf-mmap \ 228 libelf-mmap \
228 libpython-version \ 229 libpython-version \
230 pthread-attr-setaffinity-np \
229 stackprotector-all \ 231 stackprotector-all \
230 timerfd \ 232 timerfd \
231 libunwind-debug-frame \ 233 libunwind-debug-frame \
@@ -301,6 +303,10 @@ ifeq ($(feature-sync-compare-and-swap), 1)
301 CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT 303 CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
302endif 304endif
303 305
306ifeq ($(feature-pthread-attr-setaffinity-np), 1)
307 CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
308endif
309
304ifndef NO_BIONIC 310ifndef NO_BIONIC
305 $(call feature_check,bionic) 311 $(call feature_check,bionic)
306 ifeq ($(feature-bionic), 1) 312 ifeq ($(feature-bionic), 1)
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 53f19b5dbc37..42ac05aaf8ac 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -25,6 +25,7 @@ FILES= \
25 test-libslang.bin \ 25 test-libslang.bin \
26 test-libunwind.bin \ 26 test-libunwind.bin \
27 test-libunwind-debug-frame.bin \ 27 test-libunwind-debug-frame.bin \
28 test-pthread-attr-setaffinity-np.bin \
28 test-stackprotector-all.bin \ 29 test-stackprotector-all.bin \
29 test-timerfd.bin \ 30 test-timerfd.bin \
30 test-libdw-dwarf-unwind.bin \ 31 test-libdw-dwarf-unwind.bin \
@@ -47,6 +48,9 @@ test-all.bin:
47test-hello.bin: 48test-hello.bin:
48 $(BUILD) 49 $(BUILD)
49 50
51test-pthread-attr-setaffinity-np.bin:
52 $(BUILD) -Werror -lpthread
53
50test-stackprotector-all.bin: 54test-stackprotector-all.bin:
51 $(BUILD) -Werror -fstack-protector-all 55 $(BUILD) -Werror -fstack-protector-all
52 56
diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c
index 652e0098eba6..6d4d09323922 100644
--- a/tools/perf/config/feature-checks/test-all.c
+++ b/tools/perf/config/feature-checks/test-all.c
@@ -97,6 +97,10 @@
97# include "test-zlib.c" 97# include "test-zlib.c"
98#undef main 98#undef main
99 99
100#define main main_test_pthread_attr_setaffinity_np
101# include "test-pthread_attr_setaffinity_np.c"
102#undef main
103
100int main(int argc, char *argv[]) 104int main(int argc, char *argv[])
101{ 105{
102 main_test_libpython(); 106 main_test_libpython();
@@ -121,6 +125,7 @@ int main(int argc, char *argv[])
121 main_test_libdw_dwarf_unwind(); 125 main_test_libdw_dwarf_unwind();
122 main_test_sync_compare_and_swap(argc, argv); 126 main_test_sync_compare_and_swap(argc, argv);
123 main_test_zlib(); 127 main_test_zlib();
128 main_test_pthread_attr_setaffinity_np();
124 129
125 return 0; 130 return 0;
126} 131}
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
new file mode 100644
index 000000000000..0a0d3ecb4e8a
--- /dev/null
+++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
@@ -0,0 +1,14 @@
1#include <stdint.h>
2#include <pthread.h>
3
4int main(void)
5{
6 int ret = 0;
7 pthread_attr_t thread_attr;
8
9 pthread_attr_init(&thread_attr);
10 /* don't care abt exact args, just the API itself in libpthread */
11 ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
12
13 return ret;
14}
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index c9b4b6269b51..1091bd47adfd 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -104,7 +104,6 @@ class Event(dict):
104 continue 104 continue
105 if not self.compare_data(self[t], other[t]): 105 if not self.compare_data(self[t], other[t]):
106 log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) 106 log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
107
108 107
109# Test file description needs to have following sections: 108# Test file description needs to have following sections:
110# [config] 109# [config]
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 8d110dec393e..18619966454c 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -140,7 +140,7 @@ static void del_hist_entries(struct hists *hists)
140 he = rb_entry(node, struct hist_entry, rb_node); 140 he = rb_entry(node, struct hist_entry, rb_node);
141 rb_erase(node, root_out); 141 rb_erase(node, root_out);
142 rb_erase(&he->rb_node_in, root_in); 142 rb_erase(&he->rb_node_in, root_in);
143 hist_entry__free(he); 143 hist_entry__delete(he);
144 } 144 }
145} 145}
146 146
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index f5547610da02..b52c9faea224 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -106,7 +106,7 @@ static void del_hist_entries(struct hists *hists)
106 he = rb_entry(node, struct hist_entry, rb_node); 106 he = rb_entry(node, struct hist_entry, rb_node);
107 rb_erase(node, root_out); 107 rb_erase(node, root_out);
108 rb_erase(&he->rb_node_in, root_in); 108 rb_erase(&he->rb_node_in, root_in);
109 hist_entry__free(he); 109 hist_entry__delete(he);
110 } 110 }
111} 111}
112 112
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 69a71ff84e01..75709d2b17b4 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -222,7 +222,6 @@ tarpkg:
222 @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \ 222 @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
223 echo "- $@: $$cmd" && echo $$cmd > $@ && \ 223 echo "- $@: $$cmd" && echo $$cmd > $@ && \
224 ( eval $$cmd ) >> $@ 2>&1 224 ( eval $$cmd ) >> $@ 2>&1
225
226 225
227all: $(run) $(run_O) tarpkg 226all: $(run) $(run_O) tarpkg
228 @echo OK 227 @echo OK
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 7f2f51f93619..1cdab0ce00e2 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1145,6 +1145,49 @@ static int test__pinned_group(struct perf_evlist *evlist)
1145 return 0; 1145 return 0;
1146} 1146}
1147 1147
1148static int test__checkevent_breakpoint_len(struct perf_evlist *evlist)
1149{
1150 struct perf_evsel *evsel = perf_evlist__first(evlist);
1151
1152 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
1153 TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
1154 TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
1155 TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
1156 evsel->attr.bp_type);
1157 TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
1158 evsel->attr.bp_len);
1159
1160 return 0;
1161}
1162
1163static int test__checkevent_breakpoint_len_w(struct perf_evlist *evlist)
1164{
1165 struct perf_evsel *evsel = perf_evlist__first(evlist);
1166
1167 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
1168 TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
1169 TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
1170 TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
1171 evsel->attr.bp_type);
1172 TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
1173 evsel->attr.bp_len);
1174
1175 return 0;
1176}
1177
1178static int
1179test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
1180{
1181 struct perf_evsel *evsel = perf_evlist__first(evlist);
1182
1183 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
1184 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
1185 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
1186 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
1187
1188 return test__checkevent_breakpoint_rw(evlist);
1189}
1190
1148static int count_tracepoints(void) 1191static int count_tracepoints(void)
1149{ 1192{
1150 char events_path[PATH_MAX]; 1193 char events_path[PATH_MAX];
@@ -1420,6 +1463,21 @@ static struct evlist_test test__events[] = {
1420 .check = test__pinned_group, 1463 .check = test__pinned_group,
1421 .id = 41, 1464 .id = 41,
1422 }, 1465 },
1466 {
1467 .name = "mem:0/1",
1468 .check = test__checkevent_breakpoint_len,
1469 .id = 42,
1470 },
1471 {
1472 .name = "mem:0/2:w",
1473 .check = test__checkevent_breakpoint_len_w,
1474 .id = 43,
1475 },
1476 {
1477 .name = "mem:0/4:rw:u",
1478 .check = test__checkevent_breakpoint_len_rw_modifier,
1479 .id = 44
1480 },
1423#if defined(__s390x__) 1481#if defined(__s390x__)
1424 { 1482 {
1425 .name = "kvm-s390:kvm_s390_create_vm", 1483 .name = "kvm-s390:kvm_s390_create_vm",
@@ -1471,7 +1529,7 @@ static int test_event(struct evlist_test *e)
1471 } else { 1529 } else {
1472 ret = e->check(evlist); 1530 ret = e->check(evlist);
1473 } 1531 }
1474 1532
1475 perf_evlist__delete(evlist); 1533 perf_evlist__delete(evlist);
1476 1534
1477 return ret; 1535 return ret;
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 4908c648a597..30c02181e78b 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -110,7 +110,7 @@ static bool samples_same(const struct perf_sample *s1,
110 110
111 if (type & PERF_SAMPLE_STACK_USER) { 111 if (type & PERF_SAMPLE_STACK_USER) {
112 COMP(user_stack.size); 112 COMP(user_stack.size);
113 if (memcmp(s1->user_stack.data, s1->user_stack.data, 113 if (memcmp(s1->user_stack.data, s2->user_stack.data,
114 s1->user_stack.size)) { 114 s1->user_stack.size)) {
115 pr_debug("Samples differ at 'user_stack'\n"); 115 pr_debug("Samples differ at 'user_stack'\n");
116 return false; 116 return false;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1e0a2fd80115..9d32e3c0cfee 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -517,7 +517,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
517 } 517 }
518 518
519 annotate_browser__set_top(browser, dl, idx); 519 annotate_browser__set_top(browser, dl, idx);
520 520
521 return true; 521 return true;
522} 522}
523 523
@@ -867,7 +867,6 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
867 867
868 ++browser->nr_jumps; 868 ++browser->nr_jumps;
869 } 869 }
870
871} 870}
872 871
873static inline int width_jumps(int n) 872static inline int width_jumps(int n)
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 482adae3cc44..25d608394d74 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -285,7 +285,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
285} 285}
286 286
287#define __HPP_SORT_FN(_type, _field) \ 287#define __HPP_SORT_FN(_type, _field) \
288static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ 288static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
289 struct hist_entry *a, struct hist_entry *b) \
289{ \ 290{ \
290 return __hpp__sort(a, b, he_get_##_field); \ 291 return __hpp__sort(a, b, he_get_##_field); \
291} 292}
@@ -312,7 +313,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
312} 313}
313 314
314#define __HPP_SORT_ACC_FN(_type, _field) \ 315#define __HPP_SORT_ACC_FN(_type, _field) \
315static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ 316static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
317 struct hist_entry *a, struct hist_entry *b) \
316{ \ 318{ \
317 return __hpp__sort_acc(a, b, he_get_acc_##_field); \ 319 return __hpp__sort_acc(a, b, he_get_acc_##_field); \
318} 320}
@@ -331,7 +333,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
331} 333}
332 334
333#define __HPP_SORT_RAW_FN(_type, _field) \ 335#define __HPP_SORT_RAW_FN(_type, _field) \
334static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ 336static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
337 struct hist_entry *a, struct hist_entry *b) \
335{ \ 338{ \
336 return __hpp__sort(a, b, he_get_raw_##_field); \ 339 return __hpp__sort(a, b, he_get_raw_##_field); \
337} 340}
@@ -361,7 +364,8 @@ HPP_PERCENT_ACC_FNS(overhead_acc, period)
361HPP_RAW_FNS(samples, nr_events) 364HPP_RAW_FNS(samples, nr_events)
362HPP_RAW_FNS(period, period) 365HPP_RAW_FNS(period, period)
363 366
364static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused, 367static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
368 struct hist_entry *a __maybe_unused,
365 struct hist_entry *b __maybe_unused) 369 struct hist_entry *b __maybe_unused)
366{ 370{
367 return 0; 371 return 0;
diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h
index f34f89eb607c..717d39d3052b 100644
--- a/tools/perf/ui/progress.h
+++ b/tools/perf/ui/progress.h
@@ -4,12 +4,12 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6void ui_progress__finish(void); 6void ui_progress__finish(void);
7 7
8struct ui_progress { 8struct ui_progress {
9 const char *title; 9 const char *title;
10 u64 curr, next, step, total; 10 u64 curr, next, step, total;
11}; 11};
12 12
13void ui_progress__init(struct ui_progress *p, u64 total, const char *title); 13void ui_progress__init(struct ui_progress *p, u64 total, const char *title);
14void ui_progress__update(struct ui_progress *p, u64 adv); 14void ui_progress__update(struct ui_progress *p, u64 adv);
15 15
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 1c8b9afd5d6e..88f5143a5981 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -9,6 +9,7 @@
9#include "../libslang.h" 9#include "../libslang.h"
10 10
11char ui_helpline__last_msg[1024]; 11char ui_helpline__last_msg[1024];
12bool tui_helpline__set;
12 13
13static void tui_helpline__pop(void) 14static void tui_helpline__pop(void)
14{ 15{
@@ -35,6 +36,8 @@ static int tui_helpline__show(const char *format, va_list ap)
35 sizeof(ui_helpline__last_msg) - backlog, format, ap); 36 sizeof(ui_helpline__last_msg) - backlog, format, ap);
36 backlog += ret; 37 backlog += ret;
37 38
39 tui_helpline__set = true;
40
38 if (ui_helpline__last_msg[backlog - 1] == '\n') { 41 if (ui_helpline__last_msg[backlog - 1] == '\n') {
39 ui_helpline__puts(ui_helpline__last_msg); 42 ui_helpline__puts(ui_helpline__last_msg);
40 SLsmg_refresh(); 43 SLsmg_refresh();
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 3c38f25b1695..b77e1d771363 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -17,6 +17,7 @@
17static volatile int ui__need_resize; 17static volatile int ui__need_resize;
18 18
19extern struct perf_error_ops perf_tui_eops; 19extern struct perf_error_ops perf_tui_eops;
20extern bool tui_helpline__set;
20 21
21extern void hist_browser__init_hpp(void); 22extern void hist_browser__init_hpp(void);
22 23
@@ -159,7 +160,7 @@ out:
159 160
160void ui__exit(bool wait_for_ok) 161void ui__exit(bool wait_for_ok)
161{ 162{
162 if (wait_for_ok) 163 if (wait_for_ok && tui_helpline__set)
163 ui__question_window("Fatal Error", 164 ui__question_window("Fatal Error",
164 ui_helpline__last_msg, 165 ui_helpline__last_msg,
165 "Press any key...", 0); 166 "Press any key...", 0);
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 01bc4e23a2cf..61bf9128e1f2 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -239,7 +239,7 @@ static int mov__parse(struct ins_operands *ops)
239 *s = '\0'; 239 *s = '\0';
240 ops->source.raw = strdup(ops->raw); 240 ops->source.raw = strdup(ops->raw);
241 *s = ','; 241 *s = ',';
242 242
243 if (ops->source.raw == NULL) 243 if (ops->source.raw == NULL)
244 return -1; 244 return -1;
245 245
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index f4654183d391..55355b3d4f85 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -5,132 +5,6 @@
5 5
6int perf_use_color_default = -1; 6int perf_use_color_default = -1;
7 7
8static int parse_color(const char *name, int len)
9{
10 static const char * const color_names[] = {
11 "normal", "black", "red", "green", "yellow",
12 "blue", "magenta", "cyan", "white"
13 };
14 char *end;
15 int i;
16
17 for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
18 const char *str = color_names[i];
19 if (!strncasecmp(name, str, len) && !str[len])
20 return i - 1;
21 }
22 i = strtol(name, &end, 10);
23 if (end - name == len && i >= -1 && i <= 255)
24 return i;
25 return -2;
26}
27
28static int parse_attr(const char *name, int len)
29{
30 static const int attr_values[] = { 1, 2, 4, 5, 7 };
31 static const char * const attr_names[] = {
32 "bold", "dim", "ul", "blink", "reverse"
33 };
34 unsigned int i;
35
36 for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
37 const char *str = attr_names[i];
38 if (!strncasecmp(name, str, len) && !str[len])
39 return attr_values[i];
40 }
41 return -1;
42}
43
44void color_parse(const char *value, const char *var, char *dst)
45{
46 color_parse_mem(value, strlen(value), var, dst);
47}
48
49void color_parse_mem(const char *value, int value_len, const char *var,
50 char *dst)
51{
52 const char *ptr = value;
53 int len = value_len;
54 int attr = -1;
55 int fg = -2;
56 int bg = -2;
57
58 if (!strncasecmp(value, "reset", len)) {
59 strcpy(dst, PERF_COLOR_RESET);
60 return;
61 }
62
63 /* [fg [bg]] [attr] */
64 while (len > 0) {
65 const char *word = ptr;
66 int val, wordlen = 0;
67
68 while (len > 0 && !isspace(word[wordlen])) {
69 wordlen++;
70 len--;
71 }
72
73 ptr = word + wordlen;
74 while (len > 0 && isspace(*ptr)) {
75 ptr++;
76 len--;
77 }
78
79 val = parse_color(word, wordlen);
80 if (val >= -1) {
81 if (fg == -2) {
82 fg = val;
83 continue;
84 }
85 if (bg == -2) {
86 bg = val;
87 continue;
88 }
89 goto bad;
90 }
91 val = parse_attr(word, wordlen);
92 if (val < 0 || attr != -1)
93 goto bad;
94 attr = val;
95 }
96
97 if (attr >= 0 || fg >= 0 || bg >= 0) {
98 int sep = 0;
99
100 *dst++ = '\033';
101 *dst++ = '[';
102 if (attr >= 0) {
103 *dst++ = '0' + attr;
104 sep++;
105 }
106 if (fg >= 0) {
107 if (sep++)
108 *dst++ = ';';
109 if (fg < 8) {
110 *dst++ = '3';
111 *dst++ = '0' + fg;
112 } else {
113 dst += sprintf(dst, "38;5;%d", fg);
114 }
115 }
116 if (bg >= 0) {
117 if (sep++)
118 *dst++ = ';';
119 if (bg < 8) {
120 *dst++ = '4';
121 *dst++ = '0' + bg;
122 } else {
123 dst += sprintf(dst, "48;5;%d", bg);
124 }
125 }
126 *dst++ = 'm';
127 }
128 *dst = 0;
129 return;
130bad:
131 die("bad color value '%.*s' for variable '%s'", value_len, value, var);
132}
133
134int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty) 8int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
135{ 9{
136 if (value) { 10 if (value) {
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 0a594b8a0c26..38146f922c54 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -30,8 +30,6 @@ extern int perf_use_color_default;
30int perf_color_default_config(const char *var, const char *value, void *cb); 30int perf_color_default_config(const char *var, const char *value, void *cb);
31 31
32int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty); 32int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
33void color_parse(const char *value, const char *var, char *dst);
34void color_parse_mem(const char *value, int len, const char *var, char *dst);
35int color_vsnprintf(char *bf, size_t size, const char *color, 33int color_vsnprintf(char *bf, size_t size, const char *color,
36 const char *fmt, va_list args); 34 const char *fmt, va_list args);
37int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args); 35int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 45be944d450a..c2f7d3b90966 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -532,12 +532,8 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
532 break; 532 break;
533 533
534 cache_offset = offset & DSO__DATA_CACHE_MASK; 534 cache_offset = offset & DSO__DATA_CACHE_MASK;
535 ret = -EINVAL;
536 535
537 if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET)) 536 ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
538 break;
539
540 ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
541 if (ret <= 0) 537 if (ret <= 0)
542 break; 538 break;
543 539
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 3782c82c6e44..ced92841ff97 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -139,6 +139,7 @@ struct dso {
139 u32 status_seen; 139 u32 status_seen;
140 size_t file_size; 140 size_t file_size;
141 struct list_head open_entry; 141 struct list_head open_entry;
142 u64 frame_offset;
142 } data; 143 } data;
143 144
144 union { /* Tool specific area */ 145 union { /* Tool specific area */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 2e507b5025a3..28b8ce86bf12 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1436,33 +1436,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1436 return printed + fprintf(fp, "\n"); 1436 return printed + fprintf(fp, "\n");
1437} 1437}
1438 1438
1439int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
1440 int err, char *buf, size_t size)
1441{
1442 char sbuf[128];
1443
1444 switch (err) {
1445 case ENOENT:
1446 scnprintf(buf, size, "%s",
1447 "Error:\tUnable to find debugfs\n"
1448 "Hint:\tWas your kernel compiled with debugfs support?\n"
1449 "Hint:\tIs the debugfs filesystem mounted?\n"
1450 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
1451 break;
1452 case EACCES:
1453 scnprintf(buf, size,
1454 "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
1455 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
1456 debugfs_mountpoint, debugfs_mountpoint);
1457 break;
1458 default:
1459 scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
1460 break;
1461 }
1462
1463 return 0;
1464}
1465
1466int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused, 1439int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
1467 int err, char *buf, size_t size) 1440 int err, char *buf, size_t size)
1468{ 1441{
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0ba93f67ab94..c94a9e03ecf1 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -183,7 +183,6 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
183 183
184size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); 184size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
185 185
186int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
187int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); 186int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
188int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); 187int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
189 188
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1e90c8557ede..ea51a90e20a0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -709,6 +709,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
709 if (opts->sample_weight) 709 if (opts->sample_weight)
710 perf_evsel__set_sample_bit(evsel, WEIGHT); 710 perf_evsel__set_sample_bit(evsel, WEIGHT);
711 711
712 attr->task = track;
712 attr->mmap = track; 713 attr->mmap = track;
713 attr->mmap2 = track && !perf_missing_features.mmap2; 714 attr->mmap2 = track && !perf_missing_features.mmap2;
714 attr->comm = track; 715 attr->comm = track;
@@ -797,6 +798,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
797 798
798int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) 799int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
799{ 800{
801 if (ncpus == 0 || nthreads == 0)
802 return 0;
803
800 if (evsel->system_wide) 804 if (evsel->system_wide)
801 nthreads = 1; 805 nthreads = 1;
802 806
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b20e40c74468..1f407f7352a7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2237,6 +2237,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2237 * - unique number to identify actual perf.data files 2237 * - unique number to identify actual perf.data files
2238 * - encode endianness of file 2238 * - encode endianness of file
2239 */ 2239 */
2240 ph->version = PERF_HEADER_VERSION_2;
2240 2241
2241 /* check magic number with one endianness */ 2242 /* check magic number with one endianness */
2242 if (magic == __perf_magic2) 2243 if (magic == __perf_magic2)
@@ -2247,7 +2248,6 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2247 return -1; 2248 return -1;
2248 2249
2249 ph->needs_swap = true; 2250 ph->needs_swap = true;
2250 ph->version = PERF_HEADER_VERSION_2;
2251 2251
2252 return 0; 2252 return 0;
2253} 2253}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 182395546ddc..70b48a65064c 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -241,6 +241,20 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
241 return he->stat.period == 0; 241 return he->stat.period == 0;
242} 242}
243 243
244static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
245{
246 rb_erase(&he->rb_node, &hists->entries);
247
248 if (sort__need_collapse)
249 rb_erase(&he->rb_node_in, &hists->entries_collapsed);
250
251 --hists->nr_entries;
252 if (!he->filtered)
253 --hists->nr_non_filtered_entries;
254
255 hist_entry__delete(he);
256}
257
244void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) 258void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
245{ 259{
246 struct rb_node *next = rb_first(&hists->entries); 260 struct rb_node *next = rb_first(&hists->entries);
@@ -258,16 +272,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
258 (zap_kernel && n->level != '.') || 272 (zap_kernel && n->level != '.') ||
259 hists__decay_entry(hists, n)) && 273 hists__decay_entry(hists, n)) &&
260 !n->used) { 274 !n->used) {
261 rb_erase(&n->rb_node, &hists->entries); 275 hists__delete_entry(hists, n);
262
263 if (sort__need_collapse)
264 rb_erase(&n->rb_node_in, &hists->entries_collapsed);
265
266 --hists->nr_entries;
267 if (!n->filtered)
268 --hists->nr_non_filtered_entries;
269
270 hist_entry__free(n);
271 } 276 }
272 } 277 }
273} 278}
@@ -281,16 +286,7 @@ void hists__delete_entries(struct hists *hists)
281 n = rb_entry(next, struct hist_entry, rb_node); 286 n = rb_entry(next, struct hist_entry, rb_node);
282 next = rb_next(&n->rb_node); 287 next = rb_next(&n->rb_node);
283 288
284 rb_erase(&n->rb_node, &hists->entries); 289 hists__delete_entry(hists, n);
285
286 if (sort__need_collapse)
287 rb_erase(&n->rb_node_in, &hists->entries_collapsed);
288
289 --hists->nr_entries;
290 if (!n->filtered)
291 --hists->nr_non_filtered_entries;
292
293 hist_entry__free(n);
294 } 290 }
295} 291}
296 292
@@ -433,6 +429,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
433 if (!he) 429 if (!he)
434 return NULL; 430 return NULL;
435 431
432 hists->nr_entries++;
433
436 rb_link_node(&he->rb_node_in, parent, p); 434 rb_link_node(&he->rb_node_in, parent, p);
437 rb_insert_color(&he->rb_node_in, hists->entries_in); 435 rb_insert_color(&he->rb_node_in, hists->entries_in);
438out: 436out:
@@ -915,7 +913,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
915 if (perf_hpp__should_skip(fmt)) 913 if (perf_hpp__should_skip(fmt))
916 continue; 914 continue;
917 915
918 cmp = fmt->cmp(left, right); 916 cmp = fmt->cmp(fmt, left, right);
919 if (cmp) 917 if (cmp)
920 break; 918 break;
921 } 919 }
@@ -933,7 +931,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
933 if (perf_hpp__should_skip(fmt)) 931 if (perf_hpp__should_skip(fmt))
934 continue; 932 continue;
935 933
936 cmp = fmt->collapse(left, right); 934 cmp = fmt->collapse(fmt, left, right);
937 if (cmp) 935 if (cmp)
938 break; 936 break;
939 } 937 }
@@ -941,7 +939,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
941 return cmp; 939 return cmp;
942} 940}
943 941
944void hist_entry__free(struct hist_entry *he) 942void hist_entry__delete(struct hist_entry *he)
945{ 943{
946 zfree(&he->branch_info); 944 zfree(&he->branch_info);
947 zfree(&he->mem_info); 945 zfree(&he->mem_info);
@@ -981,7 +979,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
981 iter->callchain, 979 iter->callchain,
982 he->callchain); 980 he->callchain);
983 } 981 }
984 hist_entry__free(he); 982 hist_entry__delete(he);
985 return false; 983 return false;
986 } 984 }
987 985
@@ -1063,7 +1061,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1063 if (perf_hpp__should_skip(fmt)) 1061 if (perf_hpp__should_skip(fmt))
1064 continue; 1062 continue;
1065 1063
1066 cmp = fmt->sort(a, b); 1064 cmp = fmt->sort(fmt, a, b);
1067 if (cmp) 1065 if (cmp)
1068 break; 1066 break;
1069 } 1067 }
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 46bd50344f85..2b690d028907 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -119,7 +119,7 @@ int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
119int hist_entry__transaction_len(void); 119int hist_entry__transaction_len(void);
120int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size, 120int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
121 struct hists *hists); 121 struct hists *hists);
122void hist_entry__free(struct hist_entry *); 122void hist_entry__delete(struct hist_entry *he);
123 123
124void hists__output_resort(struct hists *hists, struct ui_progress *prog); 124void hists__output_resort(struct hists *hists, struct ui_progress *prog);
125void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); 125void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
@@ -195,9 +195,12 @@ struct perf_hpp_fmt {
195 struct hist_entry *he); 195 struct hist_entry *he);
196 int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 196 int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
197 struct hist_entry *he); 197 struct hist_entry *he);
198 int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b); 198 int64_t (*cmp)(struct perf_hpp_fmt *fmt,
199 int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b); 199 struct hist_entry *a, struct hist_entry *b);
200 int64_t (*sort)(struct hist_entry *a, struct hist_entry *b); 200 int64_t (*collapse)(struct perf_hpp_fmt *fmt,
201 struct hist_entry *a, struct hist_entry *b);
202 int64_t (*sort)(struct perf_hpp_fmt *fmt,
203 struct hist_entry *a, struct hist_entry *b);
201 204
202 struct list_head list; 205 struct list_head list;
203 struct list_head sort_list; 206 struct list_head sort_list;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 77b43fe43d55..7f8ec6ce2823 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -526,7 +526,7 @@ do { \
526} 526}
527 527
528int parse_events_add_breakpoint(struct list_head *list, int *idx, 528int parse_events_add_breakpoint(struct list_head *list, int *idx,
529 void *ptr, char *type) 529 void *ptr, char *type, u64 len)
530{ 530{
531 struct perf_event_attr attr; 531 struct perf_event_attr attr;
532 532
@@ -536,14 +536,15 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
536 if (parse_breakpoint_type(type, &attr)) 536 if (parse_breakpoint_type(type, &attr))
537 return -EINVAL; 537 return -EINVAL;
538 538
539 /* 539 /* Provide some defaults if len is not specified */
540 * We should find a nice way to override the access length 540 if (!len) {
541 * Provide some defaults for now 541 if (attr.bp_type == HW_BREAKPOINT_X)
542 */ 542 len = sizeof(long);
543 if (attr.bp_type == HW_BREAKPOINT_X) 543 else
544 attr.bp_len = sizeof(long); 544 len = HW_BREAKPOINT_LEN_4;
545 else 545 }
546 attr.bp_len = HW_BREAKPOINT_LEN_4; 546
547 attr.bp_len = len;
547 548
548 attr.type = PERF_TYPE_BREAKPOINT; 549 attr.type = PERF_TYPE_BREAKPOINT;
549 attr.sample_period = 1; 550 attr.sample_period = 1;
@@ -1121,7 +1122,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
1121 return; 1122 return;
1122 1123
1123 for_each_subsystem(sys_dir, sys_dirent, sys_next) { 1124 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
1124 if (subsys_glob != NULL && 1125 if (subsys_glob != NULL &&
1125 !strglobmatch(sys_dirent.d_name, subsys_glob)) 1126 !strglobmatch(sys_dirent.d_name, subsys_glob))
1126 continue; 1127 continue;
1127 1128
@@ -1132,7 +1133,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
1132 continue; 1133 continue;
1133 1134
1134 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { 1135 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
1135 if (event_glob != NULL && 1136 if (event_glob != NULL &&
1136 !strglobmatch(evt_dirent.d_name, event_glob)) 1137 !strglobmatch(evt_dirent.d_name, event_glob))
1137 continue; 1138 continue;
1138 1139
@@ -1305,7 +1306,7 @@ static void print_symbol_events(const char *event_glob, unsigned type,
1305 1306
1306 for (i = 0; i < max; i++, syms++) { 1307 for (i = 0; i < max; i++, syms++) {
1307 1308
1308 if (event_glob != NULL && 1309 if (event_glob != NULL &&
1309 !(strglobmatch(syms->symbol, event_glob) || 1310 !(strglobmatch(syms->symbol, event_glob) ||
1310 (syms->alias && strglobmatch(syms->alias, event_glob)))) 1311 (syms->alias && strglobmatch(syms->alias, event_glob))))
1311 continue; 1312 continue;
@@ -1366,7 +1367,7 @@ void print_events(const char *event_glob, bool name_only)
1366 printf("\n"); 1367 printf("\n");
1367 1368
1368 printf(" %-50s [%s]\n", 1369 printf(" %-50s [%s]\n",
1369 "mem:<addr>[:access]", 1370 "mem:<addr>[/len][:access]",
1370 event_type_descriptors[PERF_TYPE_BREAKPOINT]); 1371 event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1371 printf("\n"); 1372 printf("\n");
1372 } 1373 }
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index db2cf78ff0f3..ff6e1fa4111e 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -71,6 +71,7 @@ struct parse_events_term {
71 int type_val; 71 int type_val;
72 int type_term; 72 int type_term;
73 struct list_head list; 73 struct list_head list;
74 bool used;
74}; 75};
75 76
76struct parse_events_evlist { 77struct parse_events_evlist {
@@ -104,7 +105,7 @@ int parse_events_add_numeric(struct list_head *list, int *idx,
104int parse_events_add_cache(struct list_head *list, int *idx, 105int parse_events_add_cache(struct list_head *list, int *idx,
105 char *type, char *op_result1, char *op_result2); 106 char *type, char *op_result1, char *op_result2);
106int parse_events_add_breakpoint(struct list_head *list, int *idx, 107int parse_events_add_breakpoint(struct list_head *list, int *idx,
107 void *ptr, char *type); 108 void *ptr, char *type, u64 len);
108int parse_events_add_pmu(struct list_head *list, int *idx, 109int parse_events_add_pmu(struct list_head *list, int *idx,
109 char *pmu , struct list_head *head_config); 110 char *pmu , struct list_head *head_config);
110enum perf_pmu_event_symbol_type 111enum perf_pmu_event_symbol_type
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 906630bbf8eb..94eacb6c1ef7 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -159,6 +159,7 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
159<mem>{ 159<mem>{
160{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); } 160{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
161: { return ':'; } 161: { return ':'; }
162"/" { return '/'; }
162{num_dec} { return value(yyscanner, 10); } 163{num_dec} { return value(yyscanner, 10); }
163{num_hex} { return value(yyscanner, 16); } 164{num_hex} { return value(yyscanner, 16); }
164 /* 165 /*
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 93c4c9fbc922..72def077dbbf 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -326,6 +326,28 @@ PE_NAME_CACHE_TYPE
326} 326}
327 327
328event_legacy_mem: 328event_legacy_mem:
329PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
330{
331 struct parse_events_evlist *data = _data;
332 struct list_head *list;
333
334 ALLOC_LIST(list);
335 ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
336 (void *) $2, $6, $4));
337 $$ = list;
338}
339|
340PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
341{
342 struct parse_events_evlist *data = _data;
343 struct list_head *list;
344
345 ALLOC_LIST(list);
346 ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
347 (void *) $2, NULL, $4));
348 $$ = list;
349}
350|
329PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc 351PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
330{ 352{
331 struct parse_events_evlist *data = _data; 353 struct parse_events_evlist *data = _data;
@@ -333,7 +355,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
333 355
334 ALLOC_LIST(list); 356 ALLOC_LIST(list);
335 ABORT_ON(parse_events_add_breakpoint(list, &data->idx, 357 ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
336 (void *) $2, $4)); 358 (void *) $2, $4, 0));
337 $$ = list; 359 $$ = list;
338} 360}
339| 361|
@@ -344,7 +366,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
344 366
345 ALLOC_LIST(list); 367 ALLOC_LIST(list);
346 ABORT_ON(parse_events_add_breakpoint(list, &data->idx, 368 ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
347 (void *) $2, NULL)); 369 (void *) $2, NULL, 0));
348 $$ = list; 370 $$ = list;
349} 371}
350 372
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index f62dee7bd924..4a015f77e2b5 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -46,7 +46,7 @@ static int get_value(struct parse_opt_ctx_t *p,
46 return opterror(opt, "is not usable", flags); 46 return opterror(opt, "is not usable", flags);
47 47
48 if (opt->flags & PARSE_OPT_EXCLUSIVE) { 48 if (opt->flags & PARSE_OPT_EXCLUSIVE) {
49 if (p->excl_opt) { 49 if (p->excl_opt && p->excl_opt != opt) {
50 char msg[128]; 50 char msg[128];
51 51
52 if (((flags & OPT_SHORT) && p->excl_opt->short_name) || 52 if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 5c9c4947cfb4..48411674da0f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -551,31 +551,68 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
551} 551}
552 552
553/* 553/*
554 * Term is a string term, and might be a param-term. Try to look up it's value
555 * in the remaining terms.
556 * - We have a term like "base-or-format-term=param-term",
557 * - We need to find the value supplied for "param-term" (with param-term named
558 * in a config string) later on in the term list.
559 */
560static int pmu_resolve_param_term(struct parse_events_term *term,
561 struct list_head *head_terms,
562 __u64 *value)
563{
564 struct parse_events_term *t;
565
566 list_for_each_entry(t, head_terms, list) {
567 if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
568 if (!strcmp(t->config, term->config)) {
569 t->used = true;
570 *value = t->val.num;
571 return 0;
572 }
573 }
574 }
575
576 if (verbose)
577 printf("Required parameter '%s' not specified\n", term->config);
578
579 return -1;
580}
581
582/*
554 * Setup one of config[12] attr members based on the 583 * Setup one of config[12] attr members based on the
555 * user input data - term parameter. 584 * user input data - term parameter.
556 */ 585 */
557static int pmu_config_term(struct list_head *formats, 586static int pmu_config_term(struct list_head *formats,
558 struct perf_event_attr *attr, 587 struct perf_event_attr *attr,
559 struct parse_events_term *term, 588 struct parse_events_term *term,
589 struct list_head *head_terms,
560 bool zero) 590 bool zero)
561{ 591{
562 struct perf_pmu_format *format; 592 struct perf_pmu_format *format;
563 __u64 *vp; 593 __u64 *vp;
594 __u64 val;
595
596 /*
597 * If this is a parameter we've already used for parameterized-eval,
598 * skip it in normal eval.
599 */
600 if (term->used)
601 return 0;
564 602
565 /* 603 /*
566 * Support only for hardcoded and numnerial terms.
567 * Hardcoded terms should be already in, so nothing 604 * Hardcoded terms should be already in, so nothing
568 * to be done for them. 605 * to be done for them.
569 */ 606 */
570 if (parse_events__is_hardcoded_term(term)) 607 if (parse_events__is_hardcoded_term(term))
571 return 0; 608 return 0;
572 609
573 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
574 return -EINVAL;
575
576 format = pmu_find_format(formats, term->config); 610 format = pmu_find_format(formats, term->config);
577 if (!format) 611 if (!format) {
612 if (verbose)
613 printf("Invalid event/parameter '%s'\n", term->config);
578 return -EINVAL; 614 return -EINVAL;
615 }
579 616
580 switch (format->value) { 617 switch (format->value) {
581 case PERF_PMU_FORMAT_VALUE_CONFIG: 618 case PERF_PMU_FORMAT_VALUE_CONFIG:
@@ -592,11 +629,25 @@ static int pmu_config_term(struct list_head *formats,
592 } 629 }
593 630
594 /* 631 /*
595 * XXX If we ever decide to go with string values for 632 * Either directly use a numeric term, or try to translate string terms
596 * non-hardcoded terms, here's the place to translate 633 * using event parameters.
597 * them into value.
598 */ 634 */
599 pmu_format_value(format->bits, term->val.num, vp, zero); 635 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
636 val = term->val.num;
637 else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
638 if (strcmp(term->val.str, "?")) {
639 if (verbose)
640 pr_info("Invalid sysfs entry %s=%s\n",
641 term->config, term->val.str);
642 return -EINVAL;
643 }
644
645 if (pmu_resolve_param_term(term, head_terms, &val))
646 return -EINVAL;
647 } else
648 return -EINVAL;
649
650 pmu_format_value(format->bits, val, vp, zero);
600 return 0; 651 return 0;
601} 652}
602 653
@@ -607,9 +658,10 @@ int perf_pmu__config_terms(struct list_head *formats,
607{ 658{
608 struct parse_events_term *term; 659 struct parse_events_term *term;
609 660
610 list_for_each_entry(term, head_terms, list) 661 list_for_each_entry(term, head_terms, list) {
611 if (pmu_config_term(formats, attr, term, zero)) 662 if (pmu_config_term(formats, attr, term, head_terms, zero))
612 return -EINVAL; 663 return -EINVAL;
664 }
613 665
614 return 0; 666 return 0;
615} 667}
@@ -767,10 +819,36 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
767 set_bit(b, bits); 819 set_bit(b, bits);
768} 820}
769 821
822static int sub_non_neg(int a, int b)
823{
824 if (b > a)
825 return 0;
826 return a - b;
827}
828
770static char *format_alias(char *buf, int len, struct perf_pmu *pmu, 829static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
771 struct perf_pmu_alias *alias) 830 struct perf_pmu_alias *alias)
772{ 831{
773 snprintf(buf, len, "%s/%s/", pmu->name, alias->name); 832 struct parse_events_term *term;
833 int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
834
835 list_for_each_entry(term, &alias->terms, list) {
836 if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
837 used += snprintf(buf + used, sub_non_neg(len, used),
838 ",%s=%s", term->config,
839 term->val.str);
840 }
841
842 if (sub_non_neg(len, used) > 0) {
843 buf[used] = '/';
844 used++;
845 }
846 if (sub_non_neg(len, used) > 0) {
847 buf[used] = '\0';
848 used++;
849 } else
850 buf[len - 1] = '\0';
851
774 return buf; 852 return buf;
775} 853}
776 854
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 3dda85ca50c1..d906d0ad5d40 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -768,7 +768,7 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
768 Py_DECREF(file); 768 Py_DECREF(file);
769 goto free_list; 769 goto free_list;
770 } 770 }
771 771
772 Py_DECREF(file); 772 Py_DECREF(file);
773 } 773 }
774 774
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index d808a328f4dc..0c815a40a6e8 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -89,7 +89,7 @@ static void handler_call_die(const char *handler_name)
89 89
90/* 90/*
91 * Insert val into into the dictionary and decrement the reference counter. 91 * Insert val into into the dictionary and decrement the reference counter.
92 * This is necessary for dictionaries since PyDict_SetItemString() does not 92 * This is necessary for dictionaries since PyDict_SetItemString() does not
93 * steal a reference, as opposed to PyTuple_SetItem(). 93 * steal a reference, as opposed to PyTuple_SetItem().
94 */ 94 */
95static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val) 95static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5f0e05a76c05..0baf75f12b7c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -274,7 +274,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
274 if (tool->id_index == NULL) 274 if (tool->id_index == NULL)
275 tool->id_index = process_id_index_stub; 275 tool->id_index = process_id_index_stub;
276} 276}
277 277
278static void swap_sample_id_all(union perf_event *event, void *data) 278static void swap_sample_id_all(union perf_event *event, void *data)
279{ 279{
280 void *end = (void *) event + event->header.size; 280 void *end = (void *) event + event->header.size;
@@ -1251,9 +1251,9 @@ fetch_mmaped_event(struct perf_session *session,
1251#define NUM_MMAPS 128 1251#define NUM_MMAPS 128
1252#endif 1252#endif
1253 1253
1254int __perf_session__process_events(struct perf_session *session, 1254static int __perf_session__process_events(struct perf_session *session,
1255 u64 data_offset, u64 data_size, 1255 u64 data_offset, u64 data_size,
1256 u64 file_size, struct perf_tool *tool) 1256 u64 file_size, struct perf_tool *tool)
1257{ 1257{
1258 int fd = perf_data_file__fd(session->file); 1258 int fd = perf_data_file__fd(session->file);
1259 u64 head, page_offset, file_offset, file_pos, size; 1259 u64 head, page_offset, file_offset, file_pos, size;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index dc26ebf60fe4..6d663dc76404 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -49,9 +49,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
49 union perf_event **event_ptr, 49 union perf_event **event_ptr,
50 struct perf_sample *sample); 50 struct perf_sample *sample);
51 51
52int __perf_session__process_events(struct perf_session *session,
53 u64 data_offset, u64 data_size, u64 size,
54 struct perf_tool *tool);
55int perf_session__process_events(struct perf_session *session, 52int perf_session__process_events(struct perf_session *session,
56 struct perf_tool *tool); 53 struct perf_tool *tool);
57 54
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 9139dda9f9a3..7a39c1ed8d37 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1304,6 +1304,37 @@ static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1304 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1304 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1305} 1305}
1306 1306
1307static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1308 struct hist_entry *a, struct hist_entry *b)
1309{
1310 struct hpp_sort_entry *hse;
1311
1312 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1313 return hse->se->se_cmp(a, b);
1314}
1315
1316static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1317 struct hist_entry *a, struct hist_entry *b)
1318{
1319 struct hpp_sort_entry *hse;
1320 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1321
1322 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1323 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1324 return collapse_fn(a, b);
1325}
1326
1327static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1328 struct hist_entry *a, struct hist_entry *b)
1329{
1330 struct hpp_sort_entry *hse;
1331 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1332
1333 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1334 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1335 return sort_fn(a, b);
1336}
1337
1307static struct hpp_sort_entry * 1338static struct hpp_sort_entry *
1308__sort_dimension__alloc_hpp(struct sort_dimension *sd) 1339__sort_dimension__alloc_hpp(struct sort_dimension *sd)
1309{ 1340{
@@ -1322,9 +1353,9 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1322 hse->hpp.entry = __sort__hpp_entry; 1353 hse->hpp.entry = __sort__hpp_entry;
1323 hse->hpp.color = NULL; 1354 hse->hpp.color = NULL;
1324 1355
1325 hse->hpp.cmp = sd->entry->se_cmp; 1356 hse->hpp.cmp = __sort__hpp_cmp;
1326 hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp; 1357 hse->hpp.collapse = __sort__hpp_collapse;
1327 hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse; 1358 hse->hpp.sort = __sort__hpp_sort;
1328 1359
1329 INIT_LIST_HEAD(&hse->hpp.list); 1360 INIT_LIST_HEAD(&hse->hpp.list);
1330 INIT_LIST_HEAD(&hse->hpp.sort_list); 1361 INIT_LIST_HEAD(&hse->hpp.sort_list);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 06fcd1bf98b6..b24f9d8727a8 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -574,13 +574,16 @@ static int decompress_kmodule(struct dso *dso, const char *name,
574 const char *ext = strrchr(name, '.'); 574 const char *ext = strrchr(name, '.');
575 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX"; 575 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
576 576
577 if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP && 577 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
578 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) || 578 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
579 type != dso->symtab_type) 579 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
580 return -1; 580 return -1;
581 581
582 if (!ext || !is_supported_compression(ext + 1)) 582 if (!ext || !is_supported_compression(ext + 1)) {
583 return -1; 583 ext = strrchr(dso->name, '.');
584 if (!ext || !is_supported_compression(ext + 1))
585 return -1;
586 }
584 587
585 fd = mkstemp(tmpbuf); 588 fd = mkstemp(tmpbuf);
586 if (fd < 0) 589 if (fd < 0)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a194702a0a2f..a69066865a55 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -685,7 +685,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
685 struct machine *machine = kmaps->machine; 685 struct machine *machine = kmaps->machine;
686 struct map *curr_map = map; 686 struct map *curr_map = map;
687 struct symbol *pos; 687 struct symbol *pos;
688 int count = 0, moved = 0; 688 int count = 0, moved = 0;
689 struct rb_root *root = &dso->symbols[map->type]; 689 struct rb_root *root = &dso->symbols[map->type];
690 struct rb_node *next = rb_first(root); 690 struct rb_node *next = rb_first(root);
691 int kernel_range = 0; 691 int kernel_range = 0;
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 6edf535f65c2..e3c40a520a25 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -266,14 +266,17 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
266 u64 *fde_count) 266 u64 *fde_count)
267{ 267{
268 int ret = -EINVAL, fd; 268 int ret = -EINVAL, fd;
269 u64 offset; 269 u64 offset = dso->data.frame_offset;
270 270
271 fd = dso__data_fd(dso, machine); 271 if (offset == 0) {
272 if (fd < 0) 272 fd = dso__data_fd(dso, machine);
273 return -EINVAL; 273 if (fd < 0)
274 return -EINVAL;
274 275
275 /* Check the .eh_frame section for unwinding info */ 276 /* Check the .eh_frame section for unwinding info */
276 offset = elf_section_offset(fd, ".eh_frame_hdr"); 277 offset = elf_section_offset(fd, ".eh_frame_hdr");
278 dso->data.frame_offset = offset;
279 }
277 280
278 if (offset) 281 if (offset)
279 ret = unwind_spec_ehframe(dso, machine, offset, 282 ret = unwind_spec_ehframe(dso, machine, offset,
@@ -287,14 +290,20 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
287static int read_unwind_spec_debug_frame(struct dso *dso, 290static int read_unwind_spec_debug_frame(struct dso *dso,
288 struct machine *machine, u64 *offset) 291 struct machine *machine, u64 *offset)
289{ 292{
290 int fd = dso__data_fd(dso, machine); 293 int fd;
294 u64 ofs = dso->data.frame_offset;
291 295
292 if (fd < 0) 296 if (ofs == 0) {
293 return -EINVAL; 297 fd = dso__data_fd(dso, machine);
298 if (fd < 0)
299 return -EINVAL;
294 300
295 /* Check the .debug_frame section for unwinding info */ 301 /* Check the .debug_frame section for unwinding info */
296 *offset = elf_section_offset(fd, ".debug_frame"); 302 ofs = elf_section_offset(fd, ".debug_frame");
303 dso->data.frame_offset = ofs;
304 }
297 305
306 *offset = ofs;
298 if (*offset) 307 if (*offset)
299 return 0; 308 return 0;
300 309
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index f4b953354ff7..eec688041500 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 2f0f34a36db4..5da129e10aa2 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
index c13ff9c51d74..b51e40a9a120 100644
--- a/tools/power/acpi/os_specific/service_layers/oslibcfs.c
+++ b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 0dc2485dedf5..92f1fd700344 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index 733f9e490fc4..e153fcb12b1a 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 99b47b6194a3..3853a7350440 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 7ccb073f8316..6858c0893c91 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index a2d37d610639..84bdef0136cb 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 24d32968802d..c736adf5fb55 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index d470046a6d81..8f2fe168228e 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 853b4da22c3e..d0ba6535f5af 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2014, Intel Corp. 8 * Copyright (C) 2000 - 2015, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index abe14b7f36e9..bb99cde3f5f9 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -24,7 +24,7 @@
24 24
25ncpus=`grep '^processor' /proc/cpuinfo | wc -l` 25ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
26idlecpus=`mpstat | tail -1 | \ 26idlecpus=`mpstat | tail -1 | \
27 awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'` 27 awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
28awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null ' 28awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
29BEGIN { 29BEGIN {
30 cpus2use = idlecpus; 30 cpus2use = idlecpus;
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index d6cc07fc137f..559e01ac86be 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -30,6 +30,7 @@ else
30 echo Unreadable results directory: $i 30 echo Unreadable results directory: $i
31 exit 1 31 exit 1
32fi 32fi
33. tools/testing/selftests/rcutorture/bin/functions.sh
33 34
34configfile=`echo $i | sed -e 's/^.*\///'` 35configfile=`echo $i | sed -e 's/^.*\///'`
35ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'` 36ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
@@ -48,4 +49,21 @@ else
48 title="$title ($ngpsps per second)" 49 title="$title ($ngpsps per second)"
49 fi 50 fi
50 echo $title 51 echo $title
52 nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
53 if test -z "$nclosecalls"
54 then
55 exit 0
56 fi
57 if test "$nclosecalls" -eq 0
58 then
59 exit 0
60 fi
61 # Compute number of close calls per tenth of an hour
62 nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
63 if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
64 then
65 print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
66 else
67 print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
68 fi
51fi 69fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 8ca9f21f2efc..5236e073919d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -8,9 +8,9 @@
8# 8#
9# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args 9# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
10# 10#
11# qemu-args defaults to "-nographic", along with arguments specifying the 11# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
12# number of CPUs and other options generated from 12# arguments specifying the number of CPUs and other
13# the underlying CPU architecture. 13# options generated from the underlying CPU architecture.
14# boot_args defaults to value returned by the per_version_boot_params 14# boot_args defaults to value returned by the per_version_boot_params
15# shell function. 15# shell function.
16# 16#
@@ -138,7 +138,7 @@ then
138fi 138fi
139 139
140# Generate -smp qemu argument. 140# Generate -smp qemu argument.
141qemu_args="-nographic $qemu_args" 141qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
142cpu_count=`configNR_CPUS.sh $config_template` 142cpu_count=`configNR_CPUS.sh $config_template`
143cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"` 143cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
144vcpus=`identify_qemu_vcpus` 144vcpus=`identify_qemu_vcpus`
@@ -168,6 +168,7 @@ then
168 touch $resdir/buildonly 168 touch $resdir/buildonly
169 exit 0 169 exit 0
170fi 170fi
171echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
171echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd 172echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
172( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) & 173( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
173qemu_pid=$! 174qemu_pid=$!
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 499d1e598e42..a6b57622c2e5 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -26,12 +26,15 @@
26# 26#
27# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 27# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
28 28
29T=$1 29F=$1
30title=$2 30title=$2
31T=/tmp/parse-build.sh.$$
32trap 'rm -rf $T' 0
33mkdir $T
31 34
32. functions.sh 35. functions.sh
33 36
34if grep -q CC < $T 37if grep -q CC < $F
35then 38then
36 : 39 :
37else 40else
@@ -39,18 +42,21 @@ else
39 exit 1 42 exit 1
40fi 43fi
41 44
42if grep -q "error:" < $T 45if grep -q "error:" < $F
43then 46then
44 print_bug $title build errors: 47 print_bug $title build errors:
45 grep "error:" < $T 48 grep "error:" < $F
46 exit 2 49 exit 2
47fi 50fi
48exit 0
49 51
50if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T 52grep warning: < $F > $T/warnings
53grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
54grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
55cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
56if test -s $T/rcuwarnings
51then 57then
52 print_warning $title build errors: 58 print_warning $title build errors:
53 egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T 59 cat $T/rcuwarnings
54 exit 2 60 exit 2
55fi 61fi
56exit 0 62exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index f962ba4cf68b..d8f35cf116be 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
36then 36then
37 print_warning Console output contains nul bytes, old qemu still running? 37 print_warning Console output contains nul bytes, old qemu still running?
38fi 38fi
39egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T 39egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
40if test -s $T 40if test -s $T
41then 41then
42 print_warning Assertion failure in $file $title 42 print_warning Assertion failure in $file $title